instance_id
stringlengths 12
57
| base_commit
stringlengths 40
40
| created_at
stringdate 2015-01-06 14:05:07
2025-04-29 17:56:51
| environment_setup_commit
stringlengths 40
40
| hints_text
stringlengths 0
158k
| patch
stringlengths 261
20.8k
| problem_statement
stringlengths 11
52.5k
| repo
stringlengths 7
53
| test_patch
stringlengths 280
206k
| meta
dict | version
stringclasses 463
values | install_config
dict | requirements
stringlengths 93
34k
⌀ | environment
stringlengths 772
20k
⌀ | FAIL_TO_PASS
sequencelengths 1
856
| FAIL_TO_FAIL
sequencelengths 0
536
| PASS_TO_PASS
sequencelengths 0
7.87k
| PASS_TO_FAIL
sequencelengths 0
92
| license_name
stringclasses 35
values | __index_level_0__
int64 11
21.4k
| num_tokens_patch
int64 103
4.99k
| before_filepaths
sequencelengths 0
14
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
stchris__untangle-43 | 2119763f721b23588b6223f28264843f6fc9f750 | 2017-06-09 18:57:03 | cd441fd6c3375430cca9443aea10e927f3d7fe68 | diff --git a/untangle.py b/untangle.py
index 1263056..6d1e966 100755
--- a/untangle.py
+++ b/untangle.py
@@ -14,6 +14,7 @@
License: MIT License - http://www.opensource.org/licenses/mit-license.php
"""
import os
+import keyword
from xml.sax import make_parser, handler
try:
from StringIO import StringIO
@@ -133,6 +134,11 @@ class Handler(handler.ContentHandler):
name = name.replace('-', '_')
name = name.replace('.', '_')
name = name.replace(':', '_')
+
+ # adding trailing _ for keywords
+ if keyword.iskeyword(name):
+ name += '_'
+
attrs = dict()
for k, v in attributes.items():
attrs[k] = v
| If an XML tag is a Python keyword SyntaxError is raised on access
Namely, I had this issue with a class tag.
My first thought is to use aliases for these. | stchris/untangle | diff --git a/tests/tests.py b/tests/tests.py
index 76a1ef4..33558e3 100755
--- a/tests/tests.py
+++ b/tests/tests.py
@@ -93,6 +93,14 @@ class FromStringTestCase(unittest.TestCase):
self.assertEqual('child1', getattr(o.root, 'child')[0]['name'])
+ def test_python_keyword(self):
+ o = untangle.parse("<class><return/><pass/><None/></class>")
+ self.assert_(o is not None)
+ self.assert_(o.class_ is not None)
+ self.assert_(o.class_.return_ is not None)
+ self.assert_(o.class_.pass_ is not None)
+ self.assert_(o.class_.None_ is not None)
+
class InvalidTestCase(unittest.TestCase):
""" Test corner cases """
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 1
} | 1.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
-e git+https://github.com/stchris/untangle.git@2119763f721b23588b6223f28264843f6fc9f750#egg=untangle
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: untangle
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
prefix: /opt/conda/envs/untangle
| [
"tests/tests.py::FromStringTestCase::test_python_keyword"
] | [] | [
"tests/tests.py::FromStringTestCase::test_attribute_protocol",
"tests/tests.py::FromStringTestCase::test_basic",
"tests/tests.py::FromStringTestCase::test_basic_with_decl",
"tests/tests.py::FromStringTestCase::test_grouping",
"tests/tests.py::FromStringTestCase::test_single_root",
"tests/tests.py::FromStringTestCase::test_with_attributes",
"tests/tests.py::InvalidTestCase::test_empty_xml",
"tests/tests.py::InvalidTestCase::test_invalid_xml",
"tests/tests.py::InvalidTestCase::test_none_xml",
"tests/tests.py::PomXmlTestCase::test_lengths",
"tests/tests.py::PomXmlTestCase::test_parent",
"tests/tests.py::NamespaceTestCase::test_namespace",
"tests/tests.py::IterationTestCase::test_multiple_children",
"tests/tests.py::IterationTestCase::test_single_child",
"tests/tests.py::TwimlTestCase::test_twiml_dir",
"tests/tests.py::UnicodeTestCase::test_lengths",
"tests/tests.py::UnicodeTestCase::test_unicode_file",
"tests/tests.py::UnicodeTestCase::test_unicode_string",
"tests/tests.py::FileObjects::test_file_object",
"tests/tests.py::UntangleInObjectsTestCase::test_object",
"tests/tests.py::UrlStringTestCase::test_is_url",
"tests/tests.py::TestSaxHandler::test_cdata",
"tests/tests.py::TestSaxHandler::test_empty_handler",
"tests/tests.py::TestSaxHandler::test_handler",
"tests/tests.py::FigsTestCase::test_figs",
"tests/tests.py::ParserFeatureTestCase::test_invalid_external_dtd",
"tests/tests.py::ParserFeatureTestCase::test_invalid_feature",
"tests/tests.py::ParserFeatureTestCase::test_valid_feature"
] | [] | MIT License | 1,354 | 197 | [
"untangle.py"
] |
|
d3dave__cough-3 | b2a4ebe62f953e35beff00a9a7d7426cc20f6350 | 2017-06-10 20:39:05 | b2a4ebe62f953e35beff00a9a7d7426cc20f6350 | diff --git a/cough/file.py b/cough/file.py
index 84338e7..d96aa8a 100644
--- a/cough/file.py
+++ b/cough/file.py
@@ -125,25 +125,17 @@ class ObjectModule:
return bytes(body_buffer)
def dump_sections(self):
+ data_buf_offset = FileHeader.struct.size + 40 * len(self.sections)
+ hdrs_buf = bytearray()
data_buf = bytearray()
- sections_offsets = []
- reloc_offsets = []
for sec in self.sections:
if sec.data:
- sections_offsets.append(len(data_buf))
+ sec.pointer_to_raw_data = data_buf_offset + len(data_buf)
data_buf += sec.data
if sec.relocations:
- reloc_offsets.append(len(data_buf))
+ sec.pointer_to_relocations = data_buf_offset + len(data_buf)
for reloc in sec.relocations:
data_buf += reloc.pack()
+ hdrs_buf += sec.get_header()
- sections_buffer_offset = FileHeader.struct.size + 40 * len(self.sections)
- hdrs_and_data_buf = bytearray()
- for i, sec in enumerate(self.sections):
- if sec.data:
- sec.pointer_to_raw_data = sections_buffer_offset + sections_offsets[i]
- if sec.relocations:
- sec.pointer_to_relocations = sections_buffer_offset + reloc_offsets[i]
- hdrs_and_data_buf += sec.get_header()
- hdrs_and_data_buf += data_buf
- return bytes(hdrs_and_data_buf)
+ return bytes(hdrs_buf + data_buf)
| Exception when adding an uninitialized section before an initialized section
file.py:133 is inconsistent with file.py:144 | d3dave/cough | diff --git a/tests/test_coff.py b/tests/test_coff.py
index 8ca9ce4..2193214 100644
--- a/tests/test_coff.py
+++ b/tests/test_coff.py
@@ -63,3 +63,16 @@ def test_reloc():
subprocess.run(['PowerShell.exe', BUILD_SCRIPT, file.name, '/out:' + '"' + exe_path + '"'], check=True)
proc = subprocess.run([exe_path], stdout=subprocess.PIPE, check=True)
assert proc.stdout == b'A'
+
+
+def test_uninit_before_init():
+ module = cough.ObjectModule()
+
+ sec_uninit = cough.Section(b'uninit', cough.SectionFlags.CNT_UNINITIALIZED_DATA)
+ sec_uninit.size_of_raw_data = 0x400
+ module.sections.append(sec_uninit)
+
+ sec_init = cough.Section(b'init', 0, b'\xAA\xBB\xCC\xDD')
+ module.sections.append(sec_init)
+
+ assert module.get_buffer()
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 3,
"test_score": 0
},
"num_modified_files": 1
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/d3dave/cough.git@b2a4ebe62f953e35beff00a9a7d7426cc20f6350#egg=cough
exceptiongroup==1.2.2
iniconfig==2.1.0
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
tomli==2.2.1
| name: cough
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- tomli==2.2.1
prefix: /opt/conda/envs/cough
| [
"tests/test_coff.py::test_uninit_before_init"
] | [
"tests/test_coff.py::test_coff",
"tests/test_coff.py::test_reloc"
] | [] | [] | MIT License | 1,359 | 360 | [
"cough/file.py"
] |
|
maximkulkin__lollipop-55 | 360bbc8f9c2b6203ab5af8a3cd051f852ba8dae3 | 2017-06-12 19:52:49 | 360bbc8f9c2b6203ab5af8a3cd051f852ba8dae3 | diff --git a/lollipop/types.py b/lollipop/types.py
index 3ec8a50..acb7f3b 100644
--- a/lollipop/types.py
+++ b/lollipop/types.py
@@ -679,16 +679,22 @@ class Dict(Type):
errors_builder = ValidationErrorBuilder()
result = {}
for k, v in iteritems(data):
- value_type = self.value_types.get(k)
- if value_type is None:
- continue
try:
k = self.key_type.load(k, *args, **kwargs)
except ValidationError as ve:
errors_builder.add_error(k, ve.messages)
+ if k is MISSING:
+ continue
+
+ value_type = self.value_types.get(k)
+ if value_type is None:
+ continue
+
try:
- result[k] = value_type.load(v, *args, **kwargs)
+ value = value_type.load(v, *args, **kwargs)
+ if value is not MISSING:
+ result[k] = value
except ValidationError as ve:
errors_builder.add_error(k, ve.messages)
@@ -715,8 +721,13 @@ class Dict(Type):
except ValidationError as ve:
errors_builder.add_error(k, ve.messages)
+ if k is MISSING:
+ continue
+
try:
- result[k] = value_type.dump(v, *args, **kwargs)
+ value = value_type.dump(v, *args, **kwargs)
+ if value is not MISSING:
+ result[k] = value
except ValidationError as ve:
errors_builder.add_error(k, ve.messages)
| Asymmetry in load/dump for Dict with key_type and dict as values_type
I think there is a problem with the Dict type when using both key_type verification and a values_type dictionary:
In Dict.dump, the key is used in its original / non-dumped form to lookup the value type. However, in Dict.load, the dumped key is used to lookup the value type.
This works fine when using native types as key type such as String and Integer since they map to the same loaded/dumped value. But it's causing a problem when using a more complex key type (e.g. in my case an Enum that dumps to a string).
I believe, in Dict.load, key_type.load should be called **before** the lookup of the value type, so that the lookup is again performed with the original / non-dumped value.
| maximkulkin/lollipop | diff --git a/tests/test_types.py b/tests/test_types.py
index e652fb6..6489dbf 100644
--- a/tests/test_types.py
+++ b/tests/test_types.py
@@ -641,6 +641,30 @@ class TestDict(NameDescriptionTestsMixin, RequiredTestsMixin, ValidationTestsMix
assert Dict(Any(), key_type=Integer())\
.load({'123': 'foo', '456': 'bar'}) == {123: 'foo', 456: 'bar'}
+ def test_loading_dict_with_custom_key_type_and_values_of_different_types(self):
+ assert Dict({1: Integer(), 2: String()}, key_type=Integer())\
+ .load({'1': '123', '2': 'bar'}) == {1: 123, 2: 'bar'}
+
+ def test_loading_skips_key_value_if_custom_key_type_loads_to_missing(self):
+ class CustomKeyType(String):
+ def load(self, data, *args, **kwargs):
+ if data == 'foo':
+ return MISSING
+ return super(CustomKeyType, self).load(data, *args, **kwargs)
+
+ assert Dict(String(), key_type=CustomKeyType())\
+ .load({'foo': 'hello', 'bar': 'goodbye'}) == {'bar': 'goodbye'}
+
+ def test_loading_skips_key_value_if_value_type_loads_to_missing(self):
+ class CustomValueType(String):
+ def load(self, data, *args, **kwargs):
+ if data == 'foo':
+ return MISSING
+ return super(CustomValueType, self).load(data, *args, **kwargs)
+
+ assert Dict(CustomValueType())\
+ .load({'key1': 'foo', 'key2': 'bar'}) == {'key2': 'bar'}
+
def test_loading_accepts_any_key_if_key_type_is_not_specified(self):
assert Dict(Any())\
.load({'123': 'foo', 456: 'bar'}) == {'123': 'foo', 456: 'bar'}
@@ -719,7 +743,27 @@ class TestDict(NameDescriptionTestsMixin, RequiredTestsMixin, ValidationTestsMix
def test_dumping_dict_with_values_of_different_types(self):
value = {'foo': 1, 'bar': 'hello', 'baz': True}
assert Dict({'foo': Integer(), 'bar': String(), 'baz': Boolean()})\
- .load(value) == value
+ .dump(value) == value
+
+ def test_dumping_skips_key_value_if_custom_key_type_loads_to_missing(self):
+ class CustomKeyType(String):
+ def dump(self, data, *args, **kwargs):
+ if data == 'foo':
+ return MISSING
+ return super(CustomKeyType, self).load(data, *args, **kwargs)
+
+ assert Dict(String(), key_type=CustomKeyType())\
+ .dump({'foo': 'hello', 'bar': 'goodbye'}) == {'bar': 'goodbye'}
+
+ def test_dumping_skips_key_value_if_value_type_loads_to_missing(self):
+ class CustomValueType(String):
+ def dump(self, data, *args, **kwargs):
+ if data == 'foo':
+ return MISSING
+ return super(CustomValueType, self).load(data, *args, **kwargs)
+
+ assert Dict(CustomValueType())\
+ .dump({'key1': 'foo', 'key2': 'bar'}) == {'key2': 'bar'}
def test_dumping_accepts_any_value_if_value_types_are_not_specified(self):
assert Dict()\
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 1.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup==1.2.2
iniconfig==2.1.0
-e git+https://github.com/maximkulkin/lollipop.git@360bbc8f9c2b6203ab5af8a3cd051f852ba8dae3#egg=lollipop
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
tomli==2.2.1
| name: lollipop
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- tomli==2.2.1
prefix: /opt/conda/envs/lollipop
| [
"tests/test_types.py::TestDict::test_loading_dict_with_custom_key_type_and_values_of_different_types",
"tests/test_types.py::TestDict::test_loading_skips_key_value_if_custom_key_type_loads_to_missing",
"tests/test_types.py::TestDict::test_loading_skips_key_value_if_value_type_loads_to_missing",
"tests/test_types.py::TestDict::test_dumping_skips_key_value_if_custom_key_type_loads_to_missing",
"tests/test_types.py::TestDict::test_dumping_skips_key_value_if_value_type_loads_to_missing"
] | [] | [
"tests/test_types.py::TestString::test_loading_does_not_raise_ValidationError_if_validators_succeed",
"tests/test_types.py::TestString::test_loading_raises_ValidationError_if_validator_fails",
"tests/test_types.py::TestString::test_loading_raises_ValidationError_with_combined_messages_if_multiple_validators_fail",
"tests/test_types.py::TestString::test_loading_passes_context_to_validator",
"tests/test_types.py::TestString::test_validation_returns_None_if_validators_succeed",
"tests/test_types.py::TestString::test_validation_returns_errors_if_validator_fails",
"tests/test_types.py::TestString::test_validation_returns_combined_errors_if_multiple_validators_fail",
"tests/test_types.py::TestString::test_loading_missing_value_raises_required_error",
"tests/test_types.py::TestString::test_loading_None_raises_required_error",
"tests/test_types.py::TestString::test_dumping_missing_value_raises_required_error",
"tests/test_types.py::TestString::test_dumping_None_raises_required_error",
"tests/test_types.py::TestString::test_name",
"tests/test_types.py::TestString::test_description",
"tests/test_types.py::TestString::test_loading_string_value",
"tests/test_types.py::TestString::test_loading_non_string_value_raises_ValidationError",
"tests/test_types.py::TestString::test_dumping_string_value",
"tests/test_types.py::TestString::test_dumping_non_string_value_raises_ValidationError",
"tests/test_types.py::TestNumber::test_loading_does_not_raise_ValidationError_if_validators_succeed",
"tests/test_types.py::TestNumber::test_loading_raises_ValidationError_if_validator_fails",
"tests/test_types.py::TestNumber::test_loading_raises_ValidationError_with_combined_messages_if_multiple_validators_fail",
"tests/test_types.py::TestNumber::test_loading_passes_context_to_validator",
"tests/test_types.py::TestNumber::test_validation_returns_None_if_validators_succeed",
"tests/test_types.py::TestNumber::test_validation_returns_errors_if_validator_fails",
"tests/test_types.py::TestNumber::test_validation_returns_combined_errors_if_multiple_validators_fail",
"tests/test_types.py::TestNumber::test_loading_missing_value_raises_required_error",
"tests/test_types.py::TestNumber::test_loading_None_raises_required_error",
"tests/test_types.py::TestNumber::test_dumping_missing_value_raises_required_error",
"tests/test_types.py::TestNumber::test_dumping_None_raises_required_error",
"tests/test_types.py::TestNumber::test_name",
"tests/test_types.py::TestNumber::test_description",
"tests/test_types.py::TestNumber::test_loading_float_value",
"tests/test_types.py::TestNumber::test_loading_non_numeric_value_raises_ValidationError",
"tests/test_types.py::TestNumber::test_dumping_float_value",
"tests/test_types.py::TestNumber::test_dumping_non_numeric_value_raises_ValidationError",
"tests/test_types.py::TestInteger::test_loading_integer_value",
"tests/test_types.py::TestInteger::test_loading_long_value",
"tests/test_types.py::TestInteger::test_loading_non_numeric_value_raises_ValidationError",
"tests/test_types.py::TestInteger::test_dumping_integer_value",
"tests/test_types.py::TestInteger::test_dumping_long_value",
"tests/test_types.py::TestInteger::test_dumping_non_numeric_value_raises_ValidationError",
"tests/test_types.py::TestFloat::test_loading_float_value",
"tests/test_types.py::TestFloat::test_loading_non_numeric_value_raises_ValidationError",
"tests/test_types.py::TestFloat::test_dumping_float_value",
"tests/test_types.py::TestFloat::test_dumping_non_numeric_value_raises_ValidationError",
"tests/test_types.py::TestBoolean::test_loading_does_not_raise_ValidationError_if_validators_succeed",
"tests/test_types.py::TestBoolean::test_loading_raises_ValidationError_if_validator_fails",
"tests/test_types.py::TestBoolean::test_loading_raises_ValidationError_with_combined_messages_if_multiple_validators_fail",
"tests/test_types.py::TestBoolean::test_loading_passes_context_to_validator",
"tests/test_types.py::TestBoolean::test_validation_returns_None_if_validators_succeed",
"tests/test_types.py::TestBoolean::test_validation_returns_errors_if_validator_fails",
"tests/test_types.py::TestBoolean::test_validation_returns_combined_errors_if_multiple_validators_fail",
"tests/test_types.py::TestBoolean::test_loading_missing_value_raises_required_error",
"tests/test_types.py::TestBoolean::test_loading_None_raises_required_error",
"tests/test_types.py::TestBoolean::test_dumping_missing_value_raises_required_error",
"tests/test_types.py::TestBoolean::test_dumping_None_raises_required_error",
"tests/test_types.py::TestBoolean::test_name",
"tests/test_types.py::TestBoolean::test_description",
"tests/test_types.py::TestBoolean::test_loading_boolean_value",
"tests/test_types.py::TestBoolean::test_loading_non_boolean_value_raises_ValidationError",
"tests/test_types.py::TestBoolean::test_dumping_boolean_value",
"tests/test_types.py::TestBoolean::test_dumping_non_boolean_value_raises_ValidationError",
"tests/test_types.py::TestDateTime::test_loading_does_not_raise_ValidationError_if_validators_succeed",
"tests/test_types.py::TestDateTime::test_loading_raises_ValidationError_if_validator_fails",
"tests/test_types.py::TestDateTime::test_loading_raises_ValidationError_with_combined_messages_if_multiple_validators_fail",
"tests/test_types.py::TestDateTime::test_loading_passes_context_to_validator",
"tests/test_types.py::TestDateTime::test_validation_returns_None_if_validators_succeed",
"tests/test_types.py::TestDateTime::test_validation_returns_errors_if_validator_fails",
"tests/test_types.py::TestDateTime::test_validation_returns_combined_errors_if_multiple_validators_fail",
"tests/test_types.py::TestDateTime::test_loading_missing_value_raises_required_error",
"tests/test_types.py::TestDateTime::test_loading_None_raises_required_error",
"tests/test_types.py::TestDateTime::test_dumping_missing_value_raises_required_error",
"tests/test_types.py::TestDateTime::test_dumping_None_raises_required_error",
"tests/test_types.py::TestDateTime::test_name",
"tests/test_types.py::TestDateTime::test_description",
"tests/test_types.py::TestDateTime::test_loading_string_date",
"tests/test_types.py::TestDateTime::test_loading_using_predefined_format",
"tests/test_types.py::TestDateTime::test_loading_using_custom_format",
"tests/test_types.py::TestDateTime::test_loading_raises_ValidationError_if_value_is_not_string",
"tests/test_types.py::TestDateTime::test_customizing_error_message_if_value_is_not_string",
"tests/test_types.py::TestDateTime::test_loading_raises_ValidationError_if_value_string_does_not_match_date_format",
"tests/test_types.py::TestDateTime::test_customizing_error_message_if_value_string_does_not_match_date_format",
"tests/test_types.py::TestDateTime::test_loading_passes_deserialized_date_to_validator",
"tests/test_types.py::TestDateTime::test_dumping_date",
"tests/test_types.py::TestDateTime::test_dumping_using_predefined_format",
"tests/test_types.py::TestDateTime::test_dumping_using_custom_format",
"tests/test_types.py::TestDateTime::test_dumping_raises_ValidationError_if_value_is_not_string",
"tests/test_types.py::TestDate::test_loading_does_not_raise_ValidationError_if_validators_succeed",
"tests/test_types.py::TestDate::test_loading_raises_ValidationError_if_validator_fails",
"tests/test_types.py::TestDate::test_loading_raises_ValidationError_with_combined_messages_if_multiple_validators_fail",
"tests/test_types.py::TestDate::test_loading_passes_context_to_validator",
"tests/test_types.py::TestDate::test_validation_returns_None_if_validators_succeed",
"tests/test_types.py::TestDate::test_validation_returns_errors_if_validator_fails",
"tests/test_types.py::TestDate::test_validation_returns_combined_errors_if_multiple_validators_fail",
"tests/test_types.py::TestDate::test_loading_missing_value_raises_required_error",
"tests/test_types.py::TestDate::test_loading_None_raises_required_error",
"tests/test_types.py::TestDate::test_dumping_missing_value_raises_required_error",
"tests/test_types.py::TestDate::test_dumping_None_raises_required_error",
"tests/test_types.py::TestDate::test_name",
"tests/test_types.py::TestDate::test_description",
"tests/test_types.py::TestDate::test_loading_string_date",
"tests/test_types.py::TestDate::test_loading_using_predefined_format",
"tests/test_types.py::TestDate::test_loading_using_custom_format",
"tests/test_types.py::TestDate::test_loading_raises_ValidationError_if_value_is_not_string",
"tests/test_types.py::TestDate::test_customizing_error_message_if_value_is_not_string",
"tests/test_types.py::TestDate::test_loading_raises_ValidationError_if_value_string_does_not_match_date_format",
"tests/test_types.py::TestDate::test_customizing_error_message_if_value_string_does_not_match_date_format",
"tests/test_types.py::TestDate::test_loading_passes_deserialized_date_to_validator",
"tests/test_types.py::TestDate::test_dumping_date",
"tests/test_types.py::TestDate::test_dumping_using_predefined_format",
"tests/test_types.py::TestDate::test_dumping_using_custom_format",
"tests/test_types.py::TestDate::test_dumping_raises_ValidationError_if_value_is_not_string",
"tests/test_types.py::TestTime::test_loading_does_not_raise_ValidationError_if_validators_succeed",
"tests/test_types.py::TestTime::test_loading_raises_ValidationError_if_validator_fails",
"tests/test_types.py::TestTime::test_loading_raises_ValidationError_with_combined_messages_if_multiple_validators_fail",
"tests/test_types.py::TestTime::test_loading_passes_context_to_validator",
"tests/test_types.py::TestTime::test_validation_returns_None_if_validators_succeed",
"tests/test_types.py::TestTime::test_validation_returns_errors_if_validator_fails",
"tests/test_types.py::TestTime::test_validation_returns_combined_errors_if_multiple_validators_fail",
"tests/test_types.py::TestTime::test_loading_missing_value_raises_required_error",
"tests/test_types.py::TestTime::test_loading_None_raises_required_error",
"tests/test_types.py::TestTime::test_dumping_missing_value_raises_required_error",
"tests/test_types.py::TestTime::test_dumping_None_raises_required_error",
"tests/test_types.py::TestTime::test_name",
"tests/test_types.py::TestTime::test_description",
"tests/test_types.py::TestTime::test_loading_string_date",
"tests/test_types.py::TestTime::test_loading_using_custom_format",
"tests/test_types.py::TestTime::test_loading_raises_ValidationError_if_value_is_not_string",
"tests/test_types.py::TestTime::test_customizing_error_message_if_value_is_not_string",
"tests/test_types.py::TestTime::test_loading_raises_ValidationError_if_value_string_does_not_match_date_format",
"tests/test_types.py::TestTime::test_customizing_error_message_if_value_string_does_not_match_date_format",
"tests/test_types.py::TestTime::test_loading_passes_deserialized_date_to_validator",
"tests/test_types.py::TestTime::test_dumping_date",
"tests/test_types.py::TestTime::test_dumping_using_custom_format",
"tests/test_types.py::TestTime::test_dumping_raises_ValidationError_if_value_is_not_string",
"tests/test_types.py::TestList::test_loading_does_not_raise_ValidationError_if_validators_succeed",
"tests/test_types.py::TestList::test_loading_raises_ValidationError_if_validator_fails",
"tests/test_types.py::TestList::test_loading_raises_ValidationError_with_combined_messages_if_multiple_validators_fail",
"tests/test_types.py::TestList::test_loading_passes_context_to_validator",
"tests/test_types.py::TestList::test_validation_returns_None_if_validators_succeed",
"tests/test_types.py::TestList::test_validation_returns_errors_if_validator_fails",
"tests/test_types.py::TestList::test_validation_returns_combined_errors_if_multiple_validators_fail",
"tests/test_types.py::TestList::test_loading_missing_value_raises_required_error",
"tests/test_types.py::TestList::test_loading_None_raises_required_error",
"tests/test_types.py::TestList::test_dumping_missing_value_raises_required_error",
"tests/test_types.py::TestList::test_dumping_None_raises_required_error",
"tests/test_types.py::TestList::test_name",
"tests/test_types.py::TestList::test_description",
"tests/test_types.py::TestList::test_loading_list_value",
"tests/test_types.py::TestList::test_loading_non_list_value_raises_ValidationError",
"tests/test_types.py::TestList::test_loading_list_value_with_items_of_incorrect_type_raises_ValidationError",
"tests/test_types.py::TestList::test_loading_list_value_with_items_that_have_validation_errors_raises_ValidationError",
"tests/test_types.py::TestList::test_loading_does_not_validate_whole_list_if_items_have_errors",
"tests/test_types.py::TestList::test_loading_passes_context_to_inner_type_load",
"tests/test_types.py::TestList::test_dumping_list_value",
"tests/test_types.py::TestList::test_dumping_sequence_value",
"tests/test_types.py::TestList::test_dumping_non_list_value_raises_ValidationError",
"tests/test_types.py::TestList::test_dumping_list_value_with_items_of_incorrect_type_raises_ValidationError",
"tests/test_types.py::TestList::test_dumping_passes_context_to_inner_type_dump",
"tests/test_types.py::TestTuple::test_loading_does_not_raise_ValidationError_if_validators_succeed",
"tests/test_types.py::TestTuple::test_loading_raises_ValidationError_if_validator_fails",
"tests/test_types.py::TestTuple::test_loading_raises_ValidationError_with_combined_messages_if_multiple_validators_fail",
"tests/test_types.py::TestTuple::test_loading_passes_context_to_validator",
"tests/test_types.py::TestTuple::test_validation_returns_None_if_validators_succeed",
"tests/test_types.py::TestTuple::test_validation_returns_errors_if_validator_fails",
"tests/test_types.py::TestTuple::test_validation_returns_combined_errors_if_multiple_validators_fail",
"tests/test_types.py::TestTuple::test_loading_missing_value_raises_required_error",
"tests/test_types.py::TestTuple::test_loading_None_raises_required_error",
"tests/test_types.py::TestTuple::test_dumping_missing_value_raises_required_error",
"tests/test_types.py::TestTuple::test_dumping_None_raises_required_error",
"tests/test_types.py::TestTuple::test_name",
"tests/test_types.py::TestTuple::test_description",
"tests/test_types.py::TestTuple::test_loading_tuple_with_values_of_same_type",
"tests/test_types.py::TestTuple::test_loading_tuple_with_values_of_different_type",
"tests/test_types.py::TestTuple::test_loading_non_tuple_value_raises_ValidationError",
"tests/test_types.py::TestTuple::test_loading_tuple_with_items_of_incorrect_type_raises_ValidationError",
"tests/test_types.py::TestTuple::test_loading_tuple_with_items_that_have_validation_errors_raises_ValidationErrors",
"tests/test_types.py::TestTuple::test_loading_passes_context_to_inner_type_load",
"tests/test_types.py::TestTuple::test_dump_tuple",
"tests/test_types.py::TestTuple::test_dump_sequence",
"tests/test_types.py::TestTuple::test_dumping_non_tuple_raises_ValidationError",
"tests/test_types.py::TestTuple::test_dumping_sequence_of_incorrect_length_raises_ValidationError",
"tests/test_types.py::TestTuple::test_dumping_tuple_with_items_of_incorrect_type_raises_ValidationError",
"tests/test_types.py::TestTuple::test_dumping_tuple_passes_context_to_inner_type_dump",
"tests/test_types.py::TestDict::test_loading_does_not_raise_ValidationError_if_validators_succeed",
"tests/test_types.py::TestDict::test_loading_raises_ValidationError_if_validator_fails",
"tests/test_types.py::TestDict::test_loading_raises_ValidationError_with_combined_messages_if_multiple_validators_fail",
"tests/test_types.py::TestDict::test_loading_passes_context_to_validator",
"tests/test_types.py::TestDict::test_validation_returns_None_if_validators_succeed",
"tests/test_types.py::TestDict::test_validation_returns_errors_if_validator_fails",
"tests/test_types.py::TestDict::test_validation_returns_combined_errors_if_multiple_validators_fail",
"tests/test_types.py::TestDict::test_loading_missing_value_raises_required_error",
"tests/test_types.py::TestDict::test_loading_None_raises_required_error",
"tests/test_types.py::TestDict::test_dumping_missing_value_raises_required_error",
"tests/test_types.py::TestDict::test_dumping_None_raises_required_error",
"tests/test_types.py::TestDict::test_name",
"tests/test_types.py::TestDict::test_description",
"tests/test_types.py::TestDict::test_loading_dict_with_custom_key_type",
"tests/test_types.py::TestDict::test_loading_accepts_any_key_if_key_type_is_not_specified",
"tests/test_types.py::TestDict::test_loading_dict_with_values_of_the_same_type",
"tests/test_types.py::TestDict::test_loading_dict_with_values_of_different_types",
"tests/test_types.py::TestDict::test_loading_accepts_any_value_if_value_types_are_not_specified",
"tests/test_types.py::TestDict::test_loading_non_dict_value_raises_ValidationError",
"tests/test_types.py::TestDict::test_loading_dict_with_incorrect_key_value_raises_ValidationError",
"tests/test_types.py::TestDict::test_loading_dict_with_items_of_incorrect_type_raises_ValidationError",
"tests/test_types.py::TestDict::test_loading_dict_with_items_that_have_validation_errors_raises_ValidationError",
"tests/test_types.py::TestDict::test_loading_does_not_validate_whole_list_if_items_have_errors",
"tests/test_types.py::TestDict::test_loading_dict_with_incorrect_key_value_and_incorrect_value_raises_ValidationError_with_both_errors",
"tests/test_types.py::TestDict::test_loading_passes_context_to_inner_type_load",
"tests/test_types.py::TestDict::test_dumping_dict_with_custom_key_type",
"tests/test_types.py::TestDict::test_dumping_accepts_any_key_if_key_type_is_not_specified",
"tests/test_types.py::TestDict::test_dumping_dict_with_values_of_the_same_type",
"tests/test_types.py::TestDict::test_dumping_dict_with_values_of_different_types",
"tests/test_types.py::TestDict::test_dumping_accepts_any_value_if_value_types_are_not_specified",
"tests/test_types.py::TestDict::test_dumping_non_dict_value_raises_ValidationError",
"tests/test_types.py::TestDict::test_dumping_dict_with_incorrect_key_value_raises_ValidationError",
"tests/test_types.py::TestDict::test_dumping_dict_with_items_of_incorrect_type_raises_ValidationError",
"tests/test_types.py::TestDict::test_dumping_dict_with_incorrect_key_value_and_incorrect_value_raises_ValidationError_with_both_errors",
"tests/test_types.py::TestDict::test_dumping_passes_context_to_inner_type_dump",
"tests/test_types.py::TestOneOf::test_loading_values_of_one_of_listed_types",
"tests/test_types.py::TestOneOf::test_loading_raises_ValidationError_if_value_is_of_unlisted_type",
"tests/test_types.py::TestOneOf::test_loading_raises_ValidationError_if_deserialized_value_has_errors",
"tests/test_types.py::TestOneOf::test_loading_raises_ValidationError_if_type_hint_is_unknown",
"tests/test_types.py::TestOneOf::test_loading_with_type_hinting",
"tests/test_types.py::TestOneOf::test_loading_with_type_hinting_raises_ValidationError_if_deserialized_value_has_errors",
"tests/test_types.py::TestOneOf::test_dumping_values_of_one_of_listed_types",
"tests/test_types.py::TestOneOf::test_dumping_raises_ValidationError_if_value_is_of_unlisted_type",
"tests/test_types.py::TestOneOf::test_dumping_raises_ValidationError_if_type_hint_is_unknown",
"tests/test_types.py::TestOneOf::test_dumping_raises_ValidationError_if_serialized_value_has_errors",
"tests/test_types.py::TestOneOf::test_dumping_with_type_hinting",
"tests/test_types.py::TestOneOf::test_dumping_with_type_hinting_raises_ValidationError_if_deserialized_value_has_errors",
"tests/test_types.py::TestAttributeField::test_getting_value_returns_value_of_given_object_attribute",
"tests/test_types.py::TestAttributeField::test_getting_value_returns_value_of_configured_object_attribute",
"tests/test_types.py::TestAttributeField::test_getting_value_returns_value_of_field_name_transformed_with_given_name_transformation",
"tests/test_types.py::TestAttributeField::test_setting_value_sets_given_value_to_given_object_attribute",
"tests/test_types.py::TestAttributeField::test_setting_value_sets_given_value_to_configured_object_attribute",
"tests/test_types.py::TestAttributeField::test_setting_value_sets_given_value_to_field_name_transformed_with_given_name_transformation",
"tests/test_types.py::TestAttributeField::test_loading_value_with_field_type",
"tests/test_types.py::TestAttributeField::test_loading_given_attribute_regardless_of_attribute_override",
"tests/test_types.py::TestAttributeField::test_loading_missing_value_if_attribute_does_not_exist",
"tests/test_types.py::TestAttributeField::test_loading_passes_context_to_field_type_load",
"tests/test_types.py::TestAttributeField::test_dumping_given_attribute_from_object",
"tests/test_types.py::TestAttributeField::test_dumping_object_attribute_with_field_type",
"tests/test_types.py::TestAttributeField::test_dumping_a_different_attribute_from_object",
"tests/test_types.py::TestAttributeField::test_dumping_passes_context_to_field_type_dump",
"tests/test_types.py::TestMethodField::test_get_value_returns_result_of_calling_configured_method_on_object",
"tests/test_types.py::TestMethodField::test_get_value_returns_result_of_calling_method_calculated_by_given_function_on_object",
"tests/test_types.py::TestMethodField::test_get_value_returns_MISSING_if_get_method_is_not_specified",
"tests/test_types.py::TestMethodField::test_get_value_raises_ValueError_if_method_does_not_exist",
"tests/test_types.py::TestMethodField::test_get_value_raises_ValueError_if_property_is_not_callable",
"tests/test_types.py::TestMethodField::test_get_value_passes_context_to_method",
"tests/test_types.py::TestMethodField::test_set_value_calls_configure_method_on_object",
"tests/test_types.py::TestMethodField::test_set_value_calls_method_calculated_by_given_function_on_object",
"tests/test_types.py::TestMethodField::test_set_value_does_not_do_anything_if_set_method_is_not_specified",
"tests/test_types.py::TestMethodField::test_set_value_raises_ValueError_if_method_does_not_exist",
"tests/test_types.py::TestMethodField::test_set_value_raises_ValueError_if_property_is_not_callable",
"tests/test_types.py::TestMethodField::test_set_value_passes_context_to_method",
"tests/test_types.py::TestMethodField::test_loading_value_with_field_type",
"tests/test_types.py::TestMethodField::test_loading_value_returns_loaded_value",
"tests/test_types.py::TestMethodField::test_loading_value_passes_context_to_field_types_load",
"tests/test_types.py::TestMethodField::test_loading_value_into_existing_object_calls_field_types_load_into",
"tests/test_types.py::TestMethodField::test_loading_value_into_existing_object_calls_field_types_load_if_load_into_is_not_available",
"tests/test_types.py::TestMethodField::test_loading_value_into_existing_object_calls_field_types_load_if_old_value_is_None",
"tests/test_types.py::TestMethodField::test_loading_value_into_existing_object_calls_field_types_load_if_old_value_is_MISSING",
"tests/test_types.py::TestMethodField::test_loading_value_into_existing_object_passes_context_to_field_types_load_into",
"tests/test_types.py::TestMethodField::test_dumping_result_of_given_objects_method",
"tests/test_types.py::TestMethodField::test_dumping_result_of_objects_method_with_field_type",
"tests/test_types.py::TestMethodField::test_dumping_result_of_a_different_objects_method",
"tests/test_types.py::TestMethodField::test_dumping_raises_ValueError_if_given_method_does_not_exist",
"tests/test_types.py::TestMethodField::test_dumping_raises_ValueError_if_given_method_is_not_callable",
"tests/test_types.py::TestMethodField::test_dumping_passes_context_to_field_type_dump",
"tests/test_types.py::TestFunctionField::test_get_value_returns_result_of_calling_configured_function_with_object",
"tests/test_types.py::TestFunctionField::test_get_value_returns_MISSING_if_get_func_is_not_specified",
"tests/test_types.py::TestFunctionField::test_get_value_raises_ValueError_if_property_is_not_callable",
"tests/test_types.py::TestFunctionField::test_get_value_passes_context_to_func",
"tests/test_types.py::TestFunctionField::test_set_value_calls_configure_method_on_object",
"tests/test_types.py::TestFunctionField::test_set_value_does_not_do_anything_if_set_func_is_not_specified",
"tests/test_types.py::TestFunctionField::test_set_value_raises_ValueError_if_property_is_not_callable",
"tests/test_types.py::TestFunctionField::test_set_value_passes_context_to_func",
"tests/test_types.py::TestFunctionField::test_loading_value_with_field_type",
"tests/test_types.py::TestFunctionField::test_loading_value_returns_loaded_value",
"tests/test_types.py::TestFunctionField::test_loading_value_passes_context_to_field_types_load",
"tests/test_types.py::TestFunctionField::test_loading_value_into_existing_object_calls_field_types_load_into",
"tests/test_types.py::TestFunctionField::test_loading_value_into_existing_object_calls_field_types_load_if_load_into_is_not_available",
"tests/test_types.py::TestFunctionField::test_loading_value_into_existing_object_calls_field_types_load_if_old_value_is_None",
"tests/test_types.py::TestFunctionField::test_loading_value_into_existing_object_calls_field_types_load_if_old_value_is_MISSING",
"tests/test_types.py::TestFunctionField::test_loading_value_into_existing_object_passes_context_to_field_types_load_into",
"tests/test_types.py::TestFunctionField::test_dumping_result_of_given_function",
"tests/test_types.py::TestFunctionField::test_dumping_result_of_objects_method_with_field_type",
"tests/test_types.py::TestFunctionField::test_dumping_raises_ValueError_if_given_get_func_is_not_callable",
"tests/test_types.py::TestFunctionField::test_dumping_passes_context_to_field_type_dump",
"tests/test_types.py::TestConstant::test_name",
"tests/test_types.py::TestConstant::test_description",
"tests/test_types.py::TestConstant::test_loading_always_returns_missing",
"tests/test_types.py::TestConstant::test_loading_raises_ValidationError_if_loaded_value_is_not_a_constant_value_specified",
"tests/test_types.py::TestConstant::test_loading_value_with_inner_type_before_checking_value_correctness",
"tests/test_types.py::TestConstant::test_customizing_error_message_when_value_is_incorrect",
"tests/test_types.py::TestConstant::test_dumping_always_returns_given_value",
"tests/test_types.py::TestConstant::test_dumping_given_constant_with_field_type",
"tests/test_types.py::TestObject::test_loading_does_not_raise_ValidationError_if_validators_succeed",
"tests/test_types.py::TestObject::test_loading_raises_ValidationError_if_validator_fails",
"tests/test_types.py::TestObject::test_loading_raises_ValidationError_with_combined_messages_if_multiple_validators_fail",
"tests/test_types.py::TestObject::test_loading_passes_context_to_validator",
"tests/test_types.py::TestObject::test_validation_returns_None_if_validators_succeed",
"tests/test_types.py::TestObject::test_validation_returns_errors_if_validator_fails",
"tests/test_types.py::TestObject::test_validation_returns_combined_errors_if_multiple_validators_fail",
"tests/test_types.py::TestObject::test_loading_missing_value_raises_required_error",
"tests/test_types.py::TestObject::test_loading_None_raises_required_error",
"tests/test_types.py::TestObject::test_dumping_missing_value_raises_required_error",
"tests/test_types.py::TestObject::test_dumping_None_raises_required_error",
"tests/test_types.py::TestObject::test_name",
"tests/test_types.py::TestObject::test_description",
"tests/test_types.py::TestObject::test_default_field_type_is_unset_by_default",
"tests/test_types.py::TestObject::test_inheriting_default_field_type_from_first_base_class_that_has_it_set",
"tests/test_types.py::TestObject::test_constructor_is_unset_by_default",
"tests/test_types.py::TestObject::test_inheriting_constructor_from_first_base_class_that_has_it_set",
"tests/test_types.py::TestObject::test_allow_extra_fields_is_unset_by_default",
"tests/test_types.py::TestObject::test_inheriting_allow_extra_fields_from_first_base_class_that_has_it_set",
"tests/test_types.py::TestObject::test_immutable_is_unset_by_default",
"tests/test_types.py::TestObject::test_inheriting_immutable_from_first_base_class_that_has_it_set",
"tests/test_types.py::TestObject::test_ordered_is_unset_by_default",
"tests/test_types.py::TestObject::test_iheriting_ordered_from_first_base_class_that_has_it_set",
"tests/test_types.py::TestObject::test_loading_dict_value",
"tests/test_types.py::TestObject::test_loading_non_dict_values_raises_ValidationError",
"tests/test_types.py::TestObject::test_loading_bypasses_values_for_which_field_type_returns_missing_value",
"tests/test_types.py::TestObject::test_loading_dict_with_field_errors_raises_ValidationError_with_all_field_errors_merged",
"tests/test_types.py::TestObject::test_loading_dict_with_field_errors_does_not_run_whole_object_validators",
"tests/test_types.py::TestObject::test_loading_calls_field_load_passing_field_name_and_whole_data",
"tests/test_types.py::TestObject::test_loading_passes_context_to_inner_type_load",
"tests/test_types.py::TestObject::test_constructing_objects_with_default_constructor_on_load",
"tests/test_types.py::TestObject::test_constructing_custom_objects_on_load",
"tests/test_types.py::TestObject::test_load_ignores_extra_fields_by_default",
"tests/test_types.py::TestObject::test_load_raises_ValidationError_if_reporting_extra_fields",
"tests/test_types.py::TestObject::test_loading_inherited_fields",
"tests/test_types.py::TestObject::test_loading_multiple_inherited_fields",
"tests/test_types.py::TestObject::test_loading_raises_ValidationError_if_inherited_fields_have_errors",
"tests/test_types.py::TestObject::test_loading_only_specified_fields",
"tests/test_types.py::TestObject::test_loading_only_specified_fields_does_not_affect_own_fields",
"tests/test_types.py::TestObject::test_loading_all_but_specified_base_class_fields",
"tests/test_types.py::TestObject::test_loading_all_but_specified_fields_does_not_affect_own_fields",
"tests/test_types.py::TestObject::test_loading_values_into_existing_object",
"tests/test_types.py::TestObject::test_loading_values_into_existing_object_returns_that_object",
"tests/test_types.py::TestObject::test_loading_values_into_existing_object_passes_all_object_attributes_to_validators",
"tests/test_types.py::TestObject::test_loading_values_into_immutable_object_creates_a_copy",
"tests/test_types.py::TestObject::test_loading_values_into_immutable_object_does_not_modify_original_object",
"tests/test_types.py::TestObject::test_loading_values_into_nested_object_of_immutable_object_creates_copy_of_it_regardless_of_nested_objects_immutable_flag",
"tests/test_types.py::TestObject::test_loading_values_into_nested_object_of_immutable_object_does_not_modify_original_objects",
"tests/test_types.py::TestObject::test_loading_values_into_nested_objects_with_inplace_False_does_not_modify_original_objects",
"tests/test_types.py::TestObject::test_loading_values_into_existing_objects_ignores_missing_fields",
"tests/test_types.py::TestObject::test_loading_MISSING_into_existing_object_does_not_do_anything",
"tests/test_types.py::TestObject::test_loading_None_into_existing_objects_raises_ValidationError",
"tests/test_types.py::TestObject::test_loading_None_into_field_of_existing_object_passes_None_to_field",
"tests/test_types.py::TestObject::test_loading_values_into_existing_objects_raises_ValidationError_if_data_contains_errors",
"tests/test_types.py::TestObject::test_loading_values_into_existing_objects_raises_ValidationError_if_validator_fails",
"tests/test_types.py::TestObject::test_loading_values_into_existing_objects_annotates_field_errors_with_field_names",
"tests/test_types.py::TestObject::test_loading_values_into_existing_nested_objects",
"tests/test_types.py::TestObject::test_loading_values_into_existing_object_when_nested_object_does_not_exist",
"tests/test_types.py::TestObject::test_validating_data_for_existing_objects_returns_None_if_data_is_valid",
"tests/test_types.py::TestObject::test_validating_data_for_existing_objects_returns_errors_if_data_contains_errors",
"tests/test_types.py::TestObject::test_validating_data_for_existing_objects_returns_errors_if_validator_fails",
"tests/test_types.py::TestObject::test_validating_data_for_existing_objects_does_not_modify_original_objects",
"tests/test_types.py::TestObject::test_dumping_object_attributes",
"tests/test_types.py::TestObject::test_dumping_calls_field_dump_passing_field_name_and_whole_object",
"tests/test_types.py::TestObject::test_dumping_passes_context_to_inner_type_dump",
"tests/test_types.py::TestObject::test_dumping_inherited_fields",
"tests/test_types.py::TestObject::test_dumping_multiple_inherited_fields",
"tests/test_types.py::TestObject::test_dumping_only_specified_fields_of_base_classes",
"tests/test_types.py::TestObject::test_dumping_only_specified_fields_does_not_affect_own_fields",
"tests/test_types.py::TestObject::test_dumping_all_but_specified_base_class_fields",
"tests/test_types.py::TestObject::test_dumping_all_but_specified_fields_does_not_affect_own_fields",
"tests/test_types.py::TestObject::test_shortcut_for_specifying_constant_fields",
"tests/test_types.py::TestObject::test_dumping_fields_in_declared_order_if_ordered_is_True",
"tests/test_types.py::TestOptional::test_loading_value_calls_load_of_inner_type",
"tests/test_types.py::TestOptional::test_loading_missing_value_returns_None",
"tests/test_types.py::TestOptional::test_loading_None_returns_None",
"tests/test_types.py::TestOptional::test_loading_missing_value_does_not_call_inner_type_load",
"tests/test_types.py::TestOptional::test_loading_None_does_not_call_inner_type_load",
"tests/test_types.py::TestOptional::test_loading_passes_context_to_inner_type_load",
"tests/test_types.py::TestOptional::test_overriding_missing_value_on_load",
"tests/test_types.py::TestOptional::test_overriding_None_value_on_load",
"tests/test_types.py::TestOptional::test_using_function_to_override_value_on_load",
"tests/test_types.py::TestOptional::test_loading_passes_context_to_override_function",
"tests/test_types.py::TestOptional::test_dumping_value_calls_dump_of_inner_type",
"tests/test_types.py::TestOptional::test_dumping_missing_value_returns_None",
"tests/test_types.py::TestOptional::test_dumping_None_returns_None",
"tests/test_types.py::TestOptional::test_dumping_missing_value_does_not_call_inner_type_dump",
"tests/test_types.py::TestOptional::test_dumping_None_does_not_call_inner_type_dump",
"tests/test_types.py::TestOptional::test_dumping_passes_context_to_inner_type_dump",
"tests/test_types.py::TestOptional::test_overriding_missing_value_on_dump",
"tests/test_types.py::TestOptional::test_overriding_None_value_on_dump",
"tests/test_types.py::TestOptional::test_using_function_to_override_value_on_dump",
"tests/test_types.py::TestOptional::test_dumping_passes_context_to_override_function",
"tests/test_types.py::TestLoadOnly::test_name",
"tests/test_types.py::TestLoadOnly::test_description",
"tests/test_types.py::TestLoadOnly::test_loading_returns_inner_type_load_result",
"tests/test_types.py::TestLoadOnly::test_loading_passes_context_to_inner_type_load",
"tests/test_types.py::TestLoadOnly::test_dumping_always_returns_missing",
"tests/test_types.py::TestLoadOnly::test_dumping_does_not_call_inner_type_dump",
"tests/test_types.py::TestDumpOnly::test_name",
"tests/test_types.py::TestDumpOnly::test_description",
"tests/test_types.py::TestDumpOnly::test_loading_always_returns_missing",
"tests/test_types.py::TestDumpOnly::test_loading_does_not_call_inner_type_dump",
"tests/test_types.py::TestDumpOnly::test_dumping_returns_inner_type_dump_result",
"tests/test_types.py::TestDumpOnly::test_dumping_passes_context_to_inner_type_dump",
"tests/test_types.py::TestTransform::test_name",
"tests/test_types.py::TestTransform::test_description",
"tests/test_types.py::TestTransform::test_loading_calls_pre_load_with_original_value",
"tests/test_types.py::TestTransform::test_loading_calls_inner_type_load_with_result_of_pre_load",
"tests/test_types.py::TestTransform::test_loading_calls_post_load_with_result_of_inner_type_load",
"tests/test_types.py::TestTransform::test_transform_passes_context_to_inner_type_load",
"tests/test_types.py::TestTransform::test_transform_passes_context_to_pre_load",
"tests/test_types.py::TestTransform::test_transform_passes_context_to_post_load",
"tests/test_types.py::TestTransform::test_dumping_calls_pre_dump_with_original_value",
"tests/test_types.py::TestTransform::test_dumping_calls_inner_type_dump_with_result_of_pre_dump",
"tests/test_types.py::TestTransform::test_dumping_calls_post_dump_with_result_of_inner_type_dump",
"tests/test_types.py::TestTransform::test_transform_passes_context_to_inner_type_dump",
"tests/test_types.py::TestTransform::test_transform_passes_context_to_pre_dump",
"tests/test_types.py::TestTransform::test_transform_passes_context_to_post_dump",
"tests/test_types.py::TestValidatedType::test_returns_subclass_of_given_type",
"tests/test_types.py::TestValidatedType::test_returns_type_that_has_single_given_validator",
"tests/test_types.py::TestValidatedType::test_accepts_context_unaware_validators",
"tests/test_types.py::TestValidatedType::test_returns_type_that_has_multiple_given_validators",
"tests/test_types.py::TestValidatedType::test_specifying_more_validators_on_type_instantiation",
"tests/test_types.py::TestValidatedType::test_new_type_accepts_same_constructor_arguments_as_base_type"
] | [] | MIT License | 1,363 | 369 | [
"lollipop/types.py"
] |
|
jboss-dockerfiles__dogen-149 | a475173ce00b2d6686c601ffc46a8d2bc3ed0a7f | 2017-06-13 12:32:57 | bc9263c5b683fdf901ac1646286ee3908b85dcdc | diff --git a/dogen/generator.py b/dogen/generator.py
index 0562cd8..415dcb0 100644
--- a/dogen/generator.py
+++ b/dogen/generator.py
@@ -313,9 +313,10 @@ class Generator(object):
algorithms.append(supported_algorithm)
try:
- if os.path.exists(filename) and algorithms:
- for algorithm in algorithms:
- self.check_sum(filename, source[algorithm], algorithm)
+ if os.path.exists(filename):
+ if algorithms:
+ for algorithm in algorithms:
+ self.check_sum(filename, source[algorithm], algorithm)
passed = True
except Exception as e:
self.log.debug(str(e))
@@ -361,7 +362,7 @@ class Generator(object):
else:
if hint:
self.log.info(hint)
- self.log.info("Please download the '%s' artifact manually and save it as '%s'" % (artifact, filename))
+ self.log.info("Please download the '%s' artifact manually and save it as '%s'" % (artifact, filename))
raise Error("Artifact '%s' could not be fetched!" % artifact)
def check_sum(self, filename, checksum, algorithm):
| Dogen fails if md5 is not set for 'local' artifacts
Using dogen 2.0. In my image.yaml, if I have the following:
````
sources:
- artifact: myfile.tar.gz
````
I run dogen mounting `pwd` into /tmp/output inside the dogen container, and put the myfile.tar.gz inside the build/ folder. Dogen is invoked with /tmp/output/build as the destination folder.
I would expect that the local file myfile.tar.gz is added to the resulting image. However, dogen throws an error of not being able to find the artifact. In addition, the helpful hint on where to download it, i.e. "Please download the 'myfile.tar.gz' artifact manually and save it as '/tmp/output/build/myfile.tar.gz' is missing unless I also add the 'hint' field.
If I do this:
```
sources:
- artifact: myfile.tar.gz
md5: <md5sum of myfile.tar.gz>
```
Then dogen is successful creating the Dockerfile.
I would like to be able to omit the md5 field and still be able to generate the Dockerfile, as this is useful for generating Dockerfiles for locally built artifacts.
A detailed example can be found here: https://pastebin.com/31fqRGUa
| jboss-dockerfiles/dogen | diff --git a/tests/test_unit_generate_handle_files.py b/tests/test_unit_generate_handle_files.py
index daf240a..e811ebb 100644
--- a/tests/test_unit_generate_handle_files.py
+++ b/tests/test_unit_generate_handle_files.py
@@ -155,6 +155,17 @@ class TestHandleSources(unittest.TestCase):
self.assertEquals(str(cm.exception), "Could not download artifact from orignal location, reason: original")
mock_fetch_file.assert_has_calls([mock.call('http://cache/get?#algorithm#=#hash#', 'target/jboss-eap.zip'), mock.call('http://host.com/jboss-eap.zip', 'target/jboss-eap.zip')])
+ @mock.patch('dogen.generator.Generator._fetch_file')
+ def test_fetch_artifact_should_fail_with_nice_message_when_artifact_without_url_is_not_found_locally(self, mock_fetch_file):
+ self.generator.cfg = {'sources': [{'artifact': 'jboss-eap.zip'}]}
+
+ with self.assertRaises(Error) as cm:
+ self.generator.handle_sources()
+
+ self.assertEquals(str(cm.exception), "Artifact 'jboss-eap.zip' could not be fetched!")
+ mock_fetch_file.assert_not_called()
+ self.log.info.assert_any_call("Please download the 'jboss-eap.zip' artifact manually and save it as 'target/jboss-eap.zip'")
+
@mock.patch('dogen.generator.Generator._fetch_file')
def test_fetch_artifact_should_fetch_file_from_cache(self, mock_fetch_file):
self.generator.cfg = {'sources': [{'artifact': 'http://host.com/jboss-eap.zip'}]}
@@ -169,7 +180,8 @@ class TestHandleSources(unittest.TestCase):
mock_fetch_file.assert_called_with('http://cache/get?jboss-eap.zip', 'target/jboss-eap.zip')
@mock.patch('dogen.generator.Generator._fetch_file')
- def test_fetch_artifact_should_fetch_file(self, mock_fetch_file):
+ @mock.patch('dogen.generator.os.path.exists', return_value=False)
+ def test_fetch_artifact_should_fetch_file(self, mock_path_exists, mock_fetch_file):
self.generator.cfg = {'sources': [{'artifact': 'http://host.com/jboss-eap.zip'}]}
self.generator.handle_sources()
# No checksum provided and computed
@@ -187,3 +199,12 @@ class TestHandleSources(unittest.TestCase):
self.assertEquals(self.generator.cfg['artifacts'], {'jboss-eap.zip': None})
mock_fetch_file.assert_has_calls([mock.call('http://cache/get?#algorithm#=#hash#', 'target/jboss-eap.zip'), mock.call('http://host.com/jboss-eap.zip', 'target/jboss-eap.zip')])
+
+ @mock.patch('dogen.generator.Generator._fetch_file')
+ @mock.patch('dogen.generator.os.path.exists', return_value=True)
+ def test_fetch_artifact_should_not_fetch_file_if_exists(self, mock_path_exists, mock_fetch_file):
+ self.generator.cfg = {'sources': [{'artifact': 'http://host.com/jboss-eap.zip'}]}
+ self.generator.handle_sources()
+ # No checksum provided and computed
+ self.assertEquals(self.generator.cfg['artifacts'], {'jboss-eap.zip': None})
+ mock_fetch_file.assert_not_called()
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 1
} | 2.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"mock"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | certifi==2025.1.31
charset-normalizer==3.4.1
colorlog==6.9.0
coverage==7.8.0
docopt==0.6.2
-e git+https://github.com/jboss-dockerfiles/dogen.git@a475173ce00b2d6686c601ffc46a8d2bc3ed0a7f#egg=dogen
exceptiongroup==1.2.2
idna==3.10
iniconfig==2.1.0
Jinja2==3.1.6
MarkupSafe==3.0.2
mock==5.2.0
packaging==24.2
pluggy==1.5.0
pykwalify==1.8.0
pytest==8.3.5
pytest-cov==6.0.0
python-dateutil==2.9.0.post0
PyYAML==6.0.2
requests==2.32.3
ruamel.yaml==0.18.10
ruamel.yaml.clib==0.2.12
six==1.17.0
tomli==2.2.1
urllib3==2.3.0
| name: dogen
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- certifi==2025.1.31
- charset-normalizer==3.4.1
- colorlog==6.9.0
- coverage==7.8.0
- docopt==0.6.2
- exceptiongroup==1.2.2
- idna==3.10
- iniconfig==2.1.0
- jinja2==3.1.6
- markupsafe==3.0.2
- mock==5.2.0
- packaging==24.2
- pluggy==1.5.0
- pykwalify==1.8.0
- pytest==8.3.5
- pytest-cov==6.0.0
- python-dateutil==2.9.0.post0
- pyyaml==6.0.2
- requests==2.32.3
- ruamel-yaml==0.18.10
- ruamel-yaml-clib==0.2.12
- six==1.17.0
- tomli==2.2.1
- urllib3==2.3.0
prefix: /opt/conda/envs/dogen
| [
"tests/test_unit_generate_handle_files.py::TestHandleSources::test_fetch_artifact_should_fail_with_nice_message_when_artifact_without_url_is_not_found_locally",
"tests/test_unit_generate_handle_files.py::TestHandleSources::test_fetch_artifact_should_not_fetch_file_if_exists"
] | [] | [
"tests/test_unit_generate_handle_files.py::TestURL::test_local_file",
"tests/test_unit_generate_handle_files.py::TestURL::test_remote_http_file",
"tests/test_unit_generate_handle_files.py::TestURL::test_remote_https_file",
"tests/test_unit_generate_handle_files.py::TestFetchFile::test_fetching_with_filename",
"tests/test_unit_generate_handle_files.py::TestFetchFile::test_fetching_with_tmpfile",
"tests/test_unit_generate_handle_files.py::TestCustomTemplateHandling::test_do_not_fail_if_no_template_is_provided",
"tests/test_unit_generate_handle_files.py::TestCustomTemplateHandling::test_fetch_template_success",
"tests/test_unit_generate_handle_files.py::TestCustomTemplateHandling::test_fetch_template_with_error",
"tests/test_unit_generate_handle_files.py::TestHandleSources::test_fetch_artifact_should_download_from_original_location_if_cached_location_failed",
"tests/test_unit_generate_handle_files.py::TestHandleSources::test_fetch_artifact_should_fail_when_cached_download_failed_and_original_failed_too",
"tests/test_unit_generate_handle_files.py::TestHandleSources::test_fetch_artifact_should_fail_when_fetching_fails",
"tests/test_unit_generate_handle_files.py::TestHandleSources::test_fetch_artifact_should_fetch_file",
"tests/test_unit_generate_handle_files.py::TestHandleSources::test_fetch_artifact_should_fetch_file_from_cache",
"tests/test_unit_generate_handle_files.py::TestHandleSources::test_fetch_artifact_without_url_should_fail"
] | [] | MIT License | 1,364 | 282 | [
"dogen/generator.py"
] |
|
rorodata__firefly-12 | 8f85f769b450eb45d9b4e3a338e988a042bf7459 | 2017-06-15 06:05:45 | 8f85f769b450eb45d9b4e3a338e988a042bf7459 | diff --git a/firefly/main.py b/firefly/main.py
index bb06d97..37eb583 100644
--- a/firefly/main.py
+++ b/firefly/main.py
@@ -7,7 +7,7 @@ from .server import FireflyServer
def parse_args():
p = argparse.ArgumentParser()
p.add_argument("-b", "--bind", dest="ADDRESS", default="127.0.0.1:8000")
- p.add_argument("function", help="function to serve")
+ p.add_argument("functions", nargs='+', help="functions to serve")
return p.parse_args()
def load_function(function_spec):
@@ -17,7 +17,14 @@ def load_function(function_spec):
mod_name, func_name = function_spec.rsplit(".", 1)
mod = importlib.import_module(mod_name)
func = getattr(mod, func_name)
- return func
+ return (func_name, func)
+
+def load_functions(function_specs):
+ return [load_function(function_spec) for function_spec in function_specs]
+
+def add_routes(app, functions):
+ for name, function in functions:
+ app.add_route('/'+name, function)
def main():
# ensure current directory is added to sys.path
@@ -25,10 +32,10 @@ def main():
sys.path.insert(0, "")
args = parse_args()
- function = load_function(args.function)
+ functions = load_functions(args.functions)
app = Firefly()
- app.add_route("/", function)
+ add_routes(app, functions)
server = FireflyServer(app, {"bind": args.ADDRESS})
server.run()
| Allow firefly to support multiple functions
It should take multiple functions as command line arguments and expose all of them.
```
$ firefly myfile.square myfile.cube
```
And use:
```
$ curl -d '{"x": 5}' http://localhost:8000/square
25
$ curl -d '{"x": 5}' http://localhost:8000/cube
125
``` | rorodata/firefly | diff --git a/tests/test_main.py b/tests/test_main.py
new file mode 100644
index 0000000..7ecfbf2
--- /dev/null
+++ b/tests/test_main.py
@@ -0,0 +1,8 @@
+import os
+from firefly.main import load_function
+
+def test_load_functions():
+ os.path.exists2 = os.path.exists
+ name, func = load_function("os.path.exists2")
+ assert name == "exists2"
+ assert func == os.path.exists
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 1
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
-e git+https://github.com/rorodata/firefly.git@8f85f769b450eb45d9b4e3a338e988a042bf7459#egg=Firefly
gunicorn==19.7.1
importlib-metadata==4.8.3
iniconfig==1.1.1
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
tomli==1.2.3
typing_extensions==4.1.1
WebOb==1.7.2
zipp==3.6.0
| name: firefly
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- gunicorn==19.7.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- tomli==1.2.3
- typing-extensions==4.1.1
- webob==1.7.2
- zipp==3.6.0
prefix: /opt/conda/envs/firefly
| [
"tests/test_main.py::test_load_functions"
] | [] | [] | [] | Apache License 2.0 | 1,373 | 381 | [
"firefly/main.py"
] |
|
rorodata__firefly-13 | 6f199213a35bf87d17c594c023bf6ed4360f70a0 | 2017-06-15 09:17:43 | 6f199213a35bf87d17c594c023bf6ed4360f70a0 | diff --git a/firefly/app.py b/firefly/app.py
index c317648..d3c0da8 100644
--- a/firefly/app.py
+++ b/firefly/app.py
@@ -3,13 +3,28 @@ from webob.exc import HTTPNotFound
import json
from .validator import validate_args, ValidationError
from .utils import json_encode
+from .version import __version__
class Firefly(object):
def __init__(self):
self.mapping = {}
+ self.add_route('/', self.generate_index,internal=True)
- def add_route(self, path, function, **kwargs):
- self.mapping[path] = FireflyFunction(function, **kwargs)
+ def add_route(self, path, function, function_name=None, **kwargs):
+ self.mapping[path] = FireflyFunction(function, function_name, **kwargs)
+
+ def generate_function_list(self):
+ return {f.name: {"path": path, "doc": f.doc}
+ for path, f in self.mapping.items()
+ if f.options.get("internal") != True}
+
+ def generate_index(self):
+ help_dict = {
+ "app": "firefly",
+ "version": __version__,
+ "functions": self.generate_function_list()
+ }
+ return help_dict
def __call__(self, environ, start_response):
request = Request(environ)
@@ -25,10 +40,16 @@ class Firefly(object):
class FireflyFunction(object):
- def __init__(self, function, **kwargs):
+ def __init__(self, function, function_name=None, **options):
self.function = function
+ self.options = options
+ self.name = function_name or function.__name__
+ self.doc = function.__doc__ or ""
def __call__(self, request):
+ if self.options.get("internal", False):
+ return self.make_response(self.function())
+
kwargs = self.get_inputs(request)
try:
validate_args(self.function, kwargs)
diff --git a/firefly/main.py b/firefly/main.py
index 37eb583..dd4a4fb 100644
--- a/firefly/main.py
+++ b/firefly/main.py
@@ -24,7 +24,7 @@ def load_functions(function_specs):
def add_routes(app, functions):
for name, function in functions:
- app.add_route('/'+name, function)
+ app.add_route('/'+name, function, name)
def main():
# ensure current directory is added to sys.path
| Add the ability to find all the available functions served by a firefly service
The user should be able to find the functions available in a firefly service. Right now a server supports only one function, but that is going to change soon.
So, we need to identify the right way to expose the list of functions. It could either be on `/` or some other endpoint like `/_list`. Look at the other RPC implementations and see how they work. We don't have to follow them, but that would give a good idea. It would be better to provide the docstring and argument names along with the function listing.
Please discuss the plan here before implementing. | rorodata/firefly | diff --git a/tests/test_app.py b/tests/test_app.py
index edad1b9..183c914 100644
--- a/tests/test_app.py
+++ b/tests/test_app.py
@@ -1,13 +1,41 @@
from webob import Request, Response
-from firefly.app import FireflyFunction
+from firefly.app import Firefly, FireflyFunction
def square(a):
+ '''Computes square'''
return a**2
+class TestFirefly:
+ def test_generate_function_list(self):
+ firefly = Firefly()
+ assert firefly.generate_function_list() == {}
+
+ firefly.add_route("/square", square, "square")
+ returned_dict = {
+ "square": {
+ "path": "/square",
+ "doc": "Computes square"
+ }
+ }
+ assert firefly.generate_function_list() == returned_dict
+
+ def test_generate_function_list_for_func_name(self):
+ firefly = Firefly()
+ firefly.add_route("/sq2", square, "sq")
+ returned_dict = {
+ "sq": {
+ "path": "/sq2",
+ "doc": "Computes square"
+ }
+ }
+ assert firefly.generate_function_list() == returned_dict
+
+
+
class TestFireflyFunction:
def test_call(self):
func = FireflyFunction(square)
- request = Request.blank("/", POST='{"a": 3}')
+ request = Request.blank("/square", POST='{"a": 3}')
response = func(request)
assert response.status == '200 OK'
assert response.text == '9'
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 2
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup==1.2.2
-e git+https://github.com/rorodata/firefly.git@6f199213a35bf87d17c594c023bf6ed4360f70a0#egg=Firefly
gunicorn==19.7.1
iniconfig==2.1.0
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
tomli==2.2.1
WebOb==1.7.2
| name: firefly
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- exceptiongroup==1.2.2
- gunicorn==19.7.1
- iniconfig==2.1.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- tomli==2.2.1
- webob==1.7.2
prefix: /opt/conda/envs/firefly
| [
"tests/test_app.py::TestFirefly::test_generate_function_list",
"tests/test_app.py::TestFirefly::test_generate_function_list_for_func_name"
] | [] | [
"tests/test_app.py::TestFireflyFunction::test_call"
] | [] | Apache License 2.0 | 1,374 | 579 | [
"firefly/app.py",
"firefly/main.py"
] |
|
frictionlessdata__goodtables-py-197 | 25db6e4c20efc0a626323ed3bbef23845acbaf47 | 2017-06-15 12:39:43 | 25db6e4c20efc0a626323ed3bbef23845acbaf47 | roll: @amercader
Please take a look. It addresses https://sentry.io/open-knowledge/goodtablesio-production/issues/287753774/ | diff --git a/goodtables/presets/table.py b/goodtables/presets/table.py
index 0248096..940a85d 100644
--- a/goodtables/presets/table.py
+++ b/goodtables/presets/table.py
@@ -39,6 +39,10 @@ def table(source, schema=None, **options):
warnings.append(
'Table schema "%s" has a validation error "%s"' %
(schema, str(error).splitlines()[0]))
+ except Exception as error:
+ warnings.append(
+ 'Table Schema "%s" has a loading error "%s"' %
+ (schema, error))
# Add table
if not warnings:
| Catch jsontableschema.exceptions.InvalidJSONError
# Overview
For `table` and `datapackage` datasets it has to be caught. See - https://sentry.io/open-knowledge/goodtablesio-production/issues/287753774/ | frictionlessdata/goodtables-py | diff --git a/tests/presets/test_table.py b/tests/presets/test_table.py
index 1281a86..91fe320 100644
--- a/tests/presets/test_table.py
+++ b/tests/presets/test_table.py
@@ -20,3 +20,10 @@ def test_preset_table_but_got_datapackage_issue_187():
assert len(warnings) == 1
assert len(tables) == 0
assert 'Use "datapackage" preset' in warnings[0]
+
+
+def test_preset_table_invalid_json_issue_196():
+ warnings, tables = presets.table('valid.csv', schema='data/invalid_json.json')
+ assert len(warnings) == 1
+ assert len(tables) == 0
+ assert 'has a loading error' in warnings[0]
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 1
},
"num_modified_files": 1
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install --upgrade -e .[develop,ods]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"mock",
"pyyaml",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
boto3==1.23.10
botocore==1.26.10
certifi==2021.5.30
chardet==5.0.0
charset-normalizer==2.0.12
click==6.7
datapackage==0.8.9
distlib==0.3.9
et-xmlfile==1.1.0
ezodf==0.3.2
filelock==3.4.1
future==0.18.3
-e git+https://github.com/frictionlessdata/goodtables-py.git@25db6e4c20efc0a626323ed3bbef23845acbaf47#egg=goodtables
greenlet==2.0.2
idna==3.10
ijson==3.3.0
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
isodate==0.6.1
jmespath==0.10.0
jsonlines==3.1.0
jsonschema==2.6.0
jsontableschema==0.10.1
linear-tsv==1.1.0
lxml==3.8.0
mccabe==0.7.0
mock==5.2.0
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
openpyxl==3.1.3
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
platformdirs==2.4.0
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pycodestyle==2.10.0
pydocstyle==6.3.0
pyflakes==3.0.1
pylama==7.7.1
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
python-dateutil==2.9.0.post0
PyYAML==6.0.1
requests==2.27.1
rfc3986==0.4.1
s3transfer==0.5.2
six==1.17.0
snowballstemmer==2.2.0
SQLAlchemy==1.4.54
tabulator==1.53.5
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
tox==3.28.0
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
unicodecsv==0.14.1
urllib3==1.26.20
virtualenv==20.17.1
xlrd==2.0.1
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: goodtables-py
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- boto3==1.23.10
- botocore==1.26.10
- chardet==5.0.0
- charset-normalizer==2.0.12
- click==6.7
- datapackage==0.8.9
- distlib==0.3.9
- et-xmlfile==1.1.0
- ezodf==0.3.2
- filelock==3.4.1
- future==0.18.3
- greenlet==2.0.2
- idna==3.10
- ijson==3.3.0
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- isodate==0.6.1
- jmespath==0.10.0
- jsonlines==3.1.0
- jsonschema==2.6.0
- jsontableschema==0.10.1
- linear-tsv==1.1.0
- lxml==3.8.0
- mccabe==0.7.0
- mock==5.2.0
- openpyxl==3.1.3
- platformdirs==2.4.0
- pycodestyle==2.10.0
- pydocstyle==6.3.0
- pyflakes==3.0.1
- pylama==7.7.1
- python-dateutil==2.9.0.post0
- pyyaml==6.0.1
- requests==2.27.1
- rfc3986==0.4.1
- s3transfer==0.5.2
- six==1.17.0
- snowballstemmer==2.2.0
- sqlalchemy==1.4.54
- tabulator==1.53.5
- tox==3.28.0
- unicodecsv==0.14.1
- urllib3==1.26.20
- virtualenv==20.17.1
- xlrd==2.0.1
prefix: /opt/conda/envs/goodtables-py
| [
"tests/presets/test_table.py::test_preset_table_invalid_json_issue_196"
] | [] | [
"tests/presets/test_table.py::test_preset_table",
"tests/presets/test_table.py::test_preset_table_but_got_datapackage_issue_187"
] | [] | MIT License | 1,375 | 169 | [
"goodtables/presets/table.py"
] |
dask__dask-2466 | 37c3ae2e091412f5e2fdf3c957c383e89e4c8bb2 | 2017-06-16 14:00:21 | a18308c6384f2a8cd7c0c50749a20bd067842ef9 | jcrist: > Unsure how to test this
Any way to test using moto, and skip if moto not installed?
If this is hard to test, I'd also be fine merging without.
martindurant: Yeah, moto doesn't disallow writing, even when anonymous, so I don't know what parameters I could pass to s3fs that would have any effect, where as local writes take no parameters at all. | diff --git a/dask/array/fft.py b/dask/array/fft.py
index cfccda548..2f64c94ee 100644
--- a/dask/array/fft.py
+++ b/dask/array/fft.py
@@ -228,9 +228,9 @@ def _fftfreq_helper(n, d=1.0, chunks=None, real=False):
s = n // 2 + 1 if real else n
t = l - s
- chunks = _normalize_chunks(chunks, (s,))[0] + (t,)
+ chunks = _normalize_chunks(chunks, (s,))
- r = _linspace(0, 1, l, chunks=chunks)
+ r = _linspace(0, 1, l, chunks=(chunks[0] + (t,),))
if real:
n_2 = n // 2 + 1
@@ -239,6 +239,9 @@ def _fftfreq_helper(n, d=1.0, chunks=None, real=False):
n_2 = (n + 1) // 2
r = _concatenate([r[:n_2], r[n_2:-1] - 1])
+ if r.chunks != chunks:
+ r = r.rechunk(chunks)
+
r /= d
return r
diff --git a/dask/bag/core.py b/dask/bag/core.py
index 1cb43c01b..6cc7a8ab6 100644
--- a/dask/bag/core.py
+++ b/dask/bag/core.py
@@ -118,7 +118,8 @@ def optimize(dsk, keys, fuse_keys=None, rename_fused_keys=True, **kwargs):
def to_textfiles(b, path, name_function=None, compression='infer',
- encoding=system_encoding, compute=True, get=None):
+ encoding=system_encoding, compute=True, get=None,
+ storage_options=None):
""" Write bag to disk, one filename per partition, one line per element
**Paths**: This will create one file for each partition in your bag. You
@@ -174,7 +175,7 @@ def to_textfiles(b, path, name_function=None, compression='infer',
"""
from dask import delayed
writes = write_bytes(b.to_delayed(), path, name_function, compression,
- encoding=encoding)
+ encoding=encoding, **(storage_options or {}))
# Use Bag optimizations on these delayed objects
dsk = ensure_dict(delayed(writes).dask)
@@ -642,9 +643,10 @@ class Bag(Base):
@wraps(to_textfiles)
def to_textfiles(self, path, name_function=None, compression='infer',
- encoding=system_encoding, compute=True, get=None):
+ encoding=system_encoding, compute=True, get=None,
+ storage_options=None):
return to_textfiles(self, path, name_function, compression, encoding,
- compute, get=get)
+ compute, get=get, storage_options=storage_options)
def fold(self, binop, combine=None, initial=no_default, split_every=None):
""" Parallelizable reduction
diff --git a/dask/dataframe/io/csv.py b/dask/dataframe/io/csv.py
index 9ceaad2e2..c999aa414 100644
--- a/dask/dataframe/io/csv.py
+++ b/dask/dataframe/io/csv.py
@@ -364,7 +364,7 @@ def _to_csv_chunk(df, **kwargs):
def to_csv(df, filename, name_function=None, compression=None, compute=True,
- get=None, **kwargs):
+ get=None, storage_options=None, **kwargs):
"""
Store Dask DataFrame to CSV files
@@ -472,10 +472,12 @@ def to_csv(df, filename, name_function=None, compression=None, compute=True,
decimal: string, default '.'
Character recognized as decimal separator. E.g. use ',' for
European data
+ storage_options: dict
+ Parameters passed on to the backend filesystem class.
"""
values = [_to_csv_chunk(d, **kwargs) for d in df.to_delayed()]
values = write_bytes(values, filename, name_function, compression,
- encoding=None)
+ encoding=None, **(storage_options or {}))
if compute:
delayed(values).compute(get=get)
| Can't specify storage_options for bag.to_textfiles, dataframe.to_csv
The read functions allow specifying `storage_options` for things like AWS key/secret. The `to_parquet` function allows this as well.
However in other cases such as `bag.to_textfiles()` and `dataframe.to_csv()`, it is not possible to specify these options. It would be nice for there to be consistency, since there is no other way to specify some of the options at this time. | dask/dask | diff --git a/dask/array/tests/test_fft.py b/dask/array/tests/test_fft.py
index c02b7fefd..ab6de9560 100644
--- a/dask/array/tests/test_fft.py
+++ b/dask/array/tests/test_fft.py
@@ -9,6 +9,10 @@ import dask.array.fft
from dask.array.fft import fft_wrap
from dask.array.utils import assert_eq
+from dask.array.core import (
+ normalize_chunks as _normalize_chunks,
+)
+
def same_keys(a, b):
def key(k):
@@ -202,7 +206,13 @@ def test_wrap_fftns(modname, funcname, dtype):
@pytest.mark.parametrize("c", [lambda m: m, lambda m: (1, m - 1)])
def test_fftfreq(n, d, c):
c = c(n)
- assert_eq(da.fft.fftfreq(n, d, chunks=c), np.fft.fftfreq(n, d))
+
+ r1 = np.fft.fftfreq(n, d)
+ r2 = da.fft.fftfreq(n, d, chunks=c)
+
+ assert _normalize_chunks(c, r2.shape) == r2.chunks
+
+ assert_eq(r1, r2)
@pytest.mark.parametrize("n", [1, 2, 3, 6, 7])
@@ -210,9 +220,13 @@ def test_fftfreq(n, d, c):
@pytest.mark.parametrize("c", [lambda m: m // 2 + 1, lambda m: (1, m // 2)])
def test_rfftfreq(n, d, c):
c = c(n)
- assert_eq(
- da.fft.rfftfreq(n, d, chunks=c), np.fft.rfftfreq(n, d)
- )
+
+ r1 = np.fft.rfftfreq(n, d)
+ r2 = da.fft.rfftfreq(n, d, chunks=c)
+
+ assert _normalize_chunks(c, r2.shape) == r2.chunks
+
+ assert_eq(r1, r2)
@pytest.mark.parametrize("funcname", ["fftshift", "ifftshift"])
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 3
},
"num_modified_files": 3
} | 1.17 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[complete]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"pytest-xdist"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | aiobotocore==2.1.2
aiohttp==3.8.6
aioitertools==0.11.0
aiosignal==1.2.0
async-timeout==4.0.2
asynctest==0.13.0
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
botocore==1.23.24
certifi==2021.5.30
charset-normalizer==3.0.1
click==8.0.4
cloudpickle==2.2.1
-e git+https://github.com/dask/dask.git@37c3ae2e091412f5e2fdf3c957c383e89e4c8bb2#egg=dask
distributed==1.19.3
execnet==1.9.0
frozenlist==1.2.0
fsspec==2022.1.0
HeapDict==1.0.1
idna==3.10
idna-ssl==1.1.0
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
jmespath==0.10.0
locket==1.0.0
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
msgpack-python==0.5.6
multidict==5.2.0
numpy==1.19.5
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pandas==1.1.5
partd==1.2.0
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
psutil==7.0.0
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
pytest-xdist==3.0.2
python-dateutil==2.9.0.post0
pytz==2025.2
s3fs==2022.1.0
six==1.17.0
sortedcontainers==2.4.0
tblib==1.7.0
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
toolz==0.12.0
tornado==6.1
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
urllib3==1.26.20
wrapt==1.16.0
yarl==1.7.2
zict==2.1.0
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: dask
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- aiobotocore==2.1.2
- aiohttp==3.8.6
- aioitertools==0.11.0
- aiosignal==1.2.0
- async-timeout==4.0.2
- asynctest==0.13.0
- botocore==1.23.24
- charset-normalizer==3.0.1
- click==8.0.4
- cloudpickle==2.2.1
- distributed==1.19.3
- execnet==1.9.0
- frozenlist==1.2.0
- fsspec==2022.1.0
- heapdict==1.0.1
- idna==3.10
- idna-ssl==1.1.0
- jmespath==0.10.0
- locket==1.0.0
- msgpack-python==0.5.6
- multidict==5.2.0
- numpy==1.19.5
- pandas==1.1.5
- partd==1.2.0
- psutil==7.0.0
- pytest-xdist==3.0.2
- python-dateutil==2.9.0.post0
- pytz==2025.2
- s3fs==2022.1.0
- six==1.17.0
- sortedcontainers==2.4.0
- tblib==1.7.0
- toolz==0.12.0
- tornado==6.1
- urllib3==1.26.20
- wrapt==1.16.0
- yarl==1.7.2
- zict==2.1.0
prefix: /opt/conda/envs/dask
| [
"dask/array/tests/test_fft.py::test_fftfreq[<lambda>0-1.0-1]",
"dask/array/tests/test_fft.py::test_fftfreq[<lambda>0-1.0-2]",
"dask/array/tests/test_fft.py::test_fftfreq[<lambda>0-1.0-3]",
"dask/array/tests/test_fft.py::test_fftfreq[<lambda>0-1.0-6]",
"dask/array/tests/test_fft.py::test_fftfreq[<lambda>0-1.0-7]",
"dask/array/tests/test_fft.py::test_fftfreq[<lambda>0-0.5-1]",
"dask/array/tests/test_fft.py::test_fftfreq[<lambda>0-0.5-2]",
"dask/array/tests/test_fft.py::test_fftfreq[<lambda>0-0.5-3]",
"dask/array/tests/test_fft.py::test_fftfreq[<lambda>0-0.5-6]",
"dask/array/tests/test_fft.py::test_fftfreq[<lambda>0-0.5-7]",
"dask/array/tests/test_fft.py::test_fftfreq[<lambda>0-6.283185307179586-1]",
"dask/array/tests/test_fft.py::test_fftfreq[<lambda>0-6.283185307179586-2]",
"dask/array/tests/test_fft.py::test_fftfreq[<lambda>0-6.283185307179586-3]",
"dask/array/tests/test_fft.py::test_fftfreq[<lambda>0-6.283185307179586-6]",
"dask/array/tests/test_fft.py::test_fftfreq[<lambda>0-6.283185307179586-7]",
"dask/array/tests/test_fft.py::test_fftfreq[<lambda>1-1.0-1]",
"dask/array/tests/test_fft.py::test_fftfreq[<lambda>1-0.5-1]",
"dask/array/tests/test_fft.py::test_fftfreq[<lambda>1-6.283185307179586-1]"
] | [] | [
"dask/array/tests/test_fft.py::test_cant_fft_chunked_axis[fft]",
"dask/array/tests/test_fft.py::test_cant_fft_chunked_axis[ifft]",
"dask/array/tests/test_fft.py::test_cant_fft_chunked_axis[rfft]",
"dask/array/tests/test_fft.py::test_cant_fft_chunked_axis[irfft]",
"dask/array/tests/test_fft.py::test_cant_fft_chunked_axis[hfft]",
"dask/array/tests/test_fft.py::test_cant_fft_chunked_axis[ihfft]",
"dask/array/tests/test_fft.py::test_fft[fft]",
"dask/array/tests/test_fft.py::test_fft[ifft]",
"dask/array/tests/test_fft.py::test_fft[rfft]",
"dask/array/tests/test_fft.py::test_fft[irfft]",
"dask/array/tests/test_fft.py::test_fft[hfft]",
"dask/array/tests/test_fft.py::test_fft[ihfft]",
"dask/array/tests/test_fft.py::test_fft2n_shapes[fft2]",
"dask/array/tests/test_fft.py::test_fft2n_shapes[ifft2]",
"dask/array/tests/test_fft.py::test_fft2n_shapes[fftn]",
"dask/array/tests/test_fft.py::test_fft2n_shapes[ifftn]",
"dask/array/tests/test_fft.py::test_fft2n_shapes[rfft2]",
"dask/array/tests/test_fft.py::test_fft2n_shapes[irfft2]",
"dask/array/tests/test_fft.py::test_fft2n_shapes[rfftn]",
"dask/array/tests/test_fft.py::test_fft2n_shapes[irfftn]",
"dask/array/tests/test_fft.py::test_fft_n_kwarg[fft]",
"dask/array/tests/test_fft.py::test_fft_n_kwarg[ifft]",
"dask/array/tests/test_fft.py::test_fft_n_kwarg[rfft]",
"dask/array/tests/test_fft.py::test_fft_n_kwarg[irfft]",
"dask/array/tests/test_fft.py::test_fft_n_kwarg[hfft]",
"dask/array/tests/test_fft.py::test_fft_n_kwarg[ihfft]",
"dask/array/tests/test_fft.py::test_fft_consistent_names[fft]",
"dask/array/tests/test_fft.py::test_fft_consistent_names[ifft]",
"dask/array/tests/test_fft.py::test_fft_consistent_names[rfft]",
"dask/array/tests/test_fft.py::test_fft_consistent_names[irfft]",
"dask/array/tests/test_fft.py::test_fft_consistent_names[hfft]",
"dask/array/tests/test_fft.py::test_fft_consistent_names[ihfft]",
"dask/array/tests/test_fft.py::test_wrap_bad_kind",
"dask/array/tests/test_fft.py::test_nd_ffts_axes[float32-fft2]",
"dask/array/tests/test_fft.py::test_nd_ffts_axes[float32-ifft2]",
"dask/array/tests/test_fft.py::test_nd_ffts_axes[float32-fftn]",
"dask/array/tests/test_fft.py::test_nd_ffts_axes[float32-ifftn]",
"dask/array/tests/test_fft.py::test_nd_ffts_axes[float32-rfft2]",
"dask/array/tests/test_fft.py::test_nd_ffts_axes[float32-irfft2]",
"dask/array/tests/test_fft.py::test_nd_ffts_axes[float32-rfftn]",
"dask/array/tests/test_fft.py::test_nd_ffts_axes[float32-irfftn]",
"dask/array/tests/test_fft.py::test_nd_ffts_axes[float64-fft2]",
"dask/array/tests/test_fft.py::test_nd_ffts_axes[float64-ifft2]",
"dask/array/tests/test_fft.py::test_nd_ffts_axes[float64-fftn]",
"dask/array/tests/test_fft.py::test_nd_ffts_axes[float64-ifftn]",
"dask/array/tests/test_fft.py::test_nd_ffts_axes[float64-rfft2]",
"dask/array/tests/test_fft.py::test_nd_ffts_axes[float64-irfft2]",
"dask/array/tests/test_fft.py::test_nd_ffts_axes[float64-rfftn]",
"dask/array/tests/test_fft.py::test_nd_ffts_axes[float64-irfftn]",
"dask/array/tests/test_fft.py::test_wrap_ffts[float32-fft-numpy.fft]",
"dask/array/tests/test_fft.py::test_wrap_ffts[float32-ifft-numpy.fft]",
"dask/array/tests/test_fft.py::test_wrap_ffts[float32-rfft-numpy.fft]",
"dask/array/tests/test_fft.py::test_wrap_ffts[float32-irfft-numpy.fft]",
"dask/array/tests/test_fft.py::test_wrap_ffts[float32-hfft-numpy.fft]",
"dask/array/tests/test_fft.py::test_wrap_ffts[float32-ihfft-numpy.fft]",
"dask/array/tests/test_fft.py::test_wrap_ffts[float64-fft-numpy.fft]",
"dask/array/tests/test_fft.py::test_wrap_ffts[float64-ifft-numpy.fft]",
"dask/array/tests/test_fft.py::test_wrap_ffts[float64-rfft-numpy.fft]",
"dask/array/tests/test_fft.py::test_wrap_ffts[float64-irfft-numpy.fft]",
"dask/array/tests/test_fft.py::test_wrap_ffts[float64-hfft-numpy.fft]",
"dask/array/tests/test_fft.py::test_wrap_ffts[float64-ihfft-numpy.fft]",
"dask/array/tests/test_fft.py::test_wrap_fftns[float32-fft2-numpy.fft]",
"dask/array/tests/test_fft.py::test_wrap_fftns[float32-ifft2-numpy.fft]",
"dask/array/tests/test_fft.py::test_wrap_fftns[float32-fftn-numpy.fft]",
"dask/array/tests/test_fft.py::test_wrap_fftns[float32-ifftn-numpy.fft]",
"dask/array/tests/test_fft.py::test_wrap_fftns[float32-rfft2-numpy.fft]",
"dask/array/tests/test_fft.py::test_wrap_fftns[float32-irfft2-numpy.fft]",
"dask/array/tests/test_fft.py::test_wrap_fftns[float32-rfftn-numpy.fft]",
"dask/array/tests/test_fft.py::test_wrap_fftns[float32-irfftn-numpy.fft]",
"dask/array/tests/test_fft.py::test_wrap_fftns[float64-fft2-numpy.fft]",
"dask/array/tests/test_fft.py::test_wrap_fftns[float64-ifft2-numpy.fft]",
"dask/array/tests/test_fft.py::test_wrap_fftns[float64-fftn-numpy.fft]",
"dask/array/tests/test_fft.py::test_wrap_fftns[float64-ifftn-numpy.fft]",
"dask/array/tests/test_fft.py::test_wrap_fftns[float64-rfft2-numpy.fft]",
"dask/array/tests/test_fft.py::test_wrap_fftns[float64-irfft2-numpy.fft]",
"dask/array/tests/test_fft.py::test_wrap_fftns[float64-rfftn-numpy.fft]",
"dask/array/tests/test_fft.py::test_wrap_fftns[float64-irfftn-numpy.fft]",
"dask/array/tests/test_fft.py::test_fftfreq[<lambda>1-1.0-2]",
"dask/array/tests/test_fft.py::test_fftfreq[<lambda>1-1.0-3]",
"dask/array/tests/test_fft.py::test_fftfreq[<lambda>1-1.0-6]",
"dask/array/tests/test_fft.py::test_fftfreq[<lambda>1-1.0-7]",
"dask/array/tests/test_fft.py::test_fftfreq[<lambda>1-0.5-2]",
"dask/array/tests/test_fft.py::test_fftfreq[<lambda>1-0.5-3]",
"dask/array/tests/test_fft.py::test_fftfreq[<lambda>1-0.5-6]",
"dask/array/tests/test_fft.py::test_fftfreq[<lambda>1-0.5-7]",
"dask/array/tests/test_fft.py::test_fftfreq[<lambda>1-6.283185307179586-2]",
"dask/array/tests/test_fft.py::test_fftfreq[<lambda>1-6.283185307179586-3]",
"dask/array/tests/test_fft.py::test_fftfreq[<lambda>1-6.283185307179586-6]",
"dask/array/tests/test_fft.py::test_fftfreq[<lambda>1-6.283185307179586-7]",
"dask/array/tests/test_fft.py::test_rfftfreq[<lambda>0-1.0-1]",
"dask/array/tests/test_fft.py::test_rfftfreq[<lambda>0-1.0-2]",
"dask/array/tests/test_fft.py::test_rfftfreq[<lambda>0-1.0-3]",
"dask/array/tests/test_fft.py::test_rfftfreq[<lambda>0-1.0-6]",
"dask/array/tests/test_fft.py::test_rfftfreq[<lambda>0-1.0-7]",
"dask/array/tests/test_fft.py::test_rfftfreq[<lambda>0-0.5-1]",
"dask/array/tests/test_fft.py::test_rfftfreq[<lambda>0-0.5-2]",
"dask/array/tests/test_fft.py::test_rfftfreq[<lambda>0-0.5-3]",
"dask/array/tests/test_fft.py::test_rfftfreq[<lambda>0-0.5-6]",
"dask/array/tests/test_fft.py::test_rfftfreq[<lambda>0-0.5-7]",
"dask/array/tests/test_fft.py::test_rfftfreq[<lambda>0-6.283185307179586-1]",
"dask/array/tests/test_fft.py::test_rfftfreq[<lambda>0-6.283185307179586-2]",
"dask/array/tests/test_fft.py::test_rfftfreq[<lambda>0-6.283185307179586-3]",
"dask/array/tests/test_fft.py::test_rfftfreq[<lambda>0-6.283185307179586-6]",
"dask/array/tests/test_fft.py::test_rfftfreq[<lambda>0-6.283185307179586-7]",
"dask/array/tests/test_fft.py::test_rfftfreq[<lambda>1-1.0-1]",
"dask/array/tests/test_fft.py::test_rfftfreq[<lambda>1-1.0-2]",
"dask/array/tests/test_fft.py::test_rfftfreq[<lambda>1-1.0-3]",
"dask/array/tests/test_fft.py::test_rfftfreq[<lambda>1-1.0-6]",
"dask/array/tests/test_fft.py::test_rfftfreq[<lambda>1-1.0-7]",
"dask/array/tests/test_fft.py::test_rfftfreq[<lambda>1-0.5-1]",
"dask/array/tests/test_fft.py::test_rfftfreq[<lambda>1-0.5-2]",
"dask/array/tests/test_fft.py::test_rfftfreq[<lambda>1-0.5-3]",
"dask/array/tests/test_fft.py::test_rfftfreq[<lambda>1-0.5-6]",
"dask/array/tests/test_fft.py::test_rfftfreq[<lambda>1-0.5-7]",
"dask/array/tests/test_fft.py::test_rfftfreq[<lambda>1-6.283185307179586-1]",
"dask/array/tests/test_fft.py::test_rfftfreq[<lambda>1-6.283185307179586-2]",
"dask/array/tests/test_fft.py::test_rfftfreq[<lambda>1-6.283185307179586-3]",
"dask/array/tests/test_fft.py::test_rfftfreq[<lambda>1-6.283185307179586-6]",
"dask/array/tests/test_fft.py::test_rfftfreq[<lambda>1-6.283185307179586-7]",
"dask/array/tests/test_fft.py::test_fftshift[None-fftshift]",
"dask/array/tests/test_fft.py::test_fftshift[None-ifftshift]",
"dask/array/tests/test_fft.py::test_fftshift[0-fftshift]",
"dask/array/tests/test_fft.py::test_fftshift[0-ifftshift]",
"dask/array/tests/test_fft.py::test_fftshift[1-fftshift]",
"dask/array/tests/test_fft.py::test_fftshift[1-ifftshift]",
"dask/array/tests/test_fft.py::test_fftshift[2-fftshift]",
"dask/array/tests/test_fft.py::test_fftshift[2-ifftshift]",
"dask/array/tests/test_fft.py::test_fftshift[axes4-fftshift]",
"dask/array/tests/test_fft.py::test_fftshift[axes4-ifftshift]",
"dask/array/tests/test_fft.py::test_fftshift[axes5-fftshift]",
"dask/array/tests/test_fft.py::test_fftshift[axes5-ifftshift]",
"dask/array/tests/test_fft.py::test_fftshift[axes6-fftshift]",
"dask/array/tests/test_fft.py::test_fftshift[axes6-ifftshift]",
"dask/array/tests/test_fft.py::test_fftshift[axes7-fftshift]",
"dask/array/tests/test_fft.py::test_fftshift[axes7-ifftshift]",
"dask/array/tests/test_fft.py::test_fftshift_identity[None-fftshift-ifftshift]",
"dask/array/tests/test_fft.py::test_fftshift_identity[None-ifftshift-fftshift]",
"dask/array/tests/test_fft.py::test_fftshift_identity[0-fftshift-ifftshift]",
"dask/array/tests/test_fft.py::test_fftshift_identity[0-ifftshift-fftshift]",
"dask/array/tests/test_fft.py::test_fftshift_identity[1-fftshift-ifftshift]",
"dask/array/tests/test_fft.py::test_fftshift_identity[1-ifftshift-fftshift]",
"dask/array/tests/test_fft.py::test_fftshift_identity[2-fftshift-ifftshift]",
"dask/array/tests/test_fft.py::test_fftshift_identity[2-ifftshift-fftshift]",
"dask/array/tests/test_fft.py::test_fftshift_identity[axes4-fftshift-ifftshift]",
"dask/array/tests/test_fft.py::test_fftshift_identity[axes4-ifftshift-fftshift]",
"dask/array/tests/test_fft.py::test_fftshift_identity[axes5-fftshift-ifftshift]",
"dask/array/tests/test_fft.py::test_fftshift_identity[axes5-ifftshift-fftshift]",
"dask/array/tests/test_fft.py::test_fftshift_identity[axes6-fftshift-ifftshift]",
"dask/array/tests/test_fft.py::test_fftshift_identity[axes6-ifftshift-fftshift]",
"dask/array/tests/test_fft.py::test_fftshift_identity[axes7-fftshift-ifftshift]",
"dask/array/tests/test_fft.py::test_fftshift_identity[axes7-ifftshift-fftshift]"
] | [] | BSD 3-Clause "New" or "Revised" License | 1,380 | 1,016 | [
"dask/array/fft.py",
"dask/bag/core.py",
"dask/dataframe/io/csv.py"
] |
dask__dask-2467 | ecdae84aa3b8ee5408876b0b7d7eade3d127e177 | 2017-06-16 16:29:49 | c560965c8fc0da7cbc0920d43b7011d2721307d3 | mrocklin: This looks good to me. I was surprised to see that serializing `(slice, [...])` was as fast (or faster) as serializing `slice(...)` | diff --git a/dask/dataframe/io/io.py b/dask/dataframe/io/io.py
index 93b47bb3c..5c82683aa 100644
--- a/dask/dataframe/io/io.py
+++ b/dask/dataframe/io/io.py
@@ -343,7 +343,7 @@ def dataframe_from_ctable(x, slc, columns=None, categories=None, lock=lock):
def from_dask_array(x, columns=None):
- """ Create Dask Array from a Dask DataFrame
+ """ Create a Dask DataFrame from a Dask Array.
Converts a 2d array into a DataFrame and a 1d array into a Series.
diff --git a/dask/delayed.py b/dask/delayed.py
index a9cd531e0..8f0943d0d 100644
--- a/dask/delayed.py
+++ b/dask/delayed.py
@@ -64,26 +64,33 @@ def to_task_dask(expr):
"""
if isinstance(expr, Delayed):
return expr.key, expr.dask
+
if isinstance(expr, base.Base):
name = 'finalize-' + tokenize(expr, pure=True)
keys = expr._keys()
dsk = expr._optimize(ensure_dict(expr.dask), keys)
dsk[name] = (expr._finalize, (concrete, keys))
return name, dsk
- if isinstance(expr, tuple) and type(expr) != tuple:
- return expr, {}
- if isinstance(expr, (Iterator, list, tuple, set)):
+
+ if isinstance(expr, Iterator):
+ expr = list(expr)
+ typ = type(expr)
+
+ if typ in (list, tuple, set):
args, dasks = unzip((to_task_dask(e) for e in expr), 2)
args = list(args)
dsk = sharedict.merge(*dasks)
# Ensure output type matches input type
- if isinstance(expr, (tuple, set)):
- return (type(expr), args), dsk
- else:
- return args, dsk
- if type(expr) is dict:
+ return (args, dsk) if typ is list else ((typ, args), dsk)
+
+ if typ is dict:
args, dsk = to_task_dask([[k, v] for k, v in expr.items()])
return (dict, args), dsk
+
+ if typ is slice:
+ args, dsk = to_task_dask([expr.start, expr.stop, expr.step])
+ return (slice,) + tuple(args), dsk
+
return expr, {}
| Slicing delayed objects with delayed objects does not work
I'm trying to wrap my code with delayed, using `dask 0.15.0`, `python 3.6.0` and `numpy 1.12.1`, but I stumbled on an exception.
```
from dask import delayed
import numpy as np
a = delayed(np.arange(10))
b = delayed(2)
a[:b].compute()
```
results in: `TypeError: slice indices must be integers or None or have an __index__ method`
```
a[delayed(slice)(None,b)].compute()
```
Works, even only calling `slice(None,b)` gives no error, but then again `a[slice(None,b)]` gives an identical error. Isn't it to be expected that a delayed object would wrap its own slice objects when sliced?
| dask/dask | diff --git a/dask/tests/test_delayed.py b/dask/tests/test_delayed.py
index 837e739a4..8c89cca23 100644
--- a/dask/tests/test_delayed.py
+++ b/dask/tests/test_delayed.py
@@ -1,4 +1,4 @@
-from collections import Iterator, namedtuple
+from collections import namedtuple
from operator import add, setitem
import pickle
from random import random
@@ -32,6 +32,10 @@ def test_to_task_dask():
assert task == x
assert dict(dask) == {}
+ task, dask = to_task_dask(slice(a, b, 3))
+ assert task == (slice, 'a', 'b', 3)
+ assert dict(dask) == merge(a.dask, b.dask)
+
# Issue https://github.com/dask/dask/issues/2107
class MyClass(dict):
pass
@@ -58,6 +62,8 @@ def test_operators():
a = delayed([1, 2, 3])
assert a[0].compute() == 1
assert (a + a).compute() == [1, 2, 3, 1, 2, 3]
+ b = delayed(2)
+ assert a[:b].compute() == [1, 2]
a = delayed(10)
assert (a + 1).compute() == 11
@@ -154,7 +160,6 @@ def test_lists_are_concrete():
assert c.compute() == 20
[email protected]
def test_iterators():
a = delayed(1)
b = delayed(2)
@@ -163,7 +168,6 @@ def test_iterators():
assert c.compute() == 3
def f(seq):
- assert isinstance(seq, Iterator)
return sum(seq)
c = delayed(f)(iter([a, b]))
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 2
} | 0.15 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[complete]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | aiobotocore==2.1.2
aiohttp==3.8.6
aioitertools==0.11.0
aiosignal==1.2.0
async-timeout==4.0.2
asynctest==0.13.0
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
botocore==1.23.24
certifi==2021.5.30
charset-normalizer==3.0.1
click==8.0.4
cloudpickle==2.2.1
coverage==6.2
-e git+https://github.com/dask/dask.git@ecdae84aa3b8ee5408876b0b7d7eade3d127e177#egg=dask
distributed==1.19.3
execnet==1.9.0
frozenlist==1.2.0
fsspec==2022.1.0
HeapDict==1.0.1
idna==3.10
idna-ssl==1.1.0
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
jmespath==0.10.0
locket==1.0.0
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
msgpack-python==0.5.6
multidict==5.2.0
numpy==1.19.5
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pandas==1.1.5
partd==1.2.0
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
psutil==7.0.0
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
pytest-asyncio==0.16.0
pytest-cov==4.0.0
pytest-mock==3.6.1
pytest-xdist==3.0.2
python-dateutil==2.9.0.post0
pytz==2025.2
s3fs==2022.1.0
six==1.17.0
sortedcontainers==2.4.0
tblib==1.7.0
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
tomli==1.2.3
toolz==0.12.0
tornado==6.1
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
urllib3==1.26.20
wrapt==1.16.0
yarl==1.7.2
zict==2.1.0
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: dask
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- aiobotocore==2.1.2
- aiohttp==3.8.6
- aioitertools==0.11.0
- aiosignal==1.2.0
- async-timeout==4.0.2
- asynctest==0.13.0
- botocore==1.23.24
- charset-normalizer==3.0.1
- click==8.0.4
- cloudpickle==2.2.1
- coverage==6.2
- distributed==1.19.3
- execnet==1.9.0
- frozenlist==1.2.0
- fsspec==2022.1.0
- heapdict==1.0.1
- idna==3.10
- idna-ssl==1.1.0
- jmespath==0.10.0
- locket==1.0.0
- msgpack-python==0.5.6
- multidict==5.2.0
- numpy==1.19.5
- pandas==1.1.5
- partd==1.2.0
- psutil==7.0.0
- pytest-asyncio==0.16.0
- pytest-cov==4.0.0
- pytest-mock==3.6.1
- pytest-xdist==3.0.2
- python-dateutil==2.9.0.post0
- pytz==2025.2
- s3fs==2022.1.0
- six==1.17.0
- sortedcontainers==2.4.0
- tblib==1.7.0
- tomli==1.2.3
- toolz==0.12.0
- tornado==6.1
- urllib3==1.26.20
- wrapt==1.16.0
- yarl==1.7.2
- zict==2.1.0
prefix: /opt/conda/envs/dask
| [
"dask/tests/test_delayed.py::test_to_task_dask",
"dask/tests/test_delayed.py::test_operators"
] | [] | [
"dask/tests/test_delayed.py::test_delayed",
"dask/tests/test_delayed.py::test_methods",
"dask/tests/test_delayed.py::test_attributes",
"dask/tests/test_delayed.py::test_method_getattr_optimize",
"dask/tests/test_delayed.py::test_delayed_errors",
"dask/tests/test_delayed.py::test_common_subexpressions",
"dask/tests/test_delayed.py::test_lists",
"dask/tests/test_delayed.py::test_literates",
"dask/tests/test_delayed.py::test_literates_keys",
"dask/tests/test_delayed.py::test_lists_are_concrete",
"dask/tests/test_delayed.py::test_iterators",
"dask/tests/test_delayed.py::test_traverse_false",
"dask/tests/test_delayed.py::test_pure",
"dask/tests/test_delayed.py::test_pure_global_setting",
"dask/tests/test_delayed.py::test_nout",
"dask/tests/test_delayed.py::test_kwargs",
"dask/tests/test_delayed.py::test_array_delayed",
"dask/tests/test_delayed.py::test_array_bag_delayed",
"dask/tests/test_delayed.py::test_delayed_picklable",
"dask/tests/test_delayed.py::test_delayed_compute_forward_kwargs",
"dask/tests/test_delayed.py::test_delayed_method_descriptor",
"dask/tests/test_delayed.py::test_delayed_callable",
"dask/tests/test_delayed.py::test_delayed_name_on_call",
"dask/tests/test_delayed.py::test_callable_obj",
"dask/tests/test_delayed.py::test_name_consistent_across_instances",
"dask/tests/test_delayed.py::test_sensitive_to_partials",
"dask/tests/test_delayed.py::test_delayed_name",
"dask/tests/test_delayed.py::test_finalize_name"
] | [] | BSD 3-Clause "New" or "Revised" License | 1,381 | 609 | [
"dask/dataframe/io/io.py",
"dask/delayed.py"
] |
dask__dask-2468 | ecdae84aa3b8ee5408876b0b7d7eade3d127e177 | 2017-06-16 16:53:44 | c560965c8fc0da7cbc0920d43b7011d2721307d3 | diff --git a/dask/bag/core.py b/dask/bag/core.py
index aed4e6ca2..1cb43c01b 100644
--- a/dask/bag/core.py
+++ b/dask/bag/core.py
@@ -1987,5 +1987,5 @@ def split(seq, n):
def to_dataframe(seq, columns, dtypes):
import pandas as pd
- res = pd.DataFrame(seq, columns=list(columns))
+ res = pd.DataFrame(reify(seq), columns=list(columns))
return res.astype(dtypes, copy=False)
diff --git a/dask/dataframe/io/io.py b/dask/dataframe/io/io.py
index 93b47bb3c..5c82683aa 100644
--- a/dask/dataframe/io/io.py
+++ b/dask/dataframe/io/io.py
@@ -343,7 +343,7 @@ def dataframe_from_ctable(x, slc, columns=None, categories=None, lock=lock):
def from_dask_array(x, columns=None):
- """ Create Dask Array from a Dask DataFrame
+ """ Create a Dask DataFrame from a Dask Array.
Converts a 2d array into a DataFrame and a 1d array into a Series.
diff --git a/dask/delayed.py b/dask/delayed.py
index a9cd531e0..8f0943d0d 100644
--- a/dask/delayed.py
+++ b/dask/delayed.py
@@ -64,26 +64,33 @@ def to_task_dask(expr):
"""
if isinstance(expr, Delayed):
return expr.key, expr.dask
+
if isinstance(expr, base.Base):
name = 'finalize-' + tokenize(expr, pure=True)
keys = expr._keys()
dsk = expr._optimize(ensure_dict(expr.dask), keys)
dsk[name] = (expr._finalize, (concrete, keys))
return name, dsk
- if isinstance(expr, tuple) and type(expr) != tuple:
- return expr, {}
- if isinstance(expr, (Iterator, list, tuple, set)):
+
+ if isinstance(expr, Iterator):
+ expr = list(expr)
+ typ = type(expr)
+
+ if typ in (list, tuple, set):
args, dasks = unzip((to_task_dask(e) for e in expr), 2)
args = list(args)
dsk = sharedict.merge(*dasks)
# Ensure output type matches input type
- if isinstance(expr, (tuple, set)):
- return (type(expr), args), dsk
- else:
- return args, dsk
- if type(expr) is dict:
+ return (args, dsk) if typ is list else ((typ, args), dsk)
+
+ if typ is dict:
args, dsk = to_task_dask([[k, v] for k, v in expr.items()])
return (dict, args), dsk
+
+ if typ is slice:
+ args, dsk = to_task_dask([expr.start, expr.stop, expr.step])
+ return (slice,) + tuple(args), dsk
+
return expr, {}
| Can't convert bag -> dataframe -> bag after flatten()
When I try to use `flatten()/concat()` on a bag and then proceed to convert to dataframe and back, the `pd.DataFrame` constructor fails because the data is passed in as an Iterator. See example below. Commenting out `.flatten()` or `.to_bag()` both cause it to be successful. But as written it causes an exception.
```
import dask.bag as db
(db
.from_sequence(5 * [range(10)])
.flatten()
.to_dataframe()
.to_bag()
.compute()
)
```
```
TypeError Traceback (most recent call last)
<ipython-input-1-31fb669b888e> in <module>()
2
3 (db
----> 4 .from_sequence(5 * [range(10)])
5 .flatten()
6 .to_dataframe()
/Users/lsglick/.virtualenvs/test/lib/python2.7/site-packages/dask/base.pyc in compute(self, **kwargs)
95 Extra keywords to forward to the scheduler ``get`` function.
96 """
---> 97 (result,) = compute(self, traverse=False, **kwargs)
98 return result
99
/Users/lsglick/.virtualenvs/test/lib/python2.7/site-packages/dask/base.pyc in compute(*args, **kwargs)
202 dsk = collections_to_dsk(variables, optimize_graph, **kwargs)
203 keys = [var._keys() for var in variables]
--> 204 results = get(dsk, keys, **kwargs)
205
206 results_iter = iter(results)
/Users/lsglick/.virtualenvs/test/lib/python2.7/site-packages/dask/multiprocessing.pyc in get(dsk, keys, num_workers, func_loads, func_dumps, optimize_graph, **kwargs)
175 get_id=_process_get_id, dumps=dumps, loads=loads,
176 pack_exception=pack_exception,
--> 177 raise_exception=reraise, **kwargs)
178 finally:
179 if cleanup:
/Users/lsglick/.virtualenvs/test/lib/python2.7/site-packages/dask/local.pyc in get_async(apply_async, num_workers, dsk, result, cache, get_id, rerun_exceptions_locally, pack_exception, raise_exception, callbacks, dumps, loads, **kwargs)
519 _execute_task(task, data) # Re-execute locally
520 else:
--> 521 raise_exception(exc, tb)
522 res, worker_id = loads(res_info)
523 state['cache'][key] = res
/Users/lsglick/.virtualenvs/test/lib/python2.7/site-packages/dask/local.pyc in execute_task()
288 try:
289 task, data = loads(task_info)
--> 290 result = _execute_task(task, data)
291 id = get_id()
292 result = dumps((result, id))
/Users/lsglick/.virtualenvs/test/lib/python2.7/site-packages/dask/local.pyc in _execute_task()
268 elif istask(arg):
269 func, args = arg[0], arg[1:]
--> 270 args2 = [_execute_task(a, cache) for a in args]
271 return func(*args2)
272 elif not ishashable(arg):
/Users/lsglick/.virtualenvs/test/lib/python2.7/site-packages/dask/local.pyc in _execute_task()
269 func, args = arg[0], arg[1:]
270 args2 = [_execute_task(a, cache) for a in args]
--> 271 return func(*args2)
272 elif not ishashable(arg):
273 return arg
/Users/lsglick/.virtualenvs/test/lib/python2.7/site-packages/dask/bag/core.pyc in to_dataframe()
1988 def to_dataframe(seq, columns, dtypes):
1989 import pandas as pd
-> 1990 res = pd.DataFrame(seq, columns=list(columns))
1991 return res.astype(dtypes, copy=False)
/Users/lsglick/.virtualenvs/test/lib/python2.7/site-packages/pandas/core/frame.pyc in __init__()
323 mgr = self._init_dict({}, index, columns, dtype=dtype)
324 elif isinstance(data, collections.Iterator):
--> 325 raise TypeError("data argument can't be an iterator")
326 else:
327 try:
TypeError: data argument can't be an iterator
```
Versions:
Python: 3.5.2
Pandas: 0.20.2
Dask: 0.15.0
AND
Python: 2.7.10
Pandas: 0.19.2
Dask: 0.15.0 | dask/dask | diff --git a/dask/bag/tests/test_bag.py b/dask/bag/tests/test_bag.py
index bd3679cbe..0053dc0b6 100644
--- a/dask/bag/tests/test_bag.py
+++ b/dask/bag/tests/test_bag.py
@@ -822,6 +822,13 @@ def test_to_dataframe():
dd.utils.assert_eq(df, sol, check_index=False)
check_parts(df, sol)
+ # Works with iterators
+ b = db.from_sequence(range(100), npartitions=5).map_partitions(iter)
+ sol = pd.DataFrame({'a': range(100)})
+ df = b.to_dataframe(columns=sol)
+ dd.utils.assert_eq(df, sol, check_index=False)
+ check_parts(df, sol)
+
ext_open = [('gz', GzipFile), ('', open)]
if not PY2:
diff --git a/dask/tests/test_delayed.py b/dask/tests/test_delayed.py
index 837e739a4..8c89cca23 100644
--- a/dask/tests/test_delayed.py
+++ b/dask/tests/test_delayed.py
@@ -1,4 +1,4 @@
-from collections import Iterator, namedtuple
+from collections import namedtuple
from operator import add, setitem
import pickle
from random import random
@@ -32,6 +32,10 @@ def test_to_task_dask():
assert task == x
assert dict(dask) == {}
+ task, dask = to_task_dask(slice(a, b, 3))
+ assert task == (slice, 'a', 'b', 3)
+ assert dict(dask) == merge(a.dask, b.dask)
+
# Issue https://github.com/dask/dask/issues/2107
class MyClass(dict):
pass
@@ -58,6 +62,8 @@ def test_operators():
a = delayed([1, 2, 3])
assert a[0].compute() == 1
assert (a + a).compute() == [1, 2, 3, 1, 2, 3]
+ b = delayed(2)
+ assert a[:b].compute() == [1, 2]
a = delayed(10)
assert (a + 1).compute() == 11
@@ -154,7 +160,6 @@ def test_lists_are_concrete():
assert c.compute() == 20
[email protected]
def test_iterators():
a = delayed(1)
b = delayed(2)
@@ -163,7 +168,6 @@ def test_iterators():
assert c.compute() == 3
def f(seq):
- assert isinstance(seq, Iterator)
return sum(seq)
c = delayed(f)(iter([a, b]))
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 3
} | 0.15 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[complete]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"moto",
"mock"
],
"pre_install": null,
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | aiobotocore==2.1.2
aiohttp==3.8.6
aioitertools==0.11.0
aiosignal==1.2.0
async-timeout==4.0.2
asynctest==0.13.0
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
boto3==1.23.10
botocore==1.23.24
certifi==2021.5.30
cffi==1.15.1
charset-normalizer==2.0.12
click==8.0.4
cloudpickle==2.2.1
cryptography==40.0.2
-e git+https://github.com/dask/dask.git@ecdae84aa3b8ee5408876b0b7d7eade3d127e177#egg=dask
dataclasses==0.8
distributed==1.19.3
frozenlist==1.2.0
fsspec==2022.1.0
HeapDict==1.0.1
idna==3.10
idna-ssl==1.1.0
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
Jinja2==3.0.3
jmespath==0.10.0
locket==1.0.0
MarkupSafe==2.0.1
mock==5.2.0
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
moto==4.0.13
msgpack-python==0.5.6
multidict==5.2.0
numpy==1.19.5
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pandas==1.1.5
partd==1.2.0
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
psutil==7.0.0
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pycparser==2.21
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
python-dateutil==2.9.0.post0
pytz==2025.2
requests==2.27.1
responses==0.17.0
s3fs==2022.1.0
s3transfer==0.5.2
six==1.17.0
sortedcontainers==2.4.0
tblib==1.7.0
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
toolz==0.12.0
tornado==6.1
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
urllib3==1.26.20
Werkzeug==2.0.3
wrapt==1.16.0
xmltodict==0.14.2
yarl==1.7.2
zict==2.1.0
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: dask
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- aiobotocore==2.1.2
- aiohttp==3.8.6
- aioitertools==0.11.0
- aiosignal==1.2.0
- async-timeout==4.0.2
- asynctest==0.13.0
- boto3==1.23.10
- botocore==1.23.24
- cffi==1.15.1
- charset-normalizer==2.0.12
- click==8.0.4
- cloudpickle==2.2.1
- cryptography==40.0.2
- dataclasses==0.8
- distributed==1.19.3
- frozenlist==1.2.0
- fsspec==2022.1.0
- heapdict==1.0.1
- idna==3.10
- idna-ssl==1.1.0
- jinja2==3.0.3
- jmespath==0.10.0
- locket==1.0.0
- markupsafe==2.0.1
- mock==5.2.0
- moto==4.0.13
- msgpack-python==0.5.6
- multidict==5.2.0
- numpy==1.19.5
- pandas==1.1.5
- partd==1.2.0
- psutil==7.0.0
- pycparser==2.21
- python-dateutil==2.9.0.post0
- pytz==2025.2
- requests==2.27.1
- responses==0.17.0
- s3fs==2022.1.0
- s3transfer==0.5.2
- six==1.17.0
- sortedcontainers==2.4.0
- tblib==1.7.0
- toolz==0.12.0
- tornado==6.1
- urllib3==1.26.20
- werkzeug==2.0.3
- wrapt==1.16.0
- xmltodict==0.14.2
- yarl==1.7.2
- zict==2.1.0
prefix: /opt/conda/envs/dask
| [
"dask/tests/test_delayed.py::test_to_task_dask",
"dask/tests/test_delayed.py::test_operators"
] | [
"dask/bag/tests/test_bag.py::test_to_dataframe"
] | [
"dask/bag/tests/test_bag.py::test_Bag",
"dask/bag/tests/test_bag.py::test_keys",
"dask/bag/tests/test_bag.py::test_map",
"dask/bag/tests/test_bag.py::test_map_function_with_multiple_arguments",
"dask/bag/tests/test_bag.py::test_map_with_constructors",
"dask/bag/tests/test_bag.py::test_map_with_builtins",
"dask/bag/tests/test_bag.py::test_map_with_kwargs",
"dask/bag/tests/test_bag.py::test_bag_map",
"dask/bag/tests/test_bag.py::test_map_method",
"dask/bag/tests/test_bag.py::test_starmap",
"dask/bag/tests/test_bag.py::test_filter",
"dask/bag/tests/test_bag.py::test_remove",
"dask/bag/tests/test_bag.py::test_iter",
"dask/bag/tests/test_bag.py::test_repr[str]",
"dask/bag/tests/test_bag.py::test_repr[repr]",
"dask/bag/tests/test_bag.py::test_pluck",
"dask/bag/tests/test_bag.py::test_pluck_with_default",
"dask/bag/tests/test_bag.py::test_unzip",
"dask/bag/tests/test_bag.py::test_fold",
"dask/bag/tests/test_bag.py::test_distinct",
"dask/bag/tests/test_bag.py::test_frequencies",
"dask/bag/tests/test_bag.py::test_topk",
"dask/bag/tests/test_bag.py::test_topk_with_non_callable_key[1]",
"dask/bag/tests/test_bag.py::test_topk_with_non_callable_key[2]",
"dask/bag/tests/test_bag.py::test_topk_with_multiarg_lambda",
"dask/bag/tests/test_bag.py::test_lambdas",
"dask/bag/tests/test_bag.py::test_reductions",
"dask/bag/tests/test_bag.py::test_reduction_names",
"dask/bag/tests/test_bag.py::test_tree_reductions",
"dask/bag/tests/test_bag.py::test_aggregation[1]",
"dask/bag/tests/test_bag.py::test_aggregation[3]",
"dask/bag/tests/test_bag.py::test_aggregation[4]",
"dask/bag/tests/test_bag.py::test_non_splittable_reductions[1]",
"dask/bag/tests/test_bag.py::test_non_splittable_reductions[10]",
"dask/bag/tests/test_bag.py::test_std",
"dask/bag/tests/test_bag.py::test_var",
"dask/bag/tests/test_bag.py::test_join",
"dask/bag/tests/test_bag.py::test_foldby",
"dask/bag/tests/test_bag.py::test_map_partitions",
"dask/bag/tests/test_bag.py::test_map_partitions_args_kwargs",
"dask/bag/tests/test_bag.py::test_random_sample_size",
"dask/bag/tests/test_bag.py::test_random_sample_prob_range",
"dask/bag/tests/test_bag.py::test_random_sample_repeated_computation",
"dask/bag/tests/test_bag.py::test_random_sample_different_definitions",
"dask/bag/tests/test_bag.py::test_random_sample_random_state",
"dask/bag/tests/test_bag.py::test_lazify_task",
"dask/bag/tests/test_bag.py::test_lazify",
"dask/bag/tests/test_bag.py::test_inline_singleton_lists",
"dask/bag/tests/test_bag.py::test_take",
"dask/bag/tests/test_bag.py::test_take_npartitions",
"dask/bag/tests/test_bag.py::test_take_npartitions_warn",
"dask/bag/tests/test_bag.py::test_map_is_lazy",
"dask/bag/tests/test_bag.py::test_can_use_dict_to_make_concrete",
"dask/bag/tests/test_bag.py::test_read_text",
"dask/bag/tests/test_bag.py::test_read_text_large",
"dask/bag/tests/test_bag.py::test_read_text_encoding",
"dask/bag/tests/test_bag.py::test_read_text_large_gzip",
"dask/bag/tests/test_bag.py::test_from_sequence",
"dask/bag/tests/test_bag.py::test_from_long_sequence",
"dask/bag/tests/test_bag.py::test_product",
"dask/bag/tests/test_bag.py::test_partition_collect",
"dask/bag/tests/test_bag.py::test_groupby",
"dask/bag/tests/test_bag.py::test_groupby_with_indexer",
"dask/bag/tests/test_bag.py::test_groupby_with_npartitions_changed",
"dask/bag/tests/test_bag.py::test_concat",
"dask/bag/tests/test_bag.py::test_flatten",
"dask/bag/tests/test_bag.py::test_concat_after_map",
"dask/bag/tests/test_bag.py::test_args",
"dask/bag/tests/test_bag.py::test_to_textfiles[gz-GzipFile]",
"dask/bag/tests/test_bag.py::test_to_textfiles[-open]",
"dask/bag/tests/test_bag.py::test_to_textfiles[bz2-BZ2File]",
"dask/bag/tests/test_bag.py::test_to_textfiles_name_function_preserves_order",
"dask/bag/tests/test_bag.py::test_to_textfiles_name_function_warn",
"dask/bag/tests/test_bag.py::test_to_textfiles_encoding",
"dask/bag/tests/test_bag.py::test_to_textfiles_inputs",
"dask/bag/tests/test_bag.py::test_to_textfiles_endlines",
"dask/bag/tests/test_bag.py::test_string_namespace",
"dask/bag/tests/test_bag.py::test_string_namespace_with_unicode",
"dask/bag/tests/test_bag.py::test_str_empty_split",
"dask/bag/tests/test_bag.py::test_map_with_iterator_function",
"dask/bag/tests/test_bag.py::test_ensure_compute_output_is_concrete",
"dask/bag/tests/test_bag.py::test_bag_class_extend",
"dask/bag/tests/test_bag.py::test_gh715",
"dask/bag/tests/test_bag.py::test_bag_compute_forward_kwargs",
"dask/bag/tests/test_bag.py::test_to_delayed",
"dask/bag/tests/test_bag.py::test_to_delayed_optimizes",
"dask/bag/tests/test_bag.py::test_from_delayed",
"dask/bag/tests/test_bag.py::test_from_delayed_iterator",
"dask/bag/tests/test_bag.py::test_range",
"dask/bag/tests/test_bag.py::test_zip[1]",
"dask/bag/tests/test_bag.py::test_zip[7]",
"dask/bag/tests/test_bag.py::test_zip[10]",
"dask/bag/tests/test_bag.py::test_zip[28]",
"dask/bag/tests/test_bag.py::test_repartition[1-1]",
"dask/bag/tests/test_bag.py::test_repartition[1-2]",
"dask/bag/tests/test_bag.py::test_repartition[1-7]",
"dask/bag/tests/test_bag.py::test_repartition[1-11]",
"dask/bag/tests/test_bag.py::test_repartition[1-23]",
"dask/bag/tests/test_bag.py::test_repartition[2-1]",
"dask/bag/tests/test_bag.py::test_repartition[2-2]",
"dask/bag/tests/test_bag.py::test_repartition[2-7]",
"dask/bag/tests/test_bag.py::test_repartition[2-11]",
"dask/bag/tests/test_bag.py::test_repartition[2-23]",
"dask/bag/tests/test_bag.py::test_repartition[5-1]",
"dask/bag/tests/test_bag.py::test_repartition[5-2]",
"dask/bag/tests/test_bag.py::test_repartition[5-7]",
"dask/bag/tests/test_bag.py::test_repartition[5-11]",
"dask/bag/tests/test_bag.py::test_repartition[5-23]",
"dask/bag/tests/test_bag.py::test_repartition[12-1]",
"dask/bag/tests/test_bag.py::test_repartition[12-2]",
"dask/bag/tests/test_bag.py::test_repartition[12-7]",
"dask/bag/tests/test_bag.py::test_repartition[12-11]",
"dask/bag/tests/test_bag.py::test_repartition[12-23]",
"dask/bag/tests/test_bag.py::test_repartition[23-1]",
"dask/bag/tests/test_bag.py::test_repartition[23-2]",
"dask/bag/tests/test_bag.py::test_repartition[23-7]",
"dask/bag/tests/test_bag.py::test_repartition[23-11]",
"dask/bag/tests/test_bag.py::test_repartition[23-23]",
"dask/bag/tests/test_bag.py::test_repartition_names",
"dask/bag/tests/test_bag.py::test_accumulate",
"dask/bag/tests/test_bag.py::test_groupby_tasks",
"dask/bag/tests/test_bag.py::test_groupby_tasks_names",
"dask/bag/tests/test_bag.py::test_groupby_tasks_2[1000-20-100]",
"dask/bag/tests/test_bag.py::test_groupby_tasks_2[12345-234-1042]",
"dask/bag/tests/test_bag.py::test_groupby_tasks_3",
"dask/bag/tests/test_bag.py::test_to_textfiles_empty_partitions",
"dask/bag/tests/test_bag.py::test_reduction_empty",
"dask/bag/tests/test_bag.py::test_reduction_empty_aggregate[1]",
"dask/bag/tests/test_bag.py::test_reduction_empty_aggregate[2]",
"dask/bag/tests/test_bag.py::test_reduction_empty_aggregate[4]",
"dask/bag/tests/test_bag.py::test_reduction_with_non_comparable_objects",
"dask/bag/tests/test_bag.py::test_empty",
"dask/bag/tests/test_bag.py::test_bag_picklable",
"dask/bag/tests/test_bag.py::test_msgpack_unicode",
"dask/bag/tests/test_bag.py::test_bag_with_single_callable",
"dask/bag/tests/test_bag.py::test_optimize_fuse_keys",
"dask/bag/tests/test_bag.py::test_reductions_are_lazy",
"dask/bag/tests/test_bag.py::test_repeated_groupby",
"dask/bag/tests/test_bag.py::test_temporary_directory",
"dask/bag/tests/test_bag.py::test_empty_bag",
"dask/tests/test_delayed.py::test_delayed",
"dask/tests/test_delayed.py::test_methods",
"dask/tests/test_delayed.py::test_attributes",
"dask/tests/test_delayed.py::test_method_getattr_optimize",
"dask/tests/test_delayed.py::test_delayed_errors",
"dask/tests/test_delayed.py::test_common_subexpressions",
"dask/tests/test_delayed.py::test_lists",
"dask/tests/test_delayed.py::test_literates",
"dask/tests/test_delayed.py::test_literates_keys",
"dask/tests/test_delayed.py::test_lists_are_concrete",
"dask/tests/test_delayed.py::test_iterators",
"dask/tests/test_delayed.py::test_traverse_false",
"dask/tests/test_delayed.py::test_pure",
"dask/tests/test_delayed.py::test_pure_global_setting",
"dask/tests/test_delayed.py::test_nout",
"dask/tests/test_delayed.py::test_kwargs",
"dask/tests/test_delayed.py::test_array_delayed",
"dask/tests/test_delayed.py::test_array_bag_delayed",
"dask/tests/test_delayed.py::test_delayed_picklable",
"dask/tests/test_delayed.py::test_delayed_compute_forward_kwargs",
"dask/tests/test_delayed.py::test_delayed_method_descriptor",
"dask/tests/test_delayed.py::test_delayed_callable",
"dask/tests/test_delayed.py::test_delayed_name_on_call",
"dask/tests/test_delayed.py::test_callable_obj",
"dask/tests/test_delayed.py::test_name_consistent_across_instances",
"dask/tests/test_delayed.py::test_sensitive_to_partials",
"dask/tests/test_delayed.py::test_delayed_name",
"dask/tests/test_delayed.py::test_finalize_name"
] | [] | BSD 3-Clause "New" or "Revised" License | 1,382 | 745 | [
"dask/bag/core.py",
"dask/dataframe/io/io.py",
"dask/delayed.py"
] |
|
bokeh__bokeh-6488 | 74cf9e9e173fe3dbff132bf94257310869f9614f | 2017-06-18 02:35:12 | 44b63d65efec1e06fb565a9a81e0f2f21315e85a | diff --git a/bokeh/util/serialization.py b/bokeh/util/serialization.py
index 96c406a88..dd6d27e78 100644
--- a/bokeh/util/serialization.py
+++ b/bokeh/util/serialization.py
@@ -101,7 +101,8 @@ def convert_datetime_type(obj):
# Datetime (datetime is a subclass of date)
elif isinstance(obj, dt.datetime):
- return (obj - DT_EPOCH).total_seconds() * 1000. + obj.microsecond / 1000.
+ diff = obj.replace(tzinfo=None) - DT_EPOCH
+ return diff.total_seconds() * 1000. + obj.microsecond / 1000.
# Timedelta (timedelta is class in the datetime library)
elif isinstance(obj, dt.timedelta):
| bokeh.util.serialization.py convert_datetime_type breaks when passed timezone aware datetime
After updating to bokeh 0.12.6 the convert_datetime_type became unable to handle datetime objecs that are timezone aware. The subtraction of DT_EPOCH which is a naive datetime from a obj which might be aware or naive fails when obj is timezone aware. The error indicates that subtraction of a datetime object and a naive object is not supported.
This occurs with python 2.7.
proposed fix to serialization.py attached.
$ pip freeze
appdirs==1.4.3
awsebcli==3.7.7
backports-abc==0.4
backports.ssl-match-hostname==3.5.0.1
bkcharts==0.2
blessed==1.9.5
bokeh==0.12.6
botocore==1.4.40
cement==2.8.2
certifi==2016.2.28
colorama==0.3.7
cycler==0.10.0
DateTime==4.1.1
Django==1.9.8
docker-py==1.7.2
dockerpty==0.4.1
docopt==0.6.2
docutils==0.12
futures==3.0.5
Jinja2==2.8
jmespath==0.9.0
MarkupSafe==0.23
msgpack-python==0.4.8
numpy==1.11.1
packaging==16.8
pandas==0.18.1
pathspec==0.3.4
pyasn1==0.1.9
pygobject==3.18.2
pyparsing==2.1.5
python-dateutil==2.5.3
pytz==2016.6.1
PyYAML==3.11
requests==2.9.1
rsa==3.4.2
s3transfer==0.0.1
semantic-version==2.5.0
singledispatch==3.4.0.3
six==1.10.0
tabulate==0.7.5
td-client==0.5.0
termcolor==1.1.0
texttable==0.8.4
tornado==4.4.1
tzlocal==1.2.2
urllib3==1.17
virtualenv==15.0.2
wcwidth==0.1.7
websocket-client==0.37.0
zope.interface==4.2.0
[serialization.py.txt](https://github.com/bokeh/bokeh/files/1078483/serialization.py.txt)
| bokeh/bokeh | diff --git a/bokeh/util/tests/test_serialization.py b/bokeh/util/tests/test_serialization.py
index c664a5c9a..501f2f679 100644
--- a/bokeh/util/tests/test_serialization.py
+++ b/bokeh/util/tests/test_serialization.py
@@ -6,6 +6,7 @@ import base64
import pytest
import numpy as np
import pandas as pd
+import pytz
import bokeh.util.serialization as bus
@@ -60,6 +61,12 @@ def test_convert_datetime_type():
assert bus.convert_datetime_type(pd.Timedelta("3000ms")) == 3000.0
assert bus.convert_datetime_type(bus._pd_timestamp(3000000)) == 3.0
+def test_convert_datetime_type_with_tz():
+ # This ensures datetimes are sent to BokehJS timezone-naive
+ # see https://github.com/bokeh/bokeh/issues/6480
+ for tz in pytz.all_timezones:
+ assert bus.convert_datetime_type(datetime.datetime(2016, 5, 11, tzinfo=datetime.tzinfo(tz))) == 1462924800000.0
+
testing = [[float('nan'), 3], [float('-inf'), [float('inf')]]]
expected = [['NaN', 3.0], ['-Infinity', ['Infinity']]]
| {
"commit_name": "merge_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | 0.12 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install bokeh",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | bokeh==3.4.3
contourpy==1.3.0
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
Jinja2==3.1.6
MarkupSafe==3.0.2
numpy==2.0.2
packaging @ file:///croot/packaging_1734472117206/work
pandas==2.2.3
pillow==11.1.0
pluggy @ file:///croot/pluggy_1733169602837/work
pytest @ file:///croot/pytest_1738938843180/work
python-dateutil==2.9.0.post0
pytz==2025.2
PyYAML==6.0.2
six==1.17.0
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
tornado==6.4.2
tzdata==2025.2
xyzservices==2025.1.0
| name: bokeh
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- bokeh==3.4.3
- contourpy==1.3.0
- jinja2==3.1.6
- markupsafe==3.0.2
- numpy==2.0.2
- pandas==2.2.3
- pillow==11.1.0
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyyaml==6.0.2
- six==1.17.0
- tornado==6.4.2
- tzdata==2025.2
- xyzservices==2025.1.0
prefix: /opt/conda/envs/bokeh
| [
"bokeh/util/tests/test_serialization.py::test_convert_datetime_type_with_tz"
] | [
"bokeh/util/tests/test_serialization.py::test_array_encoding_disabled_by_dtype"
] | [
"bokeh/util/tests/test_serialization.py::test_id",
"bokeh/util/tests/test_serialization.py::test_id_with_simple_ids",
"bokeh/util/tests/test_serialization.py::test_np_consts",
"bokeh/util/tests/test_serialization.py::test_binary_array_types",
"bokeh/util/tests/test_serialization.py::test_datetime_types",
"bokeh/util/tests/test_serialization.py::test_is_datetime_type",
"bokeh/util/tests/test_serialization.py::test_convert_datetime_type",
"bokeh/util/tests/test_serialization.py::test_traverse_return_valid_json",
"bokeh/util/tests/test_serialization.py::test_traverse_with_numpy",
"bokeh/util/tests/test_serialization.py::test_traverse_without_numpy",
"bokeh/util/tests/test_serialization.py::test_transform_array_force_list_default",
"bokeh/util/tests/test_serialization.py::test_transform_array_force_list_true",
"bokeh/util/tests/test_serialization.py::test_transform_series_force_list_default",
"bokeh/util/tests/test_serialization.py::test_transform_series_force_list_true",
"bokeh/util/tests/test_serialization.py::test_transform_array_to_list",
"bokeh/util/tests/test_serialization.py::test_transform_array_with_nans_to_list[values0]",
"bokeh/util/tests/test_serialization.py::test_transform_array_with_nans_to_list[values1]",
"bokeh/util/tests/test_serialization.py::test_encode_base64_dict",
"bokeh/util/tests/test_serialization.py::test_decode_base64_dict",
"bokeh/util/tests/test_serialization.py::test_encode_decode_roundtrip"
] | [] | BSD 3-Clause "New" or "Revised" License | 1,386 | 204 | [
"bokeh/util/serialization.py"
] |
|
alexhsamuel__ntab-11 | e28de6ee1a9bf991225bfbeac40f5852dbecab36 | 2017-06-22 15:08:41 | 0cec2c6fdb3e841d13e1e5bc8246bba083f216e0 | diff --git a/ntab/fmt.py b/ntab/fmt.py
new file mode 100644
index 0000000..7c047c4
--- /dev/null
+++ b/ntab/fmt.py
@@ -0,0 +1,22 @@
+from __future__ import absolute_import, division, print_function, unicode_literals
+
+import six
+
+from .lib.text import palide
+
+#-------------------------------------------------------------------------------
+
+def format_row(row, width=80, max_name_width=32):
+ """
+ @rtype
+ Generator of lines.
+ """
+ vals = row.__dict__
+ name_width = min(max_name_width, max( len(n) for n in vals ))
+ for name, val in six.iteritems(vals):
+ yield "{}: {}".format(
+ palide(name, name_width),
+ palide(str(val), width - name_width - 2)
+ )
+
+
diff --git a/ntab/html.py b/ntab/html.py
index 9ca4ccb..37c25a4 100644
--- a/ntab/html.py
+++ b/ntab/html.py
@@ -82,7 +82,7 @@ def _render(table, css_class="tab-table", max_rows=None):
yield "<thead>"
yield "<tr>"
for name, width in zip(names, widths):
- yield "<th>{}</th>".format(elide(name, max(width, 8)))
+ yield "<th>{}</th>".format(elide(name, max(width, 8), elide_pos=0.7))
yield "</tr>"
yield "</thead>"
yield "<tbody>"
diff --git a/ntab/lib/text.py b/ntab/lib/text.py
index 8ad3e4b..0812d61 100644
--- a/ntab/lib/text.py
+++ b/ntab/lib/text.py
@@ -27,14 +27,18 @@ def pad(string, length, pad=" ", pos=1.0):
if left > 0:
string = pad * (left // pad_len) + pad[: left % pad_len] + string
if right > 0:
- string = string + pad[-(right % pad_len) :] + pad * (right // pad_len)
+ string = (
+ string
+ + pad[pad_len - (right % pad_len) :]
+ + pad * (right // pad_len)
+ )
return string
_pad = pad
-def elide(string, length, ellipsis=u"\u2026", pos=0.7):
+def elide(string, length, ellipsis=u"\u2026", pos=1.0):
"""
Elides characters if necessary to fit `string` in `length` characters.
@@ -63,7 +67,7 @@ def elide(string, length, ellipsis=u"\u2026", pos=0.7):
def palide(string, length, ellipsis=u"\u2026", pad=" ", pad_pos=1.0,
- elide_pos=0.7):
+ elide_pos=1.0):
"""
A combination of `elide` and `pad`.
"""
diff --git a/ntab/tab.py b/ntab/tab.py
index 5b569b5..010dcb1 100644
--- a/ntab/tab.py
+++ b/ntab/tab.py
@@ -13,7 +13,7 @@ import numpy as np
import six
import sys
-from . import nplib
+from . import fmt, nplib
from .lib import *
#-------------------------------------------------------------------------------
@@ -181,11 +181,22 @@ class Row(object):
)
+ def __str__(self):
+ return "\n".join(fmt.format_row(self))
+
+
@property
- def __index__(self):
+ def __idx__(self):
return self.__idx
+ # FIXME: Potentially sketchy.
+ @property
+ def __dict__(self):
+ return odict(
+ (n, a[self.__idx]) for n, a in six.iteritems(self.__arrs) )
+
+
class RowsProxy(collections.Sequence):
# FIXME: Allow modifying values in rows (i.e. mutable rows)?
| pretty-print a row
Print a row with one field on each line.
| alexhsamuel/ntab | diff --git a/ntab/lib/test/test_text.py b/ntab/lib/test/test_text.py
new file mode 100644
index 0000000..aff5323
--- /dev/null
+++ b/ntab/lib/test/test_text.py
@@ -0,0 +1,120 @@
+from __future__ import absolute_import, division, print_function, unicode_literals
+
+import pytest
+
+from ntab.lib.text import *
+
+#-------------------------------------------------------------------------------
+
+def test_pad_length():
+ assert pad("hello", 0) == "hello"
+ assert pad("hello", 4) == "hello"
+ assert pad("hello", 5) == "hello"
+ assert pad("hello", 6) == "hello "
+ assert pad("hello", 10) == "hello "
+ assert pad("hello", length=10) == "hello "
+
+ assert pad("", 0) == ""
+ assert pad("", 5) == " "
+
+
+def test_pad_pad():
+ assert pad("hello", 4, "x") == "hello"
+ assert pad("hello", 6, "x") == "hellox"
+ assert pad("hello", 9, "x") == "helloxxxx"
+ assert pad("hello", 8, "o") == "helloooo"
+ assert pad("hello", 8, "-") == "hello---"
+ assert pad("hello", pad="-", length=8) == "hello---"
+ assert pad("hello", 8, "-=") == "hello=-="
+ assert pad("hello", 12, ".!.") == "hello..!..!."
+
+
+def test_pad_left():
+ assert pad("hello", 4, pos=0 ) == "hello"
+ assert pad("hello", 10, pos=1 ) == "hello "
+ assert pad("hello", 10, pos=0 ) == " hello"
+ assert pad("hello", 10, pos=0, pad="/") == "/////hello"
+
+
+# FIXME: Test center().
+
+def test_elide_default():
+ assert elide("I am a duck.", 8) == u"I am a \u2026"
+ assert elide("I am a duck.", 14) == "I am a duck."
+
+
+def test_elide_length():
+ assert elide("Hello, world!", 15, "...") == "Hello, world!"
+ assert elide("Hello, world!", 13, "...") == "Hello, world!"
+ assert elide("Hello, world!", 12, "...") == "Hello, wo..."
+ assert elide("Hello, world!", 11, "...") == "Hello, w..."
+ assert elide("Hello, world!", 10, "...") == "Hello, ..."
+ assert elide("Hello, world!", 5, "...") == "He..."
+
+ assert elide("foo", 3, "...") == "foo"
+ assert elide("fool", 3, "...") == "..."
+
+
+def test_elide_ellipsis():
+ assert elide("Hello, world!", 10, "...") == "Hello, ..."
+ assert elide("Hello, world!", 10, ".." ) == "Hello, w.."
+ assert elide("Hello, world!", 10, "*" ) == "Hello, wo*"
+ assert elide("Hello, world!", 10, "" ) == "Hello, wor"
+
+ assert elide("Hello, world!", ellipsis="*", length=10) == "Hello, wo*"
+
+
+def test_elide_position():
+ assert elide("Hello, world!", 10, "...", 1.0) == "Hello, ..."
+ assert elide("Hello, world!", 10, "...", 0.7) == "Hello...d!"
+ assert elide("Hello, world!", 10, "...", 0.5) == "Hell...ld!"
+ assert elide("Hello, world!", 10, "...", 0.4) == "Hel...rld!"
+ assert elide("Hello, world!", 10, "...", 0.0) == "... world!"
+
+ assert elide(
+ "Hello, world!", pos=0.4, length=10, ellipsis="..") == "Hel..orld!"
+
+
+def test_palide_length():
+ assert palide("Hello, world!", 3, "...") == "..."
+ assert palide("Hello, world!", 10, "...") == "Hello, ..."
+ assert palide("Hello, world!", 11, "...") == "Hello, w..."
+ assert palide("Hello, world!", 13, "...") == "Hello, world!"
+ assert palide("Hello, world!", 14, "...") == "Hello, world! "
+ assert palide("Hello, world!", 20, "...") == "Hello, world! "
+
+
+def test_palide_ellipsis():
+ assert palide("Hello, world!", 10, "~~~~~") == "Hello~~~~~"
+ assert palide("Hello, world!", 10, "..." ) == "Hello, ..."
+ assert palide("Hello, world!", 10, ".." ) == "Hello, w.."
+ assert palide("Hello, world!", 10, "" ) == "Hello, wor"
+
+
+def test_palide_pad():
+ assert palide("Hello, world!", 13, pad="x") == "Hello, world!"
+ assert palide("Hello, world!", 18, pad="x") == "Hello, world!xxxxx"
+ assert palide("Hello, world!", 18, pad="!") == "Hello, world!!!!!!"
+
+
+def test_palide_position():
+ assert palide("Hello, world!", 11, "..", elide_pos=0.0) == "..o, world!"
+ assert palide("Hello, world!", 11, "..", elide_pos=0.6) == "Hello..rld!"
+ assert palide("Hello, world!", 11, "..", elide_pos=0.8) == "Hello, ..d!"
+
+
+def test_palide_args():
+ assert palide(
+ ellipsis="-//-",
+ length=20,
+ pad="x",
+ elide_pos=0.4,
+ string="The quick brown fox jumped over the lazy dogs.",
+ ) == "The qu-//-lazy dogs."
+
+
+def test_palide_default():
+ assert palide("I am a duck.", 8) == u"I am a \u2026"
+ assert palide("I am a duck.", 14) == "I am a duck. "
+
+
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 3
} | 0.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup==1.2.2
future==1.0.0
iniconfig==2.1.0
-e git+https://github.com/alexhsamuel/ntab.git@e28de6ee1a9bf991225bfbeac40f5852dbecab36#egg=ntab
numpy==2.0.2
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
six==1.17.0
tomli==2.2.1
| name: ntab
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- exceptiongroup==1.2.2
- future==1.0.0
- iniconfig==2.1.0
- numpy==2.0.2
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- six==1.17.0
- tomli==2.2.1
prefix: /opt/conda/envs/ntab
| [
"ntab/lib/test/test_text.py::test_pad_length",
"ntab/lib/test/test_text.py::test_pad_pad",
"ntab/lib/test/test_text.py::test_pad_left",
"ntab/lib/test/test_text.py::test_elide_default",
"ntab/lib/test/test_text.py::test_elide_length",
"ntab/lib/test/test_text.py::test_elide_ellipsis",
"ntab/lib/test/test_text.py::test_palide_length",
"ntab/lib/test/test_text.py::test_palide_ellipsis",
"ntab/lib/test/test_text.py::test_palide_pad",
"ntab/lib/test/test_text.py::test_palide_default"
] | [] | [
"ntab/lib/test/test_text.py::test_elide_position",
"ntab/lib/test/test_text.py::test_palide_position",
"ntab/lib/test/test_text.py::test_palide_args"
] | [] | MIT License | 1,392 | 1,010 | [
"ntab/html.py",
"ntab/lib/text.py",
"ntab/tab.py"
] |
|
Turbo87__utm-31 | 4c7c13f2b2b9c01a8581392641aeb8bbda6aba6f | 2017-06-26 10:44:15 | 4c7c13f2b2b9c01a8581392641aeb8bbda6aba6f | diff --git a/utm/conversion.py b/utm/conversion.py
old mode 100755
new mode 100644
index d21742a..449f3d1
--- a/utm/conversion.py
+++ b/utm/conversion.py
@@ -216,13 +216,13 @@ def latlon_to_zone_number(latitude, longitude):
return 32
if 72 <= latitude <= 84 and longitude >= 0:
- if longitude <= 9:
+ if longitude < 9:
return 31
- elif longitude <= 21:
+ elif longitude < 21:
return 33
- elif longitude <= 33:
+ elif longitude < 33:
return 35
- elif longitude <= 42:
+ elif longitude < 42:
return 37
return int((longitude + 180) / 6) + 1
| UTM zone exceptions error
By definition zones are left-closed, right-open intervals, e.g. zone 31: 0 <= latitude < 6.
In function latlon_to_zone_number:
```
if 72 <= latitude <= 84 and longitude >= 0:
if longitude <= 9:
return 31
elif longitude <= 21:
return 33
elif longitude <= 33:
return 35
elif longitude <= 42:
return 37
```
For latitudes >=72, this results in:
zone 31: 0 <= longitude <= 9
zone 33: 9 < longitude <= 21
zone 35: 21< longitude <= 33
zone 37: 33< longitude <= 42
but for latitudes < 72:
zone 37: 36 <= latitude < 42
| Turbo87/utm | diff --git a/test/test_utm.py b/test/test_utm.py
index 55686d7..c820cea 100755
--- a/test/test_utm.py
+++ b/test/test_utm.py
@@ -231,5 +231,22 @@ class Zone32V(unittest.TestCase):
self.assert_zone_equal(UTM.from_latlon(64, 12), 33, 'W')
+class TestRightBoundaries(unittest.TestCase):
+
+ def assert_zone_equal(self, result, expected_number):
+ self.assertEqual(result[2], expected_number)
+
+ def test_limits(self):
+ self.assert_zone_equal(UTM.from_latlon(40, 0), 31)
+ self.assert_zone_equal(UTM.from_latlon(40, 5.999999), 31)
+ self.assert_zone_equal(UTM.from_latlon(40, 6), 32)
+
+ self.assert_zone_equal(UTM.from_latlon(72, 0), 31)
+ self.assert_zone_equal(UTM.from_latlon(72, 5.999999), 31)
+ self.assert_zone_equal(UTM.from_latlon(72, 6), 31)
+ self.assert_zone_equal(UTM.from_latlon(72, 8.999999), 31)
+ self.assert_zone_equal(UTM.from_latlon(72, 9), 33)
+
+
if __name__ == '__main__':
unittest.main()
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 0.4 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.5",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
-e git+https://github.com/Turbo87/utm.git@4c7c13f2b2b9c01a8581392641aeb8bbda6aba6f#egg=utm
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: utm
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
prefix: /opt/conda/envs/utm
| [
"test/test_utm.py::TestRightBoundaries::test_limits"
] | [] | [
"test/test_utm.py::KnownValues::test_from_latlon",
"test/test_utm.py::KnownValues::test_to_latlon",
"test/test_utm.py::BadInput::test_from_latlon_range_checks",
"test/test_utm.py::BadInput::test_to_latlon_range_checks",
"test/test_utm.py::Zone32V::test_above",
"test/test_utm.py::Zone32V::test_below",
"test/test_utm.py::Zone32V::test_inside",
"test/test_utm.py::Zone32V::test_left_of",
"test/test_utm.py::Zone32V::test_right_of"
] | [] | MIT License | 1,400 | 231 | [
"utm/conversion.py"
] |
|
openmrslab__suspect-57 | f65efe543321e1d75d6ecbc7c22be73ab84d3d8a | 2017-06-26 15:26:22 | 820e897294d90e08c4b91be7289e4ee9ebc6d009 | diff --git a/suspect/mrsobjects.py b/suspect/mrsobjects.py
index 7fb4764..3785dc1 100644
--- a/suspect/mrsobjects.py
+++ b/suspect/mrsobjects.py
@@ -324,3 +324,46 @@ class MRSSpectrum(MRSBase):
suspect.adjust_frequency : equivalent function
"""
return self.fid().adjust_frequency(frequency_shift).spectrum()
+
+ def slice_hz(self, lower_bound, upper_bound):
+ """
+ Creates a slice object to access the region of the spectrum between
+ the specified bounds, in Hertz.
+
+ Parameters
+ ----------
+ lower_bound : float
+ The lower frequency bound of the region, in Hertz.
+ upper_bound : float
+ The upper frequency bound of the region, in Hertz.
+
+ Returns
+ -------
+ out : Slice
+ """
+ lower_index = numpy.floor((lower_bound + self.sw / 2) / self.df)
+ upper_index = numpy.ceil((upper_bound + self.sw / 2) / self.df)
+ if lower_index < 0:
+ raise ValueError("Could not create a slice for lower bound {}, value is outside range".format(lower_bound))
+ if upper_index < 0:
+ raise ValueError("Could not create a slice for upper bound {}, value is outside range".format(upper_bound))
+ return slice(int(lower_index), int(upper_index))
+
+ def slice_ppm(self, lower_bound, upper_bound):
+ """
+ Creates a slice object to access the region of the spectrum between
+ the specified bounds, in PPM.
+
+ Parameters
+ ----------
+ lower_bound : float
+ The lower frequency bound of the region, in PPM.
+ upper_bound : float
+ The upper frequency bound of the region, in PPM.
+
+ Returns
+ -------
+ out : Slice
+ """
+ return self.slice_hz(self.ppm_to_hertz(lower_bound),
+ self.ppm_to_hertz(upper_bound))
| Slice spectrum
It should be possible to generate a slice object for a specified frequency range from an MRSSpectrum object, which can be used to extract specific regions of a spectrum (or set of spectra). | openmrslab/suspect | diff --git a/tests/test_mrs/test_core.py b/tests/test_mrs/test_core.py
index 97a9f0e..2ea638e 100644
--- a/tests/test_mrs/test_core.py
+++ b/tests/test_mrs/test_core.py
@@ -1,6 +1,7 @@
import suspect
import numpy
+import pytest
def test_adjust_zero_phase():
@@ -15,3 +16,23 @@ def test_adjust_first_phase():
numpy.testing.assert_almost_equal(phased_data[0], -1j)
numpy.testing.assert_almost_equal(phased_data[-1], numpy.exp(1j * 0.4 * numpy.pi))
numpy.testing.assert_almost_equal(phased_data[5], 1)
+
+
+def test_slice_hz():
+ data = suspect.MRSData(numpy.ones(1024), 1e-3, 123)
+ spectrum = data.spectrum()
+ whole_slice = spectrum.slice_hz(-500, 500)
+ assert whole_slice == slice(0, 1024)
+ restricted_slice = spectrum.slice_hz(-100, 200)
+ assert restricted_slice == slice(409, 717)
+ with pytest.raises(ValueError):
+ too_large_slice = spectrum.slice_hz(-1000, 1000)
+
+
+def test_slice_ppm():
+ data = suspect.MRSData(numpy.ones(1000), 1e-3, 123)
+ spectrum = data.spectrum()
+ a_slice = spectrum.slice_ppm(5.7, 3.7)
+ assert a_slice == slice(377, 623)
+ reversed_slice = spectrum.slice_ppm(3.7, 5.7)
+ assert a_slice == slice(377, 623)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 3,
"test_score": 2
},
"num_modified_files": 1
} | 0.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"mock"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.16
asteval==1.0.6
asttokens==3.0.0
attrs==25.3.0
babel==2.17.0
beautifulsoup4==4.13.3
bleach==6.2.0
certifi==2025.1.31
charset-normalizer==3.4.1
comm==0.2.2
debugpy==1.8.13
decorator==5.2.1
defusedxml==0.7.1
dill==0.3.9
docutils==0.21.2
exceptiongroup==1.2.2
executing==2.2.0
fastjsonschema==2.21.1
idna==3.10
imagesize==1.4.1
importlib_metadata==8.6.1
iniconfig==2.1.0
ipykernel==6.29.5
ipython==8.18.1
jedi==0.19.2
Jinja2==3.1.6
jsonschema==4.23.0
jsonschema-specifications==2024.10.1
jupyter_client==8.6.3
jupyter_core==5.7.2
jupyterlab_pygments==0.3.0
lmfit==1.3.3
MarkupSafe==3.0.2
matplotlib-inline==0.1.7
mistune==3.1.3
mock==5.2.0
nbclient==0.10.2
nbconvert==7.16.6
nbformat==5.10.4
nbsphinx==0.9.7
nest-asyncio==1.6.0
numpy==2.0.2
packaging==24.2
pandocfilters==1.5.1
Parsley==1.3
parso==0.8.4
pexpect==4.9.0
platformdirs==4.3.7
pluggy==1.5.0
prompt_toolkit==3.0.50
psutil==7.0.0
ptyprocess==0.7.0
pure_eval==0.2.3
pydicom==2.4.4
Pygments==2.19.1
pytest==8.3.5
python-dateutil==2.9.0.post0
PyWavelets==1.6.0
pyzmq==26.3.0
referencing==0.36.2
requests==2.32.3
rpds-py==0.24.0
scipy==1.13.1
six==1.17.0
snowballstemmer==2.2.0
soupsieve==2.6
Sphinx==7.4.7
sphinxcontrib-applehelp==2.0.0
sphinxcontrib-devhelp==2.0.0
sphinxcontrib-htmlhelp==2.1.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==2.0.0
sphinxcontrib-serializinghtml==2.0.0
stack-data==0.6.3
-e git+https://github.com/openmrslab/suspect.git@f65efe543321e1d75d6ecbc7c22be73ab84d3d8a#egg=suspect
tinycss2==1.4.0
tomli==2.2.1
tornado==6.4.2
traitlets==5.14.3
typing_extensions==4.13.0
uncertainties==3.2.2
urllib3==2.3.0
wcwidth==0.2.13
webencodings==0.5.1
zipp==3.21.0
| name: suspect
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.16
- asteval==1.0.6
- asttokens==3.0.0
- attrs==25.3.0
- babel==2.17.0
- beautifulsoup4==4.13.3
- bleach==6.2.0
- certifi==2025.1.31
- charset-normalizer==3.4.1
- comm==0.2.2
- debugpy==1.8.13
- decorator==5.2.1
- defusedxml==0.7.1
- dill==0.3.9
- docutils==0.21.2
- exceptiongroup==1.2.2
- executing==2.2.0
- fastjsonschema==2.21.1
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- ipykernel==6.29.5
- ipython==8.18.1
- jedi==0.19.2
- jinja2==3.1.6
- jsonschema==4.23.0
- jsonschema-specifications==2024.10.1
- jupyter-client==8.6.3
- jupyter-core==5.7.2
- jupyterlab-pygments==0.3.0
- lmfit==1.3.3
- markupsafe==3.0.2
- matplotlib-inline==0.1.7
- mistune==3.1.3
- mock==5.2.0
- nbclient==0.10.2
- nbconvert==7.16.6
- nbformat==5.10.4
- nbsphinx==0.9.7
- nest-asyncio==1.6.0
- numpy==2.0.2
- packaging==24.2
- pandocfilters==1.5.1
- parsley==1.3
- parso==0.8.4
- pexpect==4.9.0
- platformdirs==4.3.7
- pluggy==1.5.0
- prompt-toolkit==3.0.50
- psutil==7.0.0
- ptyprocess==0.7.0
- pure-eval==0.2.3
- pydicom==2.4.4
- pygments==2.19.1
- pytest==8.3.5
- python-dateutil==2.9.0.post0
- pywavelets==1.6.0
- pyzmq==26.3.0
- referencing==0.36.2
- requests==2.32.3
- rpds-py==0.24.0
- scipy==1.13.1
- six==1.17.0
- snowballstemmer==2.2.0
- soupsieve==2.6
- sphinx==7.4.7
- sphinxcontrib-applehelp==2.0.0
- sphinxcontrib-devhelp==2.0.0
- sphinxcontrib-htmlhelp==2.1.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==2.0.0
- sphinxcontrib-serializinghtml==2.0.0
- stack-data==0.6.3
- tinycss2==1.4.0
- tomli==2.2.1
- tornado==6.4.2
- traitlets==5.14.3
- typing-extensions==4.13.0
- uncertainties==3.2.2
- urllib3==2.3.0
- wcwidth==0.2.13
- webencodings==0.5.1
- zipp==3.21.0
prefix: /opt/conda/envs/suspect
| [
"tests/test_mrs/test_core.py::test_slice_hz",
"tests/test_mrs/test_core.py::test_slice_ppm"
] | [] | [
"tests/test_mrs/test_core.py::test_adjust_zero_phase",
"tests/test_mrs/test_core.py::test_adjust_first_phase"
] | [] | MIT License | 1,401 | 479 | [
"suspect/mrsobjects.py"
] |
|
mjs__imapclient-244 | 9e82aa8e7fe0a8cd3b9b6318579a873c9a1bdde6 | 2017-06-26 19:38:01 | 2abdac690fa653fa2d0d55b7617be24101597698 | mjs: OAUTH1 support was removed in #218 . Would you mind rebasing so this PR just has the `Address` fix? | diff --git a/imapclient/response_types.py b/imapclient/response_types.py
index ea5d71d..c35dd00 100644
--- a/imapclient/response_types.py
+++ b/imapclient/response_types.py
@@ -80,9 +80,12 @@ class Address(namedtuple("Address", "name route mailbox host")):
"""
def __str__(self):
- return formataddr((
- to_unicode(self.name),
- to_unicode(self.mailbox) + '@' + to_unicode(self.host)))
+ if self.mailbox and self.host:
+ address = to_unicode(self.mailbox) + '@' + to_unicode(self.host)
+ else:
+ address = to_unicode(self.mailbox or self.host)
+
+ return formataddr((to_unicode(self.name), address))
class SearchIds(list):
| Avoid TypeError when using `str` on Address tuple
Some emails have no mailbox or host (e.g. `undisclosed-recipients` case), so when parsing the ENVELOPE of the message using imapclient, we can get something like this:
```
In [8]: from imapclient.response_types import *
In [9]: addr = Address('Anonymous', None, None, 'undisclosed-recipients')
In [10]: str(addr)
---------------------------------------------------------------------------
.../lib/python3.5/site-packages/imapclient/response_types.py in __str__(self)
57 return formataddr((
58 to_unicode(self.name),
---> 59 to_unicode(self.mailbox) + '@' + to_unicode(self.host)))
60
61
TypeError: unsupported operand type(s) for +: 'NoneType' and 'str'
```
I think the `__str__` method should handle this and just returning `self.mailbox` or `self.host` if the other part is missing. I could write the PR but I prefer to have thoughs about this before. | mjs/imapclient | diff --git a/imapclient/test/test_response_parser.py b/imapclient/test/test_response_parser.py
index 3c13534..111188b 100644
--- a/imapclient/test/test_response_parser.py
+++ b/imapclient/test/test_response_parser.py
@@ -491,6 +491,12 @@ class TestParseFetchResponse(unittest.TestCase):
self.assertEqual(str(Address("Mary Jane", None, "mary", "jane.org")),
"Mary Jane <[email protected]>")
+ self.assertEqual(str(Address("Anonymous", None, "undisclosed-recipients", None)),
+ "Anonymous <undisclosed-recipients>")
+
+ self.assertEqual(str(Address(None, None, None, "undisclosed-recipients")),
+ "undisclosed-recipients")
+
def add_crlf(text):
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 1
} | 1.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": null,
"python": "3.5",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
coverage==6.2
-e git+https://github.com/mjs/imapclient.git@9e82aa8e7fe0a8cd3b9b6318579a873c9a1bdde6#egg=IMAPClient
importlib-metadata==4.8.3
iniconfig==1.1.1
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
pytest-cov==4.0.0
six==1.17.0
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: imapclient
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- coverage==6.2
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-cov==4.0.0
- six==1.17.0
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/imapclient
| [
"imapclient/test/test_response_parser.py::TestParseFetchResponse::test_Address_str"
] | [] | [
"imapclient/test/test_response_parser.py::TestParseResponse::test_bad_literal",
"imapclient/test/test_response_parser.py::TestParseResponse::test_bad_quoting",
"imapclient/test/test_response_parser.py::TestParseResponse::test_complex_mixed",
"imapclient/test/test_response_parser.py::TestParseResponse::test_deeper_nest_tuple",
"imapclient/test/test_response_parser.py::TestParseResponse::test_empty_tuple",
"imapclient/test/test_response_parser.py::TestParseResponse::test_envelopey",
"imapclient/test/test_response_parser.py::TestParseResponse::test_envelopey_quoted",
"imapclient/test/test_response_parser.py::TestParseResponse::test_incomplete_tuple",
"imapclient/test/test_response_parser.py::TestParseResponse::test_int",
"imapclient/test/test_response_parser.py::TestParseResponse::test_int_and_tuple",
"imapclient/test/test_response_parser.py::TestParseResponse::test_literal",
"imapclient/test/test_response_parser.py::TestParseResponse::test_literal_with_more",
"imapclient/test/test_response_parser.py::TestParseResponse::test_nested_tuple",
"imapclient/test/test_response_parser.py::TestParseResponse::test_nil",
"imapclient/test/test_response_parser.py::TestParseResponse::test_quoted_specials",
"imapclient/test/test_response_parser.py::TestParseResponse::test_square_brackets",
"imapclient/test/test_response_parser.py::TestParseResponse::test_string",
"imapclient/test/test_response_parser.py::TestParseResponse::test_tuple",
"imapclient/test/test_response_parser.py::TestParseResponse::test_unquoted",
"imapclient/test/test_response_parser.py::TestParseMessageList::test_basic",
"imapclient/test/test_response_parser.py::TestParseMessageList::test_modseq",
"imapclient/test/test_response_parser.py::TestParseMessageList::test_modseq_interleaved",
"imapclient/test/test_response_parser.py::TestParseMessageList::test_modseq_no_space",
"imapclient/test/test_response_parser.py::TestParseMessageList::test_one_id",
"imapclient/test/test_response_parser.py::TestParseFetchResponse::test_BODY",
"imapclient/test/test_response_parser.py::TestParseFetchResponse::test_BODYSTRUCTURE",
"imapclient/test/test_response_parser.py::TestParseFetchResponse::test_BODY_HEADER_FIELDS",
"imapclient/test/test_response_parser.py::TestParseFetchResponse::test_ENVELOPE",
"imapclient/test/test_response_parser.py::TestParseFetchResponse::test_ENVELOPE_with_empty_addresses",
"imapclient/test/test_response_parser.py::TestParseFetchResponse::test_ENVELOPE_with_invalid_date",
"imapclient/test/test_response_parser.py::TestParseFetchResponse::test_ENVELOPE_with_no_date",
"imapclient/test/test_response_parser.py::TestParseFetchResponse::test_FLAGS",
"imapclient/test/test_response_parser.py::TestParseFetchResponse::test_INTERNALDATE",
"imapclient/test/test_response_parser.py::TestParseFetchResponse::test_INTERNALDATE_normalised",
"imapclient/test/test_response_parser.py::TestParseFetchResponse::test_UID",
"imapclient/test/test_response_parser.py::TestParseFetchResponse::test_bad_UID",
"imapclient/test/test_response_parser.py::TestParseFetchResponse::test_bad_data",
"imapclient/test/test_response_parser.py::TestParseFetchResponse::test_bad_msgid",
"imapclient/test/test_response_parser.py::TestParseFetchResponse::test_basic",
"imapclient/test/test_response_parser.py::TestParseFetchResponse::test_literals",
"imapclient/test/test_response_parser.py::TestParseFetchResponse::test_literals_and_keys_with_square_brackets",
"imapclient/test/test_response_parser.py::TestParseFetchResponse::test_missing_data",
"imapclient/test/test_response_parser.py::TestParseFetchResponse::test_mixed_types",
"imapclient/test/test_response_parser.py::TestParseFetchResponse::test_multiple_messages",
"imapclient/test/test_response_parser.py::TestParseFetchResponse::test_none_special_case",
"imapclient/test/test_response_parser.py::TestParseFetchResponse::test_not_uid_is_key",
"imapclient/test/test_response_parser.py::TestParseFetchResponse::test_odd_pairs",
"imapclient/test/test_response_parser.py::TestParseFetchResponse::test_partial_fetch",
"imapclient/test/test_response_parser.py::TestParseFetchResponse::test_same_message_appearing_multiple_times",
"imapclient/test/test_response_parser.py::TestParseFetchResponse::test_simple_pairs"
] | [] | BSD License | 1,402 | 191 | [
"imapclient/response_types.py"
] |
networkx__networkx-2488 | 22a6ebaf0c235a825195e48558f39b65c26d5a1c | 2017-06-26 23:27:26 | 3f4fd85765bf2d88188cfd4c84d0707152e6cd1e | dschult: Let's go ahead and make the relabel files pep8 compliant. Our general philosophy is to improve pep8 stuff only when we touch code for other reasons. Sometimes that means only part of a file and sometimes it means the whole file. The key is to make it so the pep8 changes don't swamp the actual changes and they won't here. I don't change the tests for pep8 very often because it feels like I make the readability worse by adding so many line breaks to keep the code within 80 chars per line. But that's personal preference and depends on the test code your looking at. I'll leave that to you.
Thanks!
jarrodmillman: This is ready to merge.
I rebased on master and made a separate commit for pep8 changes outside of the functions I modified. I didn't make any new code wraps and took out an existing one in `networkx/tests/test_relabel.py` that I thought was unhelpful in terms of readability.
jarrodmillman: @dschult Ready for merge. | diff --git a/networkx/classes/digraph.py b/networkx/classes/digraph.py
index 73a59c30c..2c84b9d35 100644
--- a/networkx/classes/digraph.py
+++ b/networkx/classes/digraph.py
@@ -1124,7 +1124,8 @@ class DiGraph(Graph):
H.add_edges_from((v, u, deepcopy(d)) for u, v, d
in self.edges(data=True))
H.graph = deepcopy(self.graph)
- H._node = deepcopy(self._node)
+ for n in self._node:
+ H._node[n] = deepcopy(self._node[n])
else:
self._pred, self._succ = self._succ, self._pred
self._adj = self._succ
diff --git a/networkx/relabel.py b/networkx/relabel.py
index 569ba2c02..9cac3f169 100644
--- a/networkx/relabel.py
+++ b/networkx/relabel.py
@@ -6,10 +6,11 @@
# BSD license.
import networkx as nx
__author__ = """\n""".join(['Aric Hagberg <[email protected]>',
- 'Pieter Swart ([email protected])',
- 'Dan Schult ([email protected])'])
+ 'Pieter Swart ([email protected])',
+ 'Dan Schult ([email protected])'])
__all__ = ['convert_node_labels_to_integers', 'relabel_nodes']
+
def relabel_nodes(G, mapping, copy=True):
"""Relabel the nodes of the graph G.
@@ -76,21 +77,21 @@ def relabel_nodes(G, mapping, copy=True):
Only the nodes specified in the mapping will be relabeled.
The keyword setting copy=False modifies the graph in place.
- Relabel_nodes avoids naming collisions by building a
+ Relabel_nodes avoids naming collisions by building a
directed graph from ``mapping`` which specifies the order of
relabelings. Naming collisions, such as a->b, b->c, are ordered
such that "b" gets renamed to "c" before "a" gets renamed "b".
- In cases of circular mappings (e.g. a->b, b->a), modifying the
+ In cases of circular mappings (e.g. a->b, b->a), modifying the
graph is not possible in-place and an exception is raised.
In that case, use copy=True.
-
+
See Also
--------
convert_node_labels_to_integers
"""
# you can pass a function f(old_label)->new_label
# but we'll just make a dictionary here regardless
- if not hasattr(mapping,"__getitem__"):
+ if not hasattr(mapping, "__getitem__"):
m = dict((n, mapping(n)) for n in G)
else:
m = mapping
@@ -131,38 +132,39 @@ def _relabel_inplace(G, mapping):
try:
G.add_node(new, **G.node[old])
except KeyError:
- raise KeyError("Node %s is not in the graph"%old)
+ raise KeyError("Node %s is not in the graph" % old)
if multigraph:
new_edges = [(new, new if old == target else target, key, data)
- for (_,target,key,data)
+ for (_, target, key, data)
in G.edges(old, data=True, keys=True)]
if directed:
new_edges += [(new if old == source else source, new, key, data)
- for (source, _, key,data)
+ for (source, _, key, data)
in G.in_edges(old, data=True, keys=True)]
else:
new_edges = [(new, new if old == target else target, data)
- for (_,target,data) in G.edges(old, data=True)]
+ for (_, target, data) in G.edges(old, data=True)]
if directed:
- new_edges += [(new if old == source else source,new,data)
- for (source,_,data) in G.in_edges(old, data=True)]
+ new_edges += [(new if old == source else source, new, data)
+ for (source, _, data) in G.in_edges(old, data=True)]
G.remove_node(old)
G.add_edges_from(new_edges)
return G
+
def _relabel_copy(G, mapping):
H = G.__class__()
+ H.add_nodes_from(mapping.get(n, n) for n in G)
+ H._node.update(dict((mapping.get(n, n), d.copy()) for n, d in G.node.items()))
if G.name:
H.name = "(%s)" % G.name
if G.is_multigraph():
- H.add_edges_from( (mapping.get(n1, n1),mapping.get(n2, n2),k,d.copy())
- for (n1,n2,k,d) in G.edges(keys=True, data=True))
+ H.add_edges_from((mapping.get(n1, n1), mapping.get(n2, n2), k, d.copy())
+ for (n1, n2, k, d) in G.edges(keys=True, data=True))
else:
- H.add_edges_from( (mapping.get(n1, n1),mapping.get(n2, n2),d.copy())
- for (n1, n2, d) in G.edges(data=True))
+ H.add_edges_from((mapping.get(n1, n1), mapping.get(n2, n2), d.copy())
+ for (n1, n2, d) in G.edges(data=True))
- H.add_nodes_from(mapping.get(n, n) for n in G)
- H._node.update(dict((mapping.get(n, n), d.copy()) for n,d in G.node.items()))
H.graph.update(G.graph.copy())
return H
@@ -200,27 +202,27 @@ def convert_node_labels_to_integers(G, first_label=0, ordering="default",
--------
relabel_nodes
"""
- N = G.number_of_nodes()+first_label
+ N = G.number_of_nodes() + first_label
if ordering == "default":
mapping = dict(zip(G.nodes(), range(first_label, N)))
elif ordering == "sorted":
nlist = sorted(G.nodes())
mapping = dict(zip(nlist, range(first_label, N)))
elif ordering == "increasing degree":
- dv_pairs = [(d,n) for (n,d) in G.degree()]
- dv_pairs.sort() # in-place sort from lowest to highest degree
- mapping = dict(zip([n for d,n in dv_pairs], range(first_label, N)))
+ dv_pairs = [(d, n) for (n, d) in G.degree()]
+ dv_pairs.sort() # in-place sort from lowest to highest degree
+ mapping = dict(zip([n for d, n in dv_pairs], range(first_label, N)))
elif ordering == "decreasing degree":
- dv_pairs = [(d,n) for (n,d) in G.degree()]
- dv_pairs.sort() # in-place sort from lowest to highest degree
+ dv_pairs = [(d, n) for (n, d) in G.degree()]
+ dv_pairs.sort() # in-place sort from lowest to highest degree
dv_pairs.reverse()
- mapping = dict(zip([n for d,n in dv_pairs], range(first_label, N)))
+ mapping = dict(zip([n for d, n in dv_pairs], range(first_label, N)))
else:
- raise nx.NetworkXError('Unknown node ordering: %s'%ordering)
+ raise nx.NetworkXError('Unknown node ordering: %s' % ordering)
H = relabel_nodes(G, mapping)
- H.name = "("+G.name+")_with_int_labels"
+ H.name = "(" + G.name + ")_with_int_labels"
# create node attribute with the old label
if label_attribute is not None:
nx.set_node_attributes(H, label_attribute,
- dict((v,k) for k,v in mapping.items()))
+ dict((v, k) for k, v in mapping.items()))
return H
| _relabel_copy and OrderedGraph
The current behavior of `_relabel_copy()` is somewhat unintuitive for OrderedGraphs, where the nodes in the newly created graph are ordered according to the edge iterator of the original graph, not the node iterator. I think this would be fixed by placing `H.add_nodes_from(mapping.get(n, n) for n in G)` at the beginning of the function. | networkx/networkx | diff --git a/networkx/classes/tests/test_digraph.py b/networkx/classes/tests/test_digraph.py
index b46530e4d..c5b50c720 100644
--- a/networkx/classes/tests/test_digraph.py
+++ b/networkx/classes/tests/test_digraph.py
@@ -1,173 +1,192 @@
#!/usr/bin/env python
-from nose.tools import *
-import networkx
+
+from nose.tools import assert_equal
+from nose.tools import assert_false
+from nose.tools import assert_true
+from nose.tools import assert_raises
+
+
+import networkx as nx
+from networkx.testing import assert_nodes_equal
from test_graph import BaseGraphTester, BaseAttrGraphTester, TestGraph
from test_graph import TestEdgeSubgraph as TestGraphEdgeSubgraph
+
class BaseDiGraphTester(BaseGraphTester):
def test_has_successor(self):
- G=self.K3
- assert_equal(G.has_successor(0,1),True)
- assert_equal(G.has_successor(0,-1),False)
+ G = self.K3
+ assert_equal(G.has_successor(0, 1), True)
+ assert_equal(G.has_successor(0, -1), False)
def test_successors(self):
- G=self.K3
- assert_equal(sorted(G.successors(0)),[1,2])
- assert_raises((KeyError,networkx.NetworkXError), G.successors,-1)
+ G = self.K3
+ assert_equal(sorted(G.successors(0)), [1, 2])
+ assert_raises((KeyError, nx.NetworkXError), G.successors, -1)
def test_has_predecessor(self):
- G=self.K3
- assert_equal(G.has_predecessor(0,1),True)
- assert_equal(G.has_predecessor(0,-1),False)
+ G = self.K3
+ assert_equal(G.has_predecessor(0, 1), True)
+ assert_equal(G.has_predecessor(0, -1), False)
def test_predecessors(self):
- G=self.K3
- assert_equal(sorted(G.predecessors(0)),[1,2])
- assert_raises((KeyError,networkx.NetworkXError), G.predecessors,-1)
+ G = self.K3
+ assert_equal(sorted(G.predecessors(0)), [1, 2])
+ assert_raises((KeyError, nx.NetworkXError), G.predecessors, -1)
def test_edges(self):
- G=self.K3
- assert_equal(sorted(G.edges()),[(0,1),(0,2),(1,0),(1,2),(2,0),(2,1)])
- assert_equal(sorted(G.edges(0)),[(0,1),(0,2)])
+ G = self.K3
+ assert_equal(sorted(G.edges()), [(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)])
+ assert_equal(sorted(G.edges(0)), [(0, 1), (0, 2)])
assert_equal(sorted(G.edges([0, 1])), [(0, 1), (0, 2), (1, 0), (1, 2)])
- assert_raises((KeyError,networkx.NetworkXError), G.edges,-1)
+ assert_raises((KeyError, nx.NetworkXError), G.edges, -1)
def test_edges_data(self):
- G=self.K3
- all_edges = [(0,1,{}),(0,2,{}),(1,0,{}),(1,2,{}),(2,0,{}),(2,1,{})]
+ G = self.K3
+ all_edges = [(0, 1, {}), (0, 2, {}), (1, 0, {}), (1, 2, {}), (2, 0, {}), (2, 1, {})]
assert_equal(sorted(G.edges(data=True)), all_edges)
assert_equal(sorted(G.edges(0, data=True)), all_edges[:2])
assert_equal(sorted(G.edges([0, 1], data=True)), all_edges[:4])
- assert_raises((KeyError,networkx.NetworkXError), G.edges, -1, True)
+ assert_raises((KeyError, nx.NetworkXError), G.edges, -1, True)
def test_out_edges(self):
- G=self.K3
- assert_equal(sorted(G.out_edges()),
- [(0,1),(0,2),(1,0),(1,2),(2,0),(2,1)])
- assert_equal(sorted(G.out_edges(0)),[(0,1),(0,2)])
- assert_raises((KeyError,networkx.NetworkXError), G.out_edges,-1)
+ G = self.K3
+ assert_equal(sorted(G.out_edges()), [(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)])
+ assert_equal(sorted(G.out_edges(0)), [(0, 1), (0, 2)])
+ assert_raises((KeyError, nx.NetworkXError), G.out_edges, -1)
def test_out_edges_dir(self):
- G=self.P3
- assert_equal(sorted(G.out_edges()),[(0, 1), (1, 2)])
- assert_equal(sorted(G.out_edges(0)),[(0, 1)])
- assert_equal(sorted(G.out_edges(2)),[])
+ G = self.P3
+ assert_equal(sorted(G.out_edges()), [(0, 1), (1, 2)])
+ assert_equal(sorted(G.out_edges(0)), [(0, 1)])
+ assert_equal(sorted(G.out_edges(2)), [])
def test_out_edges_data(self):
- G=networkx.DiGraph([(0, 1, {'data' : 0}), (1, 0, {})])
- assert_equal(sorted(G.out_edges(data=True)), [(0, 1, {'data' : 0}), (1, 0, {})])
- assert_equal(sorted(G.out_edges(0, data=True)), [(0, 1, {'data' : 0})])
+ G = nx.DiGraph([(0, 1, {'data': 0}), (1, 0, {})])
+ assert_equal(sorted(G.out_edges(data=True)), [(0, 1, {'data': 0}), (1, 0, {})])
+ assert_equal(sorted(G.out_edges(0, data=True)), [(0, 1, {'data': 0})])
assert_equal(sorted(G.out_edges(data='data')), [(0, 1, 0), (1, 0, None)])
assert_equal(sorted(G.out_edges(0, data='data')), [(0, 1, 0)])
def test_in_edges_dir(self):
- G=self.P3
+ G = self.P3
assert_equal(sorted(G.in_edges()), [(0, 1), (1, 2)])
assert_equal(sorted(G.in_edges(0)), [])
- assert_equal(sorted(G.in_edges(2)), [(1,2)])
+ assert_equal(sorted(G.in_edges(2)), [(1, 2)])
def test_in_edges_data(self):
- G=networkx.DiGraph([(0, 1, {'data' : 0}), (1, 0, {})])
- assert_equal(sorted(G.in_edges(data=True)), [(0, 1, {'data' : 0}), (1, 0, {})])
- assert_equal(sorted(G.in_edges(1, data=True)), [(0, 1, {'data' : 0})])
+ G = nx.DiGraph([(0, 1, {'data': 0}), (1, 0, {})])
+ assert_equal(sorted(G.in_edges(data=True)), [(0, 1, {'data': 0}), (1, 0, {})])
+ assert_equal(sorted(G.in_edges(1, data=True)), [(0, 1, {'data': 0})])
assert_equal(sorted(G.in_edges(data='data')), [(0, 1, 0), (1, 0, None)])
assert_equal(sorted(G.in_edges(1, data='data')), [(0, 1, 0)])
def test_degree(self):
- G=self.K3
- assert_equal(sorted(G.degree()),[(0,4),(1,4),(2,4)])
- assert_equal(dict(G.degree()),{0:4,1:4,2:4})
+ G = self.K3
+ assert_equal(sorted(G.degree()), [(0, 4), (1, 4), (2, 4)])
+ assert_equal(dict(G.degree()), {0: 4, 1: 4, 2: 4})
assert_equal(G.degree(0), 4)
- assert_equal(list(G.degree(iter([0]))), [(0, 4)]) #run through iterator
+ assert_equal(list(G.degree(iter([0]))), [
+ (0, 4)]) # run through iterator
def test_in_degree(self):
- G=self.K3
- assert_equal(sorted(G.in_degree()),[(0,2),(1,2),(2,2)])
- assert_equal(dict(G.in_degree()),{0:2,1:2,2:2})
+ G = self.K3
+ assert_equal(sorted(G.in_degree()), [(0, 2), (1, 2), (2, 2)])
+ assert_equal(dict(G.in_degree()), {0: 2, 1: 2, 2: 2})
assert_equal(G.in_degree(0), 2)
- assert_equal(list(G.in_degree(iter([0]))), [(0, 2)]) #run through iterator
+ assert_equal(list(G.in_degree(iter([0]))), [(0, 2)]) # run through iterator
def test_in_degree_weighted(self):
- G=self.K3
- G.add_edge(0,1,weight=0.3,other=1.2)
- assert_equal(sorted(G.in_degree(weight='weight')),[(0,2),(1,1.3),(2,2)])
- assert_equal(dict(G.in_degree(weight='weight')),{0:2,1:1.3,2:2})
- assert_equal(G.in_degree(1,weight='weight'), 1.3)
- assert_equal(sorted(G.in_degree(weight='other')),[(0,2),(1,2.2),(2,2)])
- assert_equal(dict(G.in_degree(weight='other')),{0:2,1:2.2,2:2})
- assert_equal(G.in_degree(1,weight='other'), 2.2)
- assert_equal(list(G.in_degree(iter([1]),weight='other')), [(1, 2.2)])
+ G = self.K3
+ G.add_edge(0, 1, weight=0.3, other=1.2)
+ assert_equal(sorted(G.in_degree(weight='weight')), [(0, 2), (1, 1.3), (2, 2)])
+ assert_equal(dict(G.in_degree(weight='weight')), {0: 2, 1: 1.3, 2: 2})
+ assert_equal(G.in_degree(1, weight='weight'), 1.3)
+ assert_equal(sorted(G.in_degree(weight='other')), [(0, 2), (1, 2.2), (2, 2)])
+ assert_equal(dict(G.in_degree(weight='other')), {0: 2, 1: 2.2, 2: 2})
+ assert_equal(G.in_degree(1, weight='other'), 2.2)
+ assert_equal(list(G.in_degree(iter([1]), weight='other')), [(1, 2.2)])
def test_out_degree_weighted(self):
- G=self.K3
- G.add_edge(0,1,weight=0.3,other=1.2)
- assert_equal(sorted(G.out_degree(weight='weight')),[(0,1.3),(1,2),(2,2)])
- assert_equal(dict(G.out_degree(weight='weight')),{0:1.3,1:2,2:2})
- assert_equal(G.out_degree(0,weight='weight'), 1.3)
- assert_equal(sorted(G.out_degree(weight='other')),[(0,2.2),(1,2),(2,2)])
- assert_equal(dict(G.out_degree(weight='other')),{0:2.2,1:2,2:2})
- assert_equal(G.out_degree(0,weight='other'), 2.2)
+ G = self.K3
+ G.add_edge(0, 1, weight=0.3, other=1.2)
+ assert_equal(sorted(G.out_degree(weight='weight')), [(0, 1.3), (1, 2), (2, 2)])
+ assert_equal(dict(G.out_degree(weight='weight')), {0: 1.3, 1: 2, 2: 2})
+ assert_equal(G.out_degree(0, weight='weight'), 1.3)
+ assert_equal(sorted(G.out_degree(weight='other')), [(0, 2.2), (1, 2), (2, 2)])
+ assert_equal(dict(G.out_degree(weight='other')), {0: 2.2, 1: 2, 2: 2})
+ assert_equal(G.out_degree(0, weight='other'), 2.2)
assert_equal(list(G.out_degree(iter([0]), weight='other')), [(0, 2.2)])
def test_out_degree(self):
- G=self.K3
- assert_equal(sorted(G.out_degree()),[(0,2),(1,2),(2,2)])
- assert_equal(dict(G.out_degree()),{0:2,1:2,2:2})
+ G = self.K3
+ assert_equal(sorted(G.out_degree()), [(0, 2), (1, 2), (2, 2)])
+ assert_equal(dict(G.out_degree()), {0: 2, 1: 2, 2: 2})
assert_equal(G.out_degree(0), 2)
assert_equal(list(G.out_degree(iter([0]))), [(0, 2)])
def test_size(self):
- G=self.K3
- assert_equal(G.size(),6)
- assert_equal(G.number_of_edges(),6)
+ G = self.K3
+ assert_equal(G.size(), 6)
+ assert_equal(G.number_of_edges(), 6)
def test_to_undirected_reciprocal(self):
- G=self.Graph()
- G.add_edge(1,2)
- assert_true(G.to_undirected().has_edge(1,2))
- assert_false(G.to_undirected(reciprocal=True).has_edge(1,2))
- G.add_edge(2,1)
- assert_true(G.to_undirected(reciprocal=True).has_edge(1,2))
+ G = self.Graph()
+ G.add_edge(1, 2)
+ assert_true(G.to_undirected().has_edge(1, 2))
+ assert_false(G.to_undirected(reciprocal=True).has_edge(1, 2))
+ G.add_edge(2, 1)
+ assert_true(G.to_undirected(reciprocal=True).has_edge(1, 2))
def test_reverse_copy(self):
- G=networkx.DiGraph([(0,1),(1,2)])
- R=G.reverse()
- assert_equal(sorted(R.edges()),[(1,0),(2,1)])
- R.remove_edge(1,0)
- assert_equal(sorted(R.edges()),[(2,1)])
- assert_equal(sorted(G.edges()),[(0,1),(1,2)])
+ G = nx.DiGraph([(0, 1), (1, 2)])
+ R = G.reverse()
+ assert_equal(sorted(R.edges()), [(1, 0), (2, 1)])
+ R.remove_edge(1, 0)
+ assert_equal(sorted(R.edges()), [(2, 1)])
+ assert_equal(sorted(G.edges()), [(0, 1), (1, 2)])
def test_reverse_nocopy(self):
- G=networkx.DiGraph([(0,1),(1,2)])
- R=G.reverse(copy=False)
- assert_equal(sorted(R.edges()),[(1,0),(2,1)])
- R.remove_edge(1,0)
- assert_equal(sorted(R.edges()),[(2,1)])
- assert_equal(sorted(G.edges()),[(2,1)])
-
-
-class BaseAttrDiGraphTester(BaseDiGraphTester,BaseAttrGraphTester):
+ G = nx.DiGraph([(0, 1), (1, 2)])
+ R = G.reverse(copy=False)
+ assert_equal(sorted(R.edges()), [(1, 0), (2, 1)])
+ R.remove_edge(1, 0)
+ assert_equal(sorted(R.edges()), [(2, 1)])
+ assert_equal(sorted(G.edges()), [(2, 1)])
+
+ def test_reverse_hashable(self):
+ class Foo(object):
+ pass
+ x = Foo()
+ y = Foo()
+ G = nx.DiGraph()
+ G.add_edge(x, y)
+ assert_nodes_equal(G.nodes(), G.reverse().nodes())
+ assert_equal([(y, x)], list(G.reverse().edges()))
+
+
+class BaseAttrDiGraphTester(BaseDiGraphTester, BaseAttrGraphTester):
pass
-class TestDiGraph(BaseAttrDiGraphTester,TestGraph):
+class TestDiGraph(BaseAttrDiGraphTester, TestGraph):
"""Tests specific to dict-of-dict-of-dict digraph data structure"""
+
def setUp(self):
- self.Graph=networkx.DiGraph
+ self.Graph = nx.DiGraph
# build dict-of-dict-of-dict K3
- ed1,ed2,ed3,ed4,ed5,ed6 = ({},{},{},{},{},{})
- self.k3adj = {0: {1: ed1, 2: ed2}, 1: {0: ed3, 2: ed4}, 2: {0: ed5, 1:ed6}}
+ ed1, ed2, ed3, ed4, ed5, ed6 = ({}, {}, {}, {}, {}, {})
+ self.k3adj = {0: {1: ed1, 2: ed2}, 1: {0: ed3, 2: ed4}, 2: {0: ed5, 1: ed6}}
self.k3edges = [(0, 1), (0, 2), (1, 2)]
self.k3nodes = [0, 1, 2]
self.K3 = self.Graph()
self.K3._adj = self.K3._succ = self.k3adj
- self.K3._pred = {0: {1: ed3, 2: ed5}, 1: {0: ed1, 2: ed6}, 2: {0: ed2, 1:ed4}}
+ self.K3._pred = {0: {1: ed3, 2: ed5}, 1: {0: ed1, 2: ed6}, 2: {0: ed2, 1: ed4}}
self.K3._node = {}
self.K3._node[0] = {}
self.K3._node[1] = {}
self.K3._node[2] = {}
- ed1,ed2 = ({},{})
+ ed1, ed2 = ({}, {})
self.P3 = self.Graph()
self.P3._adj = {0: {1: ed1}, 1: {2: ed2}, 2: {}}
self.P3._succ = self.P3._adj
@@ -178,48 +197,48 @@ class TestDiGraph(BaseAttrDiGraphTester,TestGraph):
self.P3._node[2] = {}
def test_data_input(self):
- G=self.Graph(data={1:[2],2:[1]}, name="test")
- assert_equal(G.name,"test")
- assert_equal(sorted(G.adj.items()),[(1, {2: {}}), (2, {1: {}})])
- assert_equal(sorted(G.succ.items()),[(1, {2: {}}), (2, {1: {}})])
- assert_equal(sorted(G.pred.items()),[(1, {2: {}}), (2, {1: {}})])
+ G = self.Graph(data={1: [2], 2: [1]}, name="test")
+ assert_equal(G.name, "test")
+ assert_equal(sorted(G.adj.items()), [(1, {2: {}}), (2, {1: {}})])
+ assert_equal(sorted(G.succ.items()), [(1, {2: {}}), (2, {1: {}})])
+ assert_equal(sorted(G.pred.items()), [(1, {2: {}}), (2, {1: {}})])
def test_add_edge(self):
- G=self.Graph()
- G.add_edge(0,1)
- assert_equal(G.adj,{0: {1: {}}, 1: {}})
- assert_equal(G.succ,{0: {1: {}}, 1: {}})
- assert_equal(G.pred,{0: {}, 1: {0:{}}})
- G=self.Graph()
- G.add_edge(*(0,1))
- assert_equal(G.adj,{0: {1: {}}, 1: {}})
- assert_equal(G.succ,{0: {1: {}}, 1: {}})
- assert_equal(G.pred,{0: {}, 1: {0:{}}})
+ G = self.Graph()
+ G.add_edge(0, 1)
+ assert_equal(G.adj, {0: {1: {}}, 1: {}})
+ assert_equal(G.succ, {0: {1: {}}, 1: {}})
+ assert_equal(G.pred, {0: {}, 1: {0: {}}})
+ G = self.Graph()
+ G.add_edge(*(0, 1))
+ assert_equal(G.adj, {0: {1: {}}, 1: {}})
+ assert_equal(G.succ, {0: {1: {}}, 1: {}})
+ assert_equal(G.pred, {0: {}, 1: {0: {}}})
def test_add_edges_from(self):
- G=self.Graph()
- G.add_edges_from([(0,1),(0,2,{'data':3})],data=2)
- assert_equal(G.adj,{0: {1: {'data':2}, 2: {'data':3}}, 1: {}, 2: {}})
- assert_equal(G.succ,{0: {1: {'data':2}, 2: {'data':3}}, 1: {}, 2: {}})
- assert_equal(G.pred,{0: {}, 1: {0: {'data':2}}, 2: {0: {'data':3}}})
+ G = self.Graph()
+ G.add_edges_from([(0, 1), (0, 2, {'data': 3})], data=2)
+ assert_equal(G.adj, {0: {1: {'data': 2}, 2: {'data': 3}}, 1: {}, 2: {}})
+ assert_equal(G.succ, {0: {1: {'data': 2}, 2: {'data': 3}}, 1: {}, 2: {}})
+ assert_equal(G.pred, {0: {}, 1: {0: {'data': 2}}, 2: {0: {'data': 3}}})
- assert_raises(networkx.NetworkXError, G.add_edges_from,[(0,)]) # too few in tuple
- assert_raises(networkx.NetworkXError, G.add_edges_from,[(0,1,2,3)]) # too many in tuple
- assert_raises(TypeError, G.add_edges_from,[0]) # not a tuple
+ assert_raises(nx.NetworkXError, G.add_edges_from, [(0,)]) # too few in tuple
+ assert_raises(nx.NetworkXError, G.add_edges_from, [(0, 1, 2, 3)]) # too many in tuple
+ assert_raises(TypeError, G.add_edges_from, [0]) # not a tuple
def test_remove_edge(self):
- G=self.K3
- G.remove_edge(0,1)
- assert_equal(G.succ,{0:{2:{}},1:{0:{},2:{}},2:{0:{},1:{}}})
- assert_equal(G.pred,{0:{1:{}, 2:{}}, 1:{2:{}}, 2:{0:{},1:{}}})
- assert_raises((KeyError,networkx.NetworkXError), G.remove_edge,-1,0)
+ G = self.K3
+ G.remove_edge(0, 1)
+ assert_equal(G.succ, {0: {2: {}}, 1: {0: {}, 2: {}}, 2: {0: {}, 1: {}}})
+ assert_equal(G.pred, {0: {1: {}, 2: {}}, 1: {2: {}}, 2: {0: {}, 1: {}}})
+ assert_raises((KeyError, nx.NetworkXError), G.remove_edge, -1, 0)
def test_remove_edges_from(self):
- G=self.K3
- G.remove_edges_from([(0,1)])
- assert_equal(G.succ,{0:{2:{}},1:{0:{},2:{}},2:{0:{},1:{}}})
- assert_equal(G.pred,{0:{1:{}, 2:{}}, 1:{2:{}}, 2:{0:{},1: {}}})
- G.remove_edges_from([(0,0)]) # silent fail
+ G = self.K3
+ G.remove_edges_from([(0, 1)])
+ assert_equal(G.succ, {0: {2: {}}, 1: {0: {}, 2: {}}, 2: {0: {}, 1: {}}})
+ assert_equal(G.pred, {0: {1: {}, 2: {}}, 1: {2: {}}, 2: {0: {}, 1: {}}})
+ G.remove_edges_from([(0, 0)]) # silent fail
class TestEdgeSubgraph(TestGraphEdgeSubgraph):
@@ -227,7 +246,7 @@ class TestEdgeSubgraph(TestGraphEdgeSubgraph):
def setup(self):
# Create a doubly-linked path graph on five nodes.
- G = networkx.DiGraph(networkx.path_graph(5))
+ G = nx.DiGraph(nx.path_graph(5))
# Add some node, edge, and graph attributes.
for i in range(5):
G.node[i]['name'] = 'node{}'.format(i)
@@ -244,7 +263,7 @@ class TestEdgeSubgraph(TestGraphEdgeSubgraph):
For more information, see GitHub issue #2370.
"""
- G = networkx.DiGraph()
+ G = nx.DiGraph()
G.add_edge(0, 1)
H = G.edge_subgraph([(0, 1)])
assert_equal(list(H.predecessors(0)), [])
diff --git a/networkx/tests/test_relabel.py b/networkx/tests/test_relabel.py
index 31d25fe90..f99ec7354 100644
--- a/networkx/tests/test_relabel.py
+++ b/networkx/tests/test_relabel.py
@@ -3,153 +3,161 @@ from nose.tools import *
from networkx import *
from networkx.convert import *
from networkx.algorithms.operators import *
-from networkx.generators.classic import barbell_graph,cycle_graph
+from networkx.generators.classic import barbell_graph, cycle_graph
from networkx.testing import *
+
class TestRelabel():
def test_convert_node_labels_to_integers(self):
# test that empty graph converts fine for all options
- G=empty_graph()
- H=convert_node_labels_to_integers(G,100)
+ G = empty_graph()
+ H = convert_node_labels_to_integers(G, 100)
assert_equal(H.name, '(empty_graph(0))_with_int_labels')
assert_equal(list(H.nodes()), [])
assert_equal(list(H.edges()), [])
- for opt in ["default", "sorted", "increasing degree",
- "decreasing degree"]:
- G=empty_graph()
- H=convert_node_labels_to_integers(G,100, ordering=opt)
+ for opt in ["default", "sorted", "increasing degree", "decreasing degree"]:
+ G = empty_graph()
+ H = convert_node_labels_to_integers(G, 100, ordering=opt)
assert_equal(H.name, '(empty_graph(0))_with_int_labels')
assert_equal(list(H.nodes()), [])
assert_equal(list(H.edges()), [])
- G=empty_graph()
- G.add_edges_from([('A','B'),('A','C'),('B','C'),('C','D')])
- G.name="paw"
- H=convert_node_labels_to_integers(G)
+ G = empty_graph()
+ G.add_edges_from([('A', 'B'), ('A', 'C'), ('B', 'C'), ('C', 'D')])
+ G.name = "paw"
+ H = convert_node_labels_to_integers(G)
degH = (d for n, d in H.degree())
degG = (d for n, d in G.degree())
assert_equal(sorted(degH), sorted(degG))
- H=convert_node_labels_to_integers(G,1000)
+ H = convert_node_labels_to_integers(G, 1000)
degH = (d for n, d in H.degree())
degG = (d for n, d in G.degree())
assert_equal(sorted(degH), sorted(degG))
assert_nodes_equal(H.nodes(), [1000, 1001, 1002, 1003])
- H=convert_node_labels_to_integers(G,ordering="increasing degree")
+ H = convert_node_labels_to_integers(G, ordering="increasing degree")
degH = (d for n, d in H.degree())
degG = (d for n, d in G.degree())
assert_equal(sorted(degH), sorted(degG))
- assert_equal(degree(H,0), 1)
- assert_equal(degree(H,1), 2)
- assert_equal(degree(H,2), 2)
- assert_equal(degree(H,3), 3)
+ assert_equal(degree(H, 0), 1)
+ assert_equal(degree(H, 1), 2)
+ assert_equal(degree(H, 2), 2)
+ assert_equal(degree(H, 3), 3)
- H=convert_node_labels_to_integers(G,ordering="decreasing degree")
+ H = convert_node_labels_to_integers(G, ordering="decreasing degree")
degH = (d for n, d in H.degree())
degG = (d for n, d in G.degree())
assert_equal(sorted(degH), sorted(degG))
- assert_equal(degree(H,0), 3)
- assert_equal(degree(H,1), 2)
- assert_equal(degree(H,2), 2)
- assert_equal(degree(H,3), 1)
+ assert_equal(degree(H, 0), 3)
+ assert_equal(degree(H, 1), 2)
+ assert_equal(degree(H, 2), 2)
+ assert_equal(degree(H, 3), 1)
- H=convert_node_labels_to_integers(G,ordering="increasing degree",
- label_attribute='label')
+ H = convert_node_labels_to_integers(G, ordering="increasing degree",
+ label_attribute='label')
degH = (d for n, d in H.degree())
degG = (d for n, d in G.degree())
assert_equal(sorted(degH), sorted(degG))
- assert_equal(degree(H,0), 1)
- assert_equal(degree(H,1), 2)
- assert_equal(degree(H,2), 2)
- assert_equal(degree(H,3), 3)
+ assert_equal(degree(H, 0), 1)
+ assert_equal(degree(H, 1), 2)
+ assert_equal(degree(H, 2), 2)
+ assert_equal(degree(H, 3), 3)
# check mapping
- assert_equal(H.node[3]['label'],'C')
- assert_equal(H.node[0]['label'],'D')
- assert_true(H.node[1]['label']=='A' or H.node[2]['label']=='A')
- assert_true(H.node[1]['label']=='B' or H.node[2]['label']=='B')
+ assert_equal(H.node[3]['label'], 'C')
+ assert_equal(H.node[0]['label'], 'D')
+ assert_true(H.node[1]['label'] == 'A' or H.node[2]['label'] == 'A')
+ assert_true(H.node[1]['label'] == 'B' or H.node[2]['label'] == 'B')
def test_convert_to_integers2(self):
- G=empty_graph()
- G.add_edges_from([('C','D'),('A','B'),('A','C'),('B','C')])
- G.name="paw"
- H=convert_node_labels_to_integers(G,ordering="sorted")
+ G = empty_graph()
+ G.add_edges_from([('C', 'D'), ('A', 'B'), ('A', 'C'), ('B', 'C')])
+ G.name = "paw"
+ H = convert_node_labels_to_integers(G, ordering="sorted")
degH = (d for n, d in H.degree())
degG = (d for n, d in G.degree())
assert_equal(sorted(degH), sorted(degG))
- H=convert_node_labels_to_integers(G,ordering="sorted",
- label_attribute='label')
- assert_equal(H.node[0]['label'],'A')
- assert_equal(H.node[1]['label'],'B')
- assert_equal(H.node[2]['label'],'C')
- assert_equal(H.node[3]['label'],'D')
+ H = convert_node_labels_to_integers(G, ordering="sorted",
+ label_attribute='label')
+ assert_equal(H.node[0]['label'], 'A')
+ assert_equal(H.node[1]['label'], 'B')
+ assert_equal(H.node[2]['label'], 'C')
+ assert_equal(H.node[3]['label'], 'D')
@raises(nx.NetworkXError)
def test_convert_to_integers_raise(self):
G = nx.Graph()
- H=convert_node_labels_to_integers(G,ordering="increasing age")
-
+ H = convert_node_labels_to_integers(G, ordering="increasing age")
def test_relabel_nodes_copy(self):
- G=empty_graph()
- G.add_edges_from([('A','B'),('A','C'),('B','C'),('C','D')])
- mapping={'A':'aardvark','B':'bear','C':'cat','D':'dog'}
- H=relabel_nodes(G,mapping)
+ G = empty_graph()
+ G.add_edges_from([('A', 'B'), ('A', 'C'), ('B', 'C'), ('C', 'D')])
+ mapping = {'A': 'aardvark', 'B': 'bear', 'C': 'cat', 'D': 'dog'}
+ H = relabel_nodes(G, mapping)
assert_nodes_equal(H.nodes(), ['aardvark', 'bear', 'cat', 'dog'])
def test_relabel_nodes_function(self):
- G=empty_graph()
- G.add_edges_from([('A','B'),('A','C'),('B','C'),('C','D')])
+ G = empty_graph()
+ G.add_edges_from([('A', 'B'), ('A', 'C'), ('B', 'C'), ('C', 'D')])
# function mapping no longer encouraged but works
+
def mapping(n):
return ord(n)
- H=relabel_nodes(G,mapping)
+ H = relabel_nodes(G, mapping)
assert_nodes_equal(H.nodes(), [65, 66, 67, 68])
def test_relabel_nodes_graph(self):
- G=Graph([('A','B'),('A','C'),('B','C'),('C','D')])
- mapping={'A':'aardvark','B':'bear','C':'cat','D':'dog'}
- H=relabel_nodes(G,mapping)
+ G = Graph([('A', 'B'), ('A', 'C'), ('B', 'C'), ('C', 'D')])
+ mapping = {'A': 'aardvark', 'B': 'bear', 'C': 'cat', 'D': 'dog'}
+ H = relabel_nodes(G, mapping)
assert_nodes_equal(H.nodes(), ['aardvark', 'bear', 'cat', 'dog'])
+ def test_relabel_nodes_orderedgraph(self):
+ G = OrderedGraph()
+ G.add_nodes_from([1, 2, 3])
+ G.add_edges_from([(1, 3), (2, 3)])
+ mapping = {1: 'a', 2: 'b', 3: 'c'}
+ H = relabel_nodes(G, mapping)
+ assert list(H.nodes) == ['a', 'b', 'c']
+
def test_relabel_nodes_digraph(self):
- G=DiGraph([('A','B'),('A','C'),('B','C'),('C','D')])
- mapping={'A':'aardvark','B':'bear','C':'cat','D':'dog'}
- H=relabel_nodes(G,mapping,copy=False)
+ G = DiGraph([('A', 'B'), ('A', 'C'), ('B', 'C'), ('C', 'D')])
+ mapping = {'A': 'aardvark', 'B': 'bear', 'C': 'cat', 'D': 'dog'}
+ H = relabel_nodes(G, mapping, copy=False)
assert_nodes_equal(H.nodes(), ['aardvark', 'bear', 'cat', 'dog'])
def test_relabel_nodes_multigraph(self):
- G=MultiGraph([('a','b'),('a','b')])
- mapping={'a':'aardvark','b':'bear'}
- G=relabel_nodes(G,mapping,copy=False)
- assert_nodes_equal(G.nodes(),['aardvark', 'bear'])
- assert_edges_equal(G.edges(),[('aardvark', 'bear'), ('aardvark', 'bear')])
+ G = MultiGraph([('a', 'b'), ('a', 'b')])
+ mapping = {'a': 'aardvark', 'b': 'bear'}
+ G = relabel_nodes(G, mapping, copy=False)
+ assert_nodes_equal(G.nodes(), ['aardvark', 'bear'])
+ assert_edges_equal(G.edges(), [('aardvark', 'bear'), ('aardvark', 'bear')])
def test_relabel_nodes_multidigraph(self):
- G=MultiDiGraph([('a','b'),('a','b')])
- mapping={'a':'aardvark','b':'bear'}
- G=relabel_nodes(G,mapping,copy=False)
- assert_nodes_equal(G.nodes(),['aardvark', 'bear'])
- assert_edges_equal(G.edges(),[('aardvark', 'bear'), ('aardvark', 'bear')])
+ G = MultiDiGraph([('a', 'b'), ('a', 'b')])
+ mapping = {'a': 'aardvark', 'b': 'bear'}
+ G = relabel_nodes(G, mapping, copy=False)
+ assert_nodes_equal(G.nodes(), ['aardvark', 'bear'])
+ assert_edges_equal(G.edges(), [('aardvark', 'bear'), ('aardvark', 'bear')])
def test_relabel_isolated_nodes_to_same(self):
- G=Graph()
+ G = Graph()
G.add_nodes_from(range(4))
- mapping={1:1}
- H=relabel_nodes(G, mapping, copy=False)
+ mapping = {1: 1}
+ H = relabel_nodes(G, mapping, copy=False)
assert_nodes_equal(H.nodes(), list(range(4)))
@raises(KeyError)
def test_relabel_nodes_missing(self):
- G=Graph([('A','B'),('A','C'),('B','C'),('C','D')])
- mapping={0:'aardvark'}
- G=relabel_nodes(G,mapping,copy=False)
+ G = Graph([('A', 'B'), ('A', 'C'), ('B', 'C'), ('C', 'D')])
+ mapping = {0: 'aardvark'}
+ G = relabel_nodes(G, mapping, copy=False)
def test_relabel_copy_name(self):
- G=Graph()
+ G = Graph()
H = relabel_nodes(G, {}, copy=True)
assert_equal(H.graph, G.graph)
H = relabel_nodes(G, {}, copy=False)
@@ -161,22 +169,21 @@ class TestRelabel():
assert_equal(H.graph, G.graph)
def test_relabel_toposort(self):
- K5=nx.complete_graph(4)
- G=nx.complete_graph(4)
- G=nx.relabel_nodes(G,dict( [(i,i+1) for i in range(4)]),copy=False)
- nx.is_isomorphic(K5,G)
- G=nx.complete_graph(4)
- G=nx.relabel_nodes(G,dict( [(i,i-1) for i in range(4)]),copy=False)
- nx.is_isomorphic(K5,G)
-
+ K5 = nx.complete_graph(4)
+ G = nx.complete_graph(4)
+ G = nx.relabel_nodes(G, dict([(i, i + 1) for i in range(4)]), copy=False)
+ nx.is_isomorphic(K5, G)
+ G = nx.complete_graph(4)
+ G = nx.relabel_nodes(G, dict([(i, i - 1) for i in range(4)]), copy=False)
+ nx.is_isomorphic(K5, G)
def test_relabel_selfloop(self):
G = nx.DiGraph([(1, 1), (1, 2), (2, 3)])
G = nx.relabel_nodes(G, {1: 'One', 2: 'Two', 3: 'Three'}, copy=False)
- assert_nodes_equal(G.nodes(),['One','Three','Two'])
+ assert_nodes_equal(G.nodes(), ['One', 'Three', 'Two'])
G = nx.MultiDiGraph([(1, 1), (1, 2), (2, 3)])
G = nx.relabel_nodes(G, {1: 'One', 2: 'Two', 3: 'Three'}, copy=False)
- assert_nodes_equal(G.nodes(),['One','Three','Two'])
+ assert_nodes_equal(G.nodes(), ['One', 'Three', 'Two'])
G = nx.MultiDiGraph([(1, 1)])
G = nx.relabel_nodes(G, {1: 0}, copy=False)
assert_nodes_equal(G.nodes(), [0])
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 2
} | help | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y libgdal-dev graphviz"
],
"python": "3.6",
"reqs_path": [
"requirements/default.txt",
"requirements/test.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
decorator==5.1.1
importlib-metadata==4.8.3
iniconfig==1.1.1
-e git+https://github.com/networkx/networkx.git@22a6ebaf0c235a825195e48558f39b65c26d5a1c#egg=networkx
nose==1.3.7
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: networkx
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- decorator==5.1.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- nose==1.3.7
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/networkx
| [
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_reverse_hashable",
"networkx/tests/test_relabel.py::TestRelabel::test_relabel_nodes_orderedgraph"
] | [
"networkx/classes/tests/test_digraph.py::TestGraph::test_contains",
"networkx/classes/tests/test_digraph.py::TestGraph::test_order",
"networkx/classes/tests/test_digraph.py::TestGraph::test_nodes",
"networkx/classes/tests/test_digraph.py::TestGraph::test_has_node",
"networkx/classes/tests/test_digraph.py::TestGraph::test_has_edge",
"networkx/classes/tests/test_digraph.py::TestGraph::test_neighbors",
"networkx/classes/tests/test_digraph.py::TestGraph::test_edges",
"networkx/classes/tests/test_digraph.py::TestGraph::test_degree",
"networkx/classes/tests/test_digraph.py::TestGraph::test_size",
"networkx/classes/tests/test_digraph.py::TestGraph::test_nbunch_iter",
"networkx/classes/tests/test_digraph.py::TestGraph::test_nbunch_iter_node_format_raise",
"networkx/classes/tests/test_digraph.py::TestGraph::test_selfloop_degree",
"networkx/classes/tests/test_digraph.py::TestGraph::test_selfloops",
"networkx/classes/tests/test_digraph.py::TestGraph::test_weighted_degree",
"networkx/classes/tests/test_digraph.py::TestGraph::test_name",
"networkx/classes/tests/test_digraph.py::TestGraph::test_copy",
"networkx/classes/tests/test_digraph.py::TestGraph::test_class_copy",
"networkx/classes/tests/test_digraph.py::TestGraph::test_attr_reference",
"networkx/classes/tests/test_digraph.py::TestGraph::test_fresh_copy",
"networkx/classes/tests/test_digraph.py::TestGraph::test_graph_attr",
"networkx/classes/tests/test_digraph.py::TestGraph::test_node_attr",
"networkx/classes/tests/test_digraph.py::TestGraph::test_node_attr2",
"networkx/classes/tests/test_digraph.py::TestGraph::test_edge_lookup",
"networkx/classes/tests/test_digraph.py::TestGraph::test_edge_attr",
"networkx/classes/tests/test_digraph.py::TestGraph::test_edge_attr2",
"networkx/classes/tests/test_digraph.py::TestGraph::test_edge_attr3",
"networkx/classes/tests/test_digraph.py::TestGraph::test_edge_attr4",
"networkx/classes/tests/test_digraph.py::TestGraph::test_to_undirected",
"networkx/classes/tests/test_digraph.py::TestGraph::test_to_directed",
"networkx/classes/tests/test_digraph.py::TestGraph::test_subgraph",
"networkx/classes/tests/test_digraph.py::TestGraph::test_selfloops_attr",
"networkx/classes/tests/test_digraph.py::TestGraph::test_data_input",
"networkx/classes/tests/test_digraph.py::TestGraph::test_adjacency",
"networkx/classes/tests/test_digraph.py::TestGraph::test_getitem",
"networkx/classes/tests/test_digraph.py::TestGraph::test_add_node",
"networkx/classes/tests/test_digraph.py::TestGraph::test_add_nodes_from",
"networkx/classes/tests/test_digraph.py::TestGraph::test_remove_node",
"networkx/classes/tests/test_digraph.py::TestGraph::test_remove_nodes_from",
"networkx/classes/tests/test_digraph.py::TestGraph::test_add_edge",
"networkx/classes/tests/test_digraph.py::TestGraph::test_add_edges_from",
"networkx/classes/tests/test_digraph.py::TestGraph::test_remove_edge",
"networkx/classes/tests/test_digraph.py::TestGraph::test_remove_edges_from",
"networkx/classes/tests/test_digraph.py::TestGraph::test_clear",
"networkx/classes/tests/test_digraph.py::TestGraph::test_edges_data",
"networkx/classes/tests/test_digraph.py::TestGraph::test_get_edge_data",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_contains",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_order",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_nodes",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_has_node",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_has_edge",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_neighbors",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_nbunch_iter",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_nbunch_iter_node_format_raise",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_selfloop_degree",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_selfloops",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_weighted_degree",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_name",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_copy",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_class_copy",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_attr_reference",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_fresh_copy",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_graph_attr",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_node_attr",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_node_attr2",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_edge_lookup",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_edge_attr",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_edge_attr2",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_edge_attr3",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_edge_attr4",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_to_undirected",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_to_directed",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_subgraph",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_selfloops_attr",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_adjacency",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_getitem",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_add_node",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_add_nodes_from",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_remove_node",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_remove_nodes_from",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_clear",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_get_edge_data",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_has_successor",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_successors",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_has_predecessor",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_predecessors",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_edges",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_edges_data",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_out_edges",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_out_edges_dir",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_in_edges_dir",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_degree",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_in_degree",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_in_degree_weighted",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_out_degree_weighted",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_out_degree",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_size",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_to_undirected_reciprocal",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_data_input",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_add_edge",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_add_edges_from",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_remove_edge",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_remove_edges_from",
"networkx/tests/test_relabel.py::test"
] | [
"networkx/classes/tests/test_digraph.py::TestGraphEdgeSubgraph::test_correct_nodes",
"networkx/classes/tests/test_digraph.py::TestGraphEdgeSubgraph::test_correct_edges",
"networkx/classes/tests/test_digraph.py::TestGraphEdgeSubgraph::test_add_node",
"networkx/classes/tests/test_digraph.py::TestGraphEdgeSubgraph::test_remove_node",
"networkx/classes/tests/test_digraph.py::TestGraphEdgeSubgraph::test_node_attr_dict",
"networkx/classes/tests/test_digraph.py::TestGraphEdgeSubgraph::test_edge_attr_dict",
"networkx/classes/tests/test_digraph.py::TestGraphEdgeSubgraph::test_graph_attr_dict",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_out_edges_data",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_in_edges_data",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_reverse_copy",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_reverse_nocopy",
"networkx/classes/tests/test_digraph.py::TestEdgeSubgraph::test_correct_nodes",
"networkx/classes/tests/test_digraph.py::TestEdgeSubgraph::test_correct_edges",
"networkx/classes/tests/test_digraph.py::TestEdgeSubgraph::test_add_node",
"networkx/classes/tests/test_digraph.py::TestEdgeSubgraph::test_remove_node",
"networkx/classes/tests/test_digraph.py::TestEdgeSubgraph::test_node_attr_dict",
"networkx/classes/tests/test_digraph.py::TestEdgeSubgraph::test_edge_attr_dict",
"networkx/classes/tests/test_digraph.py::TestEdgeSubgraph::test_graph_attr_dict",
"networkx/classes/tests/test_digraph.py::TestEdgeSubgraph::test_pred_succ",
"networkx/tests/test_relabel.py::TestRelabel::test_convert_node_labels_to_integers",
"networkx/tests/test_relabel.py::TestRelabel::test_convert_to_integers2",
"networkx/tests/test_relabel.py::TestRelabel::test_convert_to_integers_raise",
"networkx/tests/test_relabel.py::TestRelabel::test_relabel_nodes_copy",
"networkx/tests/test_relabel.py::TestRelabel::test_relabel_nodes_function",
"networkx/tests/test_relabel.py::TestRelabel::test_relabel_nodes_graph",
"networkx/tests/test_relabel.py::TestRelabel::test_relabel_nodes_digraph",
"networkx/tests/test_relabel.py::TestRelabel::test_relabel_nodes_multigraph",
"networkx/tests/test_relabel.py::TestRelabel::test_relabel_nodes_multidigraph",
"networkx/tests/test_relabel.py::TestRelabel::test_relabel_isolated_nodes_to_same",
"networkx/tests/test_relabel.py::TestRelabel::test_relabel_nodes_missing",
"networkx/tests/test_relabel.py::TestRelabel::test_relabel_copy_name",
"networkx/tests/test_relabel.py::TestRelabel::test_relabel_toposort",
"networkx/tests/test_relabel.py::TestRelabel::test_relabel_selfloop"
] | [] | BSD 3-Clause | 1,404 | 1,851 | [
"networkx/classes/digraph.py",
"networkx/relabel.py"
] |
mapbox__mapbox-sdk-py-191 | ac54c0dd453c61020f1f42eafdc3af34a7664718 | 2017-06-27 14:58:23 | d503098e549834471e0857adf5163085af6b4355 | diff --git a/mapbox/errors.py b/mapbox/errors.py
index 005796a..95d982f 100644
--- a/mapbox/errors.py
+++ b/mapbox/errors.py
@@ -46,14 +46,22 @@ class InvalidParameterError(ValidationError):
class InvalidFileError(ValidationError):
pass
+
class InvalidResourceTypeError(ValidationError):
pass
-
+
+
class InvalidPeriodError(ValidationError):
pass
+
class InvalidUsernameError(ValidationError):
pass
+
class InvalidId(ValidationError):
pass
+
+
+class MapboxDeprecationWarning(UserWarning):
+ pass
diff --git a/mapbox/services/static.py b/mapbox/services/static.py
index fe5a9fb..235a238 100644
--- a/mapbox/services/static.py
+++ b/mapbox/services/static.py
@@ -40,7 +40,8 @@ class Static(Service):
return val
def image(self, mapid, lon=None, lat=None, z=None, features=None,
- width=600, height=600, image_format='png256', sort_keys=False):
+ width=600, height=600, image_format='png256', sort_keys=False,
+ retina=False):
if lon is not None and lat is not None and z is not None:
auto = False
@@ -58,8 +59,7 @@ class Static(Service):
lat=str(lat),
z=str(z),
width=str(width),
- height=str(height),
- fmt=image_format)
+ height=str(height))
if features:
collection = normalize_geojson_featurecollection(features)
@@ -69,19 +69,25 @@ class Static(Service):
self._validate_overlay(values['overlay'])
if auto:
- pth = '/{mapid}/geojson({overlay})/auto/{width}x{height}.{fmt}'
+ pth = '/{mapid}/geojson({overlay})/auto/{width}x{height}'
else:
pth = ('/{mapid}/geojson({overlay})/{lon},{lat},{z}'
- '/{width}x{height}.{fmt}')
+ '/{width}x{height}')
else:
if auto:
raise errors.InvalidCoordError(
"Must provide features if lat, lon, z are None")
# No overlay
- pth = '/{mapid}/{lon},{lat},{z}/{width}x{height}.{fmt}'
+ pth = '/{mapid}/{lon},{lat},{z}/{width}x{height}'
uri = URITemplate(self.baseuri + pth).expand(**values)
+
+ # @2x.format handled separately to avoid HTML escaping the ampersand
+ twox = '@2x' if retina else ''
+ full_fmt = '{0}.{1}'.format(twox, image_format)
+ uri += full_fmt
+
res = self.session.get(uri)
self.handle_http_error(res)
return res
diff --git a/mapbox/services/static_style.py b/mapbox/services/static_style.py
index 50eb0b5..b0cef3a 100644
--- a/mapbox/services/static_style.py
+++ b/mapbox/services/static_style.py
@@ -1,4 +1,5 @@
import json
+import warnings
from uritemplate import URITemplate
@@ -61,13 +62,13 @@ class StaticStyle(Service):
raise errors.ImageSizeError('tile_size must be 256 or 512 pixels')
pth = '/{username}/{style_id}/tiles/{tile_size}/{z}/{x}/{y}'
- if retina:
- pth += '@2x'
values = dict(username=username, style_id=style_id,
tile_size=tile_size, z=z, x=x, y=y)
uri = URITemplate(self.baseuri + pth).expand(**values)
+ if retina:
+ uri += '@2x'
res = self.session.get(uri)
self.handle_http_error(res)
return res
@@ -80,8 +81,8 @@ class StaticStyle(Service):
return res
def image(self, username, style_id, lon=None, lat=None, zoom=None, features=None,
- pitch=0, bearing=0, width=600, height=600, twox=False, sort_keys=False,
- attribution=None, logo=None, before_layer=None):
+ pitch=0, bearing=0, width=600, height=600, retina=None, sort_keys=False,
+ attribution=None, logo=None, before_layer=None, twox=None):
params = {}
if attribution is not None:
@@ -91,6 +92,16 @@ class StaticStyle(Service):
if before_layer is not None:
params['before_layer'] = before_layer
+ # twox as a deprecated alias for retina
+ if retina is None:
+ if twox is not None:
+ warnings.warn('twox is a deprecated alias for retina',
+ errors.MapboxDeprecationWarning)
+ retina = twox
+ else:
+ if twox is not None:
+ raise errors.ValidationError('Conflicting args; Remove twox and use retina')
+
if lon is not None and lat is not None and zoom is not None:
auto = False
lat = validate_lat(lat)
@@ -112,7 +123,6 @@ class StaticStyle(Service):
lat=str(lat),
zoom=str(zoom),
auto=auto,
- twox='@2x' if twox else '',
width=str(width),
height=str(height))
@@ -126,9 +136,9 @@ class StaticStyle(Service):
pth = '/{username}/{style_id}/static/geojson({overlay})/'
if auto:
# TODO what about {bearing} and {pitch}
- pth += 'auto/{width}x{height}{twox}'
+ pth += 'auto/{width}x{height}'
else:
- pth += '{lon},{lat},{zoom},{bearing},{pitch}/{width}x{height}{twox}'
+ pth += '{lon},{lat},{zoom},{bearing},{pitch}/{width}x{height}'
else:
if auto:
raise errors.InvalidCoordError(
@@ -136,9 +146,14 @@ class StaticStyle(Service):
# No overlay
pth = ('/{username}/{style_id}/static/'
- '{lon},{lat},{zoom},{bearing},{pitch}/{width}x{height}{twox}')
+ '{lon},{lat},{zoom},{bearing},{pitch}/{width}x{height}')
uri = URITemplate(self.baseuri + pth).expand(**values)
+
+ # @2x handled separately to avoid HTML escaping the ampersand
+ if retina:
+ uri += '@2x'
+
res = self.session.get(uri, params=params)
self.handle_http_error(res)
return res
| Retina with Static Api?
Hello, it seems I can't choose the retina format for my static generated maps.
If I look in the code:
` if auto:
pth = '/{mapid}/geojson({overlay})/auto/{width}x{height}.{fmt}'`
But if I look at the global documentation, the url scheme is `{width}x{height}@2x.{fmt}`
So the is not the possibility to add retina images here? | mapbox/mapbox-sdk-py | diff --git a/tests/test_staticmaps.py b/tests/test_staticmaps.py
index 4e582d4..cb325bf 100644
--- a/tests/test_staticmaps.py
+++ b/tests/test_staticmaps.py
@@ -104,6 +104,7 @@ def test_staticmap_featurestoolarge(points):
with pytest.raises(mapbox.errors.ValidationError):
service._validate_overlay(json.dumps(points * 100))
+
def test_staticmap_imagesize():
service = mapbox.Static(access_token='pk.test')
with pytest.raises(mapbox.errors.ValidationError):
@@ -111,14 +112,32 @@ def test_staticmap_imagesize():
with pytest.raises(mapbox.errors.ValidationError):
service._validate_image_size(2000)
+
def test_latlon():
service = mapbox.Static(access_token='pk.test')
assert -179.0 == service._validate_lon(-179.0)
assert -85.0 == service._validate_lat(-85.0)
+
def test_lon_invalid():
service = mapbox.Static(access_token='pk.test')
with pytest.raises(mapbox.errors.ValidationError):
service._validate_lat(-86.0)
with pytest.raises(mapbox.errors.ValidationError):
service._validate_lon(-181.0)
+
+
[email protected]
+def test_staticmap_retina():
+
+ responses.add(
+ responses.GET,
+ 'https://api.mapbox.com/v4/mapbox.satellite/-61.7,12.1,12/[email protected]?access_token=pk.test',
+ match_querystring=True,
+ body='png123',
+ status=200,
+ content_type='image/png')
+
+ res = mapbox.Static(access_token='pk.test').image(
+ 'mapbox.satellite', -61.7, 12.1, 12, retina=True)
+ assert res.status_code == 200
diff --git a/tests/test_staticstyle.py b/tests/test_staticstyle.py
index 1d89344..6cf9941 100644
--- a/tests/test_staticstyle.py
+++ b/tests/test_staticstyle.py
@@ -184,7 +184,7 @@ def test_bad_tilesize():
@responses.activate
-def test_staticmap_tile():
+def test_staticmap_tile_retina():
responses.add(
responses.GET,
@@ -212,3 +212,44 @@ def test_staticmap_wmts():
res = mapbox.StaticStyle(access_token='pk.test').wmts('mapbox', 'streets-v9')
assert res.status_code == 200
+
+
[email protected]
+def test_staticmap_retina():
+
+ responses.add(
+ responses.GET,
+ 'https://api.mapbox.com/styles/v1/mapbox/streets-v9/static/-61.7,12.1,12.5,75,25/600x600@2x?access_token=pk.test',
+ match_querystring=True,
+ body='png123',
+ status=200,
+ content_type='image/png')
+
+ res = mapbox.StaticStyle(access_token='pk.test').image(
+ 'mapbox', 'streets-v9', -61.7, 12.1, 12.5, pitch=25,
+ bearing=75, retina=True)
+ assert res.status_code == 200
+
+
[email protected]
+def test_staticmap_twox_deprecated():
+ responses.add(
+ responses.GET,
+ 'https://api.mapbox.com/styles/v1/mapbox/streets-v9/static/-61.7,12.1,12.5,75,25/600x600@2x?access_token=pk.test',
+ match_querystring=True,
+ body='png123',
+ status=200,
+ content_type='image/png')
+
+ with pytest.warns(mapbox.errors.MapboxDeprecationWarning):
+ res = mapbox.StaticStyle(access_token='pk.test').image(
+ 'mapbox', 'streets-v9', -61.7, 12.1, 12.5, pitch=25,
+ bearing=75, twox=True)
+ assert res.status_code == 200
+
+
+def test_staticmap_twox_deprecated_error():
+ with pytest.raises(mapbox.errors.ValidationError):
+ mapbox.StaticStyle(access_token='pk.test').image(
+ 'mapbox', 'streets-v9', -61.7, 12.1, 12.5, pitch=25,
+ bearing=75, retina=True, twox=True)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 3
} | 0.13 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[test]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"pytest-cov",
"responses",
"tox"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
boto3==1.23.10
botocore==1.26.10
CacheControl==0.12.14
certifi==2021.5.30
charset-normalizer==2.0.12
click==8.0.4
click-plugins==1.1.1
cligj==0.7.2
coverage==6.2
coveralls==3.3.1
distlib==0.3.9
docopt==0.6.2
filelock==3.4.1
idna==3.10
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
iso3166==2.1.1
jmespath==0.10.0
-e git+https://github.com/mapbox/mapbox-sdk-py.git@ac54c0dd453c61020f1f42eafdc3af34a7664718#egg=mapbox
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
msgpack==1.0.5
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
platformdirs==2.4.0
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
polyline==1.4.0
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
pytest-cov==4.0.0
python-dateutil==2.9.0.post0
requests==2.27.1
responses==0.17.0
s3transfer==0.5.2
six==1.17.0
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
tomli==1.2.3
tox==3.28.0
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
uritemplate==4.1.1
urllib3==1.26.20
virtualenv==20.17.1
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: mapbox-sdk-py
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- boto3==1.23.10
- botocore==1.26.10
- cachecontrol==0.12.14
- charset-normalizer==2.0.12
- click==8.0.4
- click-plugins==1.1.1
- cligj==0.7.2
- coverage==6.2
- coveralls==3.3.1
- distlib==0.3.9
- docopt==0.6.2
- filelock==3.4.1
- idna==3.10
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- iso3166==2.1.1
- jmespath==0.10.0
- msgpack==1.0.5
- platformdirs==2.4.0
- polyline==1.4.0
- pytest-cov==4.0.0
- python-dateutil==2.9.0.post0
- requests==2.27.1
- responses==0.17.0
- s3transfer==0.5.2
- six==1.17.0
- tomli==1.2.3
- tox==3.28.0
- uritemplate==4.1.1
- urllib3==1.26.20
- virtualenv==20.17.1
prefix: /opt/conda/envs/mapbox-sdk-py
| [
"tests/test_staticmaps.py::test_staticmap_retina",
"tests/test_staticstyle.py::test_staticmap_retina",
"tests/test_staticstyle.py::test_staticmap_twox_deprecated",
"tests/test_staticstyle.py::test_staticmap_twox_deprecated_error"
] | [] | [
"tests/test_staticmaps.py::test_staticmap_lonlatz_only",
"tests/test_staticmaps.py::test_staticmap_lonlatz_features",
"tests/test_staticmaps.py::test_staticmap_auto_features",
"tests/test_staticmaps.py::test_staticmap_auto_nofeatures",
"tests/test_staticmaps.py::test_staticmap_featurestoolarge",
"tests/test_staticmaps.py::test_staticmap_imagesize",
"tests/test_staticmaps.py::test_latlon",
"tests/test_staticmaps.py::test_lon_invalid",
"tests/test_staticstyle.py::test_staticmap_lonlatzpitchbearing",
"tests/test_staticstyle.py::test_staticmap_lonlatz_features",
"tests/test_staticstyle.py::test_staticmap_auto_features",
"tests/test_staticstyle.py::test_staticmap_auto_nofeatures",
"tests/test_staticstyle.py::test_staticmap_featurestoolarge",
"tests/test_staticstyle.py::test_staticmap_validate_bearing",
"tests/test_staticstyle.py::test_staticmap_validate_pitch",
"tests/test_staticstyle.py::test_staticmap_imagesize",
"tests/test_staticstyle.py::test_latlon",
"tests/test_staticstyle.py::test_lon_invalid",
"tests/test_staticstyle.py::test_staticmap_options",
"tests/test_staticstyle.py::test_staticmap_tile",
"tests/test_staticstyle.py::test_bad_tilesize",
"tests/test_staticstyle.py::test_staticmap_tile_retina",
"tests/test_staticstyle.py::test_staticmap_wmts"
] | [] | MIT License | 1,405 | 1,588 | [
"mapbox/errors.py",
"mapbox/services/static.py",
"mapbox/services/static_style.py"
] |
|
mapbox__mapbox-sdk-py-193 | d503098e549834471e0857adf5163085af6b4355 | 2017-06-27 16:32:09 | d503098e549834471e0857adf5163085af6b4355 | diff --git a/mapbox/services/uploads.py b/mapbox/services/uploads.py
index b085cd5..86d49d4 100644
--- a/mapbox/services/uploads.py
+++ b/mapbox/services/uploads.py
@@ -2,6 +2,7 @@ from boto3.session import Session as boto3_session
from uritemplate import URITemplate
from mapbox.errors import InvalidFileError
+from mapbox.errors import ValidationError
from mapbox.services.base import Service
@@ -45,6 +46,16 @@ class Uploader(Service):
429: "Too many requests"})
return resp
+ def _validate_tileset(self, tileset):
+ """Validate the tileset name and
+ ensure that it includes the username
+ """
+ if '.' not in tileset:
+ tileset = "{0}.{1}".format(self.username, tileset)
+ if len(tileset) > 64:
+ raise ValidationError('tileset including username must be < 64 char')
+ return tileset
+
def stage(self, fileobj, creds=None, callback=None):
"""Stages the user's file on S3
If creds are not provided, temporary credientials will be generated
@@ -88,9 +99,7 @@ class Uploader(Service):
Returns a response object where the json() contents are
an upload dict
"""
- if '.' not in tileset:
- tileset = "{0}.{1}".format(self.username, tileset)
-
+ tileset = self._validate_tileset(tileset)
account, _name = tileset.split(".")
msg = {'tileset': tileset,
@@ -166,5 +175,6 @@ class Uploader(Service):
Effectively replicates the upload functionality using the HTML form
Returns a response object where the json() is a dict with upload metadata
"""
+ tileset = self._validate_tileset(tileset)
url = self.stage(fileobj, callback=callback)
return self.create(url, tileset, name=name, patch=patch)
| Raise exception for too-long tileset names
Uploads API limits tileset names to 64 characters. Let's raise an exception before making an API call that will fail.
cc: @geografa | mapbox/mapbox-sdk-py | diff --git a/tests/test_upload.py b/tests/test_upload.py
index 0111948..3bdbf99 100644
--- a/tests/test_upload.py
+++ b/tests/test_upload.py
@@ -385,3 +385,26 @@ def test_upload_patch(monkeypatch):
assert res.status_code == 201
job = res.json()
assert job['tileset'] == "{0}.test1".format(username)
+
+
+def test_upload_tileset_validation():
+ with pytest.raises(mapbox.errors.ValidationError):
+ with open('tests/moors.json', 'rb') as src:
+ mapbox.Uploader(access_token=access_token).upload(
+ src, 'a' * 65, name='test1', patch=True)
+
+
+def test_upload_tileset_validation_username():
+ # even with 60 chars, the addition of the
+ # testuser username puts it over 64 chars
+ with pytest.raises(mapbox.errors.ValidationError):
+ with open('tests/moors.json', 'rb') as src:
+ mapbox.Uploader(access_token=access_token).upload(
+ src, 'a' * 60, name='test1', patch=True)
+
+
+def test_create_tileset_validation():
+ # even with 60 chars, the addition of the username puts it over 64 chars
+ with pytest.raises(mapbox.errors.ValidationError):
+ mapbox.Uploader(access_token=access_token).create(
+ 'http://example.com/test.json', 'a' * 60)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 1
} | 0.13 | {
"env_vars": null,
"env_yml_path": [],
"install": "pip install -e .[test]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [],
"pre_install": [],
"python": "3.6",
"reqs_path": [],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
boto3==1.23.10
botocore==1.26.10
CacheControl==0.12.14
certifi==2021.5.30
charset-normalizer==2.0.12
click==8.0.4
click-plugins==1.1.1
cligj==0.7.2
coverage==6.2
coveralls==3.3.1
distlib==0.3.9
docopt==0.6.2
filelock==3.4.1
idna==3.10
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig==1.1.1
iso3166==2.1.1
jmespath==0.10.0
-e git+https://github.com/mapbox/mapbox-sdk-py.git@d503098e549834471e0857adf5163085af6b4355#egg=mapbox
msgpack==1.0.5
packaging==21.3
platformdirs==2.4.0
pluggy==1.0.0
polyline==1.4.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
pytest-cov==4.0.0
python-dateutil==2.9.0.post0
requests==2.27.1
responses==0.17.0
s3transfer==0.5.2
six==1.17.0
toml==0.10.2
tomli==1.2.3
tox==3.28.0
typing_extensions==4.1.1
uritemplate==4.1.1
urllib3==1.26.20
virtualenv==20.17.1
zipp==3.6.0
| name: mapbox-sdk-py
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- boto3==1.23.10
- botocore==1.26.10
- cachecontrol==0.12.14
- charset-normalizer==2.0.12
- click==8.0.4
- click-plugins==1.1.1
- cligj==0.7.2
- coverage==6.2
- coveralls==3.3.1
- distlib==0.3.9
- docopt==0.6.2
- filelock==3.4.1
- idna==3.10
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- iniconfig==1.1.1
- iso3166==2.1.1
- jmespath==0.10.0
- msgpack==1.0.5
- packaging==21.3
- platformdirs==2.4.0
- pluggy==1.0.0
- polyline==1.4.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-cov==4.0.0
- python-dateutil==2.9.0.post0
- requests==2.27.1
- responses==0.17.0
- s3transfer==0.5.2
- six==1.17.0
- toml==0.10.2
- tomli==1.2.3
- tox==3.28.0
- typing-extensions==4.1.1
- uritemplate==4.1.1
- urllib3==1.26.20
- virtualenv==20.17.1
- zipp==3.6.0
prefix: /opt/conda/envs/mapbox-sdk-py
| [
"tests/test_upload.py::test_create_tileset_validation"
] | [] | [
"tests/test_upload.py::test_get_credentials",
"tests/test_upload.py::test_create",
"tests/test_upload.py::test_create_name",
"tests/test_upload.py::test_list",
"tests/test_upload.py::test_status",
"tests/test_upload.py::test_delete",
"tests/test_upload.py::test_stage",
"tests/test_upload.py::test_big_stage",
"tests/test_upload.py::test_upload",
"tests/test_upload.py::test_upload_error",
"tests/test_upload.py::test_invalid_fileobj",
"tests/test_upload.py::test_upload_patch",
"tests/test_upload.py::test_upload_tileset_validation",
"tests/test_upload.py::test_upload_tileset_validation_username"
] | [] | MIT License | 1,407 | 470 | [
"mapbox/services/uploads.py"
] |
|
jupyter__nbgrader-778 | 7b9b431e873d1b787f269373140f0de31636b06c | 2017-06-27 22:57:08 | ed23f4484b084451da5b691df28031f39b2ce9ca | diff --git a/nbgrader/apps/baseapp.py b/nbgrader/apps/baseapp.py
index ab115149..825cc8ae 100644
--- a/nbgrader/apps/baseapp.py
+++ b/nbgrader/apps/baseapp.py
@@ -211,17 +211,6 @@ class NbGrader(JupyterApp):
cfg.Exchange.merge(cfg.TransferApp)
del cfg.TransferApp
- if 'BaseNbConvertApp' in cfg:
- self.log.warning(
- "Use BaseConverter in config, not BaseNbConvertApp. Outdated config:\n%s",
- '\n'.join(
- 'BaseNbConvertApp.{key} = {value!r}'.format(key=key, value=value)
- for key, value in cfg.BaseNbConvertApp.items()
- )
- )
- cfg.BaseConverter.merge(cfg.BaseNbConvertApp)
- del cfg.BaseNbConvertApp
-
super(NbGrader, self)._load_config(cfg, **kwargs)
if self.coursedir:
self.coursedir._load_config(cfg)
diff --git a/nbgrader/apps/nbgraderapp.py b/nbgrader/apps/nbgraderapp.py
index 23e84b3e..c5c22f8c 100755
--- a/nbgrader/apps/nbgraderapp.py
+++ b/nbgrader/apps/nbgraderapp.py
@@ -15,7 +15,6 @@ from .. import preprocessors
from .. import plugins
from ..coursedir import CourseDirectory
from .. import exchange
-from .. import converters
from .baseapp import nbgrader_aliases, nbgrader_flags
from . import (
NbGrader,
@@ -267,12 +266,6 @@ class NbGraderApp(NbGrader):
if hasattr(ex, "class_traits") and ex.class_traits(config=True):
classes.append(ex)
- # include all the converters
- for ex_name in converters.__all__:
- ex = getattr(converters, ex_name)
- if hasattr(ex, "class_traits") and ex.class_traits(config=True):
- classes.append(ex)
-
return classes
@catch_config_error
diff --git a/nbgrader/exchange/release.py b/nbgrader/exchange/release.py
index 6d1c89ca..3760d84a 100644
--- a/nbgrader/exchange/release.py
+++ b/nbgrader/exchange/release.py
@@ -4,7 +4,7 @@ from stat import (
S_IRUSR, S_IWUSR, S_IXUSR,
S_IRGRP, S_IWGRP, S_IXGRP,
S_IROTH, S_IWOTH, S_IXOTH,
- S_ISGID
+ S_ISGID, ST_MODE
)
from traitlets import Bool
@@ -17,6 +17,30 @@ class ExchangeRelease(Exchange):
force = Bool(False, help="Force overwrite existing files in the exchange.").tag(config=True)
+ def ensure_root(self):
+ perms = S_IRUSR|S_IWUSR|S_IXUSR|S_IRGRP|S_IWGRP|S_IXGRP|S_IROTH|S_IWOTH|S_IXOTH
+
+ # if root doesn't exist, create it and set permissions
+ if not os.path.exists(self.root):
+ self.log.warning("Creating exchange directory: {}".format(self.root))
+ try:
+ os.makedirs(self.root)
+ os.chmod(self.root, perms)
+ except PermissionError:
+ self.fail("Could not create {}, permission denied.".format(self.root))
+
+ else:
+ old_perms = oct(os.stat(self.root)[ST_MODE] & 0o777)
+ new_perms = oct(perms & 0o777)
+ if old_perms != new_perms:
+ self.log.warning(
+ "Permissions for exchange directory ({}) are invalid, changing them from {} to {}".format(
+ self.root, old_perms, new_perms))
+ try:
+ os.chmod(self.root, perms)
+ except PermissionError:
+ self.fail("Could not change permissions of {}, permission denied.".format(self.root))
+
def init_src(self):
self.src_path = self.coursedir.format_path(self.coursedir.release_directory, '.', self.coursedir.assignment_id)
if not os.path.isdir(self.src_path):
| Make it clearer how to set permissions for the exchange directory
If the exchange directory hasn't been created when `nbgrader release` is run for the first time, an error occurs:
```
$ nbgrader release --Exchange.root=/tmp/exchange ps1
[ReleaseApp | WARNING] No nbgrader_config.py file found (rerun with --debug to see where nbgrader is looking)
[ReleaseApp | CRITICAL] Unwritable directory, please contact your instructor: /tmp/exchange
[ReleaseApp | ERROR] nbgrader release failed
```
This is confusing and not helpful for instructors. Instead, if the exchange directory doesn't exist, `nbgrader release` should create the directory for instructors and set it to have the correct permissions. | jupyter/nbgrader | diff --git a/nbgrader/tests/apps/test_nbgrader_release.py b/nbgrader/tests/apps/test_nbgrader_release.py
index 93db81cb..0d8bf2dc 100644
--- a/nbgrader/tests/apps/test_nbgrader_release.py
+++ b/nbgrader/tests/apps/test_nbgrader_release.py
@@ -1,4 +1,6 @@
import os
+import shutil
+import stat
from os.path import join
from .. import run_nbgrader
@@ -55,3 +57,16 @@ class TestNbGraderRelease(BaseTestApp):
self._copy_file(join("files", "test.ipynb"), join(course_dir, "release", "ps1", "p1.ipynb"))
self._release("--assignment=ps1", exchange)
assert os.path.isfile(join(exchange, "abc101", "outbound", "ps1", "p1.ipynb"))
+
+ def test_no_exchange(self, exchange, course_dir):
+ shutil.rmtree(exchange)
+ self._copy_file(join("files", "test.ipynb"), join(course_dir, "release", "ps1", "p1.ipynb"))
+ self._release("--assignment=ps1", exchange)
+ assert os.path.isfile(join(exchange, "abc101", "outbound", "ps1", "p1.ipynb"))
+
+ def test_exchange_bad_perms(self, exchange, course_dir):
+ perms = stat.S_IRUSR|stat.S_IWUSR|stat.S_IXUSR|stat.S_IRGRP
+ os.chmod(exchange, perms)
+ self._copy_file(join("files", "test.ipynb"), join(course_dir, "release", "ps1", "p1.ipynb"))
+ self._release("--assignment=ps1", exchange)
+ assert os.path.isfile(join(exchange, "abc101", "outbound", "ps1", "p1.ipynb"))
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 3
} | 0.4 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -r dev-requirements.txt -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-rerunfailures",
"coverage",
"selenium",
"invoke",
"sphinx",
"codecov",
"cov-core",
"nbval"
],
"pre_install": [
"pip install -U pip wheel setuptools"
],
"python": "3.5",
"reqs_path": [
"dev-requirements.txt",
"dev-requirements-windows.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
alembic==1.7.7
anyio==3.6.2
argon2-cffi==21.3.0
argon2-cffi-bindings==21.2.0
async-generator==1.10
attrs==22.2.0
Babel==2.11.0
backcall==0.2.0
bleach==4.1.0
certifi==2021.5.30
cffi==1.15.1
charset-normalizer==2.0.12
codecov==2.1.13
comm==0.1.4
contextvars==2.4
cov-core==1.15.0
coverage==6.2
dataclasses==0.8
decorator==5.1.1
defusedxml==0.7.1
docutils==0.18.1
entrypoints==0.4
greenlet==2.0.2
idna==3.10
imagesize==1.4.1
immutables==0.19
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig==1.1.1
invoke==2.2.0
ipykernel==5.5.6
ipython==7.16.3
ipython-genutils==0.2.0
ipywidgets==7.8.5
jedi==0.17.2
Jinja2==3.0.3
json5==0.9.16
jsonschema==3.2.0
jupyter==1.1.1
jupyter-client==7.1.2
jupyter-console==6.4.3
jupyter-core==4.9.2
jupyter-server==1.13.1
jupyterlab==3.2.9
jupyterlab-pygments==0.1.2
jupyterlab-server==2.10.3
jupyterlab_widgets==1.1.11
Mako==1.1.6
MarkupSafe==2.0.1
mistune==0.8.4
nbclassic==0.3.5
nbclient==0.5.9
nbconvert==6.0.7
nbformat==5.1.3
-e git+https://github.com/jupyter/nbgrader.git@7b9b431e873d1b787f269373140f0de31636b06c#egg=nbgrader
nbval==0.10.0
nest-asyncio==1.6.0
notebook==6.4.10
packaging==21.3
pandocfilters==1.5.1
parso==0.7.1
pexpect==4.9.0
pickleshare==0.7.5
pluggy==1.0.0
prometheus-client==0.17.1
prompt-toolkit==3.0.36
ptyprocess==0.7.0
py==1.11.0
pycparser==2.21
pyenchant==3.2.2
Pygments==2.14.0
pyparsing==3.1.4
pyrsistent==0.18.0
pytest==7.0.1
pytest-cov==4.0.0
pytest-rerunfailures==10.3
python-dateutil==2.9.0.post0
pytz==2025.2
pyzmq==25.1.2
requests==2.27.1
selenium==3.141.0
Send2Trash==1.8.3
six==1.17.0
sniffio==1.2.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinx-rtd-theme==2.0.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jquery==4.1
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
sphinxcontrib-spelling==7.7.0
SQLAlchemy==1.4.54
terminado==0.12.1
testpath==0.6.0
tomli==1.2.3
tornado==6.1
traitlets==4.3.3
typing_extensions==4.1.1
urllib3==1.26.20
wcwidth==0.2.13
webencodings==0.5.1
websocket-client==1.3.1
widgetsnbextension==3.6.10
zipp==3.6.0
| name: nbgrader
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- alembic==1.7.7
- anyio==3.6.2
- argon2-cffi==21.3.0
- argon2-cffi-bindings==21.2.0
- async-generator==1.10
- attrs==22.2.0
- babel==2.11.0
- backcall==0.2.0
- bleach==4.1.0
- cffi==1.15.1
- charset-normalizer==2.0.12
- codecov==2.1.13
- comm==0.1.4
- contextvars==2.4
- cov-core==1.15.0
- coverage==6.2
- dataclasses==0.8
- decorator==5.1.1
- defusedxml==0.7.1
- docutils==0.18.1
- entrypoints==0.4
- greenlet==2.0.2
- idna==3.10
- imagesize==1.4.1
- immutables==0.19
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- iniconfig==1.1.1
- invoke==2.2.0
- ipykernel==5.5.6
- ipython==7.16.3
- ipython-genutils==0.2.0
- ipywidgets==7.8.5
- jedi==0.17.2
- jinja2==3.0.3
- json5==0.9.16
- jsonschema==3.2.0
- jupyter==1.1.1
- jupyter-client==7.1.2
- jupyter-console==6.4.3
- jupyter-core==4.9.2
- jupyter-server==1.13.1
- jupyterlab==3.2.9
- jupyterlab-pygments==0.1.2
- jupyterlab-server==2.10.3
- jupyterlab-widgets==1.1.11
- mako==1.1.6
- markupsafe==2.0.1
- mistune==0.8.4
- nbclassic==0.3.5
- nbclient==0.5.9
- nbconvert==6.0.7
- nbformat==5.1.3
- nbval==0.10.0
- nest-asyncio==1.6.0
- notebook==6.4.10
- packaging==21.3
- pandocfilters==1.5.1
- parso==0.7.1
- pexpect==4.9.0
- pickleshare==0.7.5
- pip==21.3.1
- pluggy==1.0.0
- prometheus-client==0.17.1
- prompt-toolkit==3.0.36
- ptyprocess==0.7.0
- py==1.11.0
- pycparser==2.21
- pyenchant==3.2.2
- pygments==2.14.0
- pyparsing==3.1.4
- pyrsistent==0.18.0
- pytest==7.0.1
- pytest-cov==4.0.0
- pytest-rerunfailures==10.3
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyzmq==25.1.2
- requests==2.27.1
- selenium==3.141.0
- send2trash==1.8.3
- setuptools==59.6.0
- six==1.17.0
- sniffio==1.2.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinx-rtd-theme==2.0.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jquery==4.1
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- sphinxcontrib-spelling==7.7.0
- sqlalchemy==1.4.54
- terminado==0.12.1
- testpath==0.6.0
- tomli==1.2.3
- tornado==6.1
- traitlets==4.3.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- wcwidth==0.2.13
- webencodings==0.5.1
- websocket-client==1.3.1
- widgetsnbextension==3.6.10
- zipp==3.6.0
prefix: /opt/conda/envs/nbgrader
| [
"nbgrader/tests/apps/test_nbgrader_release.py::TestNbGraderRelease::test_no_exchange"
] | [] | [
"nbgrader/tests/apps/test_nbgrader_release.py::TestNbGraderRelease::test_help",
"nbgrader/tests/apps/test_nbgrader_release.py::TestNbGraderRelease::test_no_course_id",
"nbgrader/tests/apps/test_nbgrader_release.py::TestNbGraderRelease::test_release",
"nbgrader/tests/apps/test_nbgrader_release.py::TestNbGraderRelease::test_force_release",
"nbgrader/tests/apps/test_nbgrader_release.py::TestNbGraderRelease::test_release_with_assignment_flag",
"nbgrader/tests/apps/test_nbgrader_release.py::TestNbGraderRelease::test_exchange_bad_perms"
] | [] | BSD 3-Clause "New" or "Revised" License | 1,411 | 1,014 | [
"nbgrader/apps/baseapp.py",
"nbgrader/apps/nbgraderapp.py",
"nbgrader/exchange/release.py"
] |
|
palantir__python-language-server-69 | df7d399499b0a31fd03da72c84428c7f8957935b | 2017-06-29 10:03:16 | ac405e6ff8d886bc79d7e47b1104b10f2383f4bc | diff --git a/pyls/python_ls.py b/pyls/python_ls.py
index 0cc0e40..08d4fed 100644
--- a/pyls/python_ls.py
+++ b/pyls/python_ls.py
@@ -36,7 +36,7 @@ class PythonLanguageServer(LanguageServer):
'signatureHelpProvider': {
'triggerCharacters': ['(', ',']
},
- 'textDocumentSync': lsp.TextDocumentSyncKind.FULL
+ 'textDocumentSync': lsp.TextDocumentSyncKind.INCREMENTAL
}
def initialize(self, root_path, init_opts, _process_id):
@@ -111,11 +111,12 @@ class PythonLanguageServer(LanguageServer):
self.lint(textDocument['uri'])
def m_text_document__did_change(self, contentChanges=None, textDocument=None, **_kwargs):
- # Since we're using a FULL document sync, there is only one change containing the whole file
- # TODO: debounce, or should this be someone else's responsibility? Probably
- self.workspace.put_document(
- textDocument['uri'], contentChanges[0]['text'], version=textDocument.get('version')
- )
+ for change in contentChanges:
+ self.workspace.update_document(
+ textDocument['uri'],
+ change,
+ version=textDocument.get('version')
+ )
self.lint(textDocument['uri'])
def m_text_document__did_save(self, textDocument=None, **_kwargs):
diff --git a/pyls/workspace.py b/pyls/workspace.py
index f7a5b06..b8381a9 100644
--- a/pyls/workspace.py
+++ b/pyls/workspace.py
@@ -1,4 +1,5 @@
# Copyright 2017 Palantir Technologies, Inc.
+import io
import logging
import os
import re
@@ -44,6 +45,10 @@ class Workspace(object):
def rm_document(self, doc_uri):
self._docs.pop(doc_uri)
+ def update_document(self, doc_uri, change, version=None):
+ self._docs[doc_uri].apply_change(change)
+ self._docs[doc_uri].version = version
+
def apply_edit(self, edit):
# Note that lang_server.call currently doesn't return anything
return self._lang_server.call(self.M_APPLY_EDIT, {'edit': edit})
@@ -98,8 +103,45 @@ class Document(object):
return f.read()
return self._source
+ def apply_change(self, change):
+ """Apply a change to the document."""
+ text = change['text']
+ change_range = change.get('range')
+
+ if not change_range:
+ # The whole file has changed
+ self._source = text
+ return
+
+ start_line = change_range['start']['line']
+ start_col = change_range['start']['character']
+ end_line = change_range['end']['line']
+ end_col = change_range['end']['character']
+
+ new = io.StringIO()
+ # Iterate over the existing document until we hit the edit range,
+ # at which point we write the new text, then loop until we hit
+ # the end of the range and continue writing.
+ for i, line in enumerate(self.lines):
+ if i < start_line:
+ new.write(line)
+ continue
+
+ if i > end_line:
+ new.write(line)
+ continue
+
+ if i == start_line:
+ new.write(line[:start_col])
+ new.write(text)
+
+ if i == end_line:
+ new.write(line[end_col:])
+
+ self._source = new.getvalue()
+
def word_at_position(self, position):
- """ Get the word under the cursor returning the start and end positions """
+ """Get the word under the cursor returning the start and end positions."""
line = self.lines[position['line']]
i = position['character']
# Split word in two
| Support incremental TextDocumentSyncKind
Right now we get full text files each time they change. This is obviously bad.
I wonder if we could then propagate these changes to only re-lint the changed parts of the file? | palantir/python-language-server | diff --git a/test/test_workspace.py b/test/test_workspace.py
index f30afa7..fa262ae 100644
--- a/test/test_workspace.py
+++ b/test/test_workspace.py
@@ -51,3 +51,32 @@ def test_non_root_project(pyls):
pyls.workspace.put_document(test_uri, 'assert True')
test_doc = pyls.workspace.get_document(test_uri)
assert project_root in pyls.workspace.syspath_for_path(test_doc.path)
+
+
+def test_document_line_edit():
+ doc = workspace.Document('file:///uri', u'itshelloworld')
+ doc.apply_change({
+ 'text': u'goodbye',
+ 'range': {
+ 'start': {'line': 0, 'character': 3},
+ 'end': {'line': 0, 'character': 8}
+ }
+ })
+ assert doc.source == u'itsgoodbyeworld'
+
+
+def test_document_multiline_edit():
+ old = [
+ "def hello(a, b):\n",
+ " print a\n",
+ " print b\n"
+ ]
+ doc = workspace.Document('file:///uri', u''.join(old))
+ doc.apply_change({'text': u'print a, b', 'range': {
+ 'start': {'line': 1, 'character': 4},
+ 'end': {'line': 2, 'character': 11}
+ }})
+ assert doc.lines == [
+ "def hello(a, b):\n",
+ " print a, b\n"
+ ]
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 2
} | 0.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[test]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"coverage",
"pytest-cov"
],
"pre_install": [],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | cachetools==5.5.2
chardet==5.2.0
colorama==0.4.6
configparser==7.2.0
coverage==7.8.0
distlib==0.3.9
exceptiongroup==1.2.2
filelock==3.18.0
future==1.0.0
iniconfig==2.1.0
jedi==0.19.2
json-rpc==1.15.0
packaging==24.2
parso==0.8.4
platformdirs==4.3.7
pluggy==1.5.0
pycodestyle==2.13.0
pyflakes==3.3.1
pyproject-api==1.9.0
pytest==8.3.5
pytest-cov==6.0.0
-e git+https://github.com/palantir/python-language-server.git@df7d399499b0a31fd03da72c84428c7f8957935b#egg=python_language_server
tomli==2.2.1
tox==4.25.0
typing_extensions==4.13.0
versioneer==0.29
virtualenv==20.29.3
yapf==0.43.0
| name: python-language-server
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- cachetools==5.5.2
- chardet==5.2.0
- colorama==0.4.6
- configparser==7.2.0
- coverage==7.8.0
- distlib==0.3.9
- exceptiongroup==1.2.2
- filelock==3.18.0
- future==1.0.0
- iniconfig==2.1.0
- jedi==0.19.2
- json-rpc==1.15.0
- packaging==24.2
- parso==0.8.4
- platformdirs==4.3.7
- pluggy==1.5.0
- pycodestyle==2.13.0
- pyflakes==3.3.1
- pyproject-api==1.9.0
- pytest==8.3.5
- pytest-cov==6.0.0
- tomli==2.2.1
- tox==4.25.0
- typing-extensions==4.13.0
- versioneer==0.29
- virtualenv==20.29.3
- yapf==0.43.0
prefix: /opt/conda/envs/python-language-server
| [
"test/test_workspace.py::test_document_line_edit",
"test/test_workspace.py::test_document_multiline_edit"
] | [] | [
"test/test_workspace.py::test_local",
"test/test_workspace.py::test_put_document",
"test/test_workspace.py::test_get_document",
"test/test_workspace.py::test_rm_document",
"test/test_workspace.py::test_bad_get_document",
"test/test_workspace.py::test_uri_like",
"test/test_workspace.py::test_non_root_project"
] | [] | MIT License | 1,416 | 899 | [
"pyls/python_ls.py",
"pyls/workspace.py"
] |
|
openmrslab__suspect-65 | a70490c09196d7996ecfd99401eb68765ae25a67 | 2017-07-03 21:19:43 | 820e897294d90e08c4b91be7289e4ee9ebc6d009 | coveralls:
[](https://coveralls.io/builds/12238080)
Coverage increased (+0.2%) to 78.728% when pulling **2e5c6eaa68957507ad19205cfbc872dbb3011ace on 62_voxel_mask** into **a70490c09196d7996ecfd99401eb68765ae25a67 on master**.
| diff --git a/suspect/image/__init__.py b/suspect/image/__init__.py
index 14e78f7..cffdc25 100644
--- a/suspect/image/__init__.py
+++ b/suspect/image/__init__.py
@@ -1,1 +1,2 @@
-from suspect.image._image import *
\ No newline at end of file
+from suspect.image._image import *
+from ._mask import create_mask
\ No newline at end of file
diff --git a/suspect/image/_mask.py b/suspect/image/_mask.py
new file mode 100644
index 0000000..e31cff1
--- /dev/null
+++ b/suspect/image/_mask.py
@@ -0,0 +1,46 @@
+import numpy as np
+
+
+def create_mask(source_image, ref_image, voxels=None):
+ """
+ Creates a volumetric mask for the source_image voxel in the coordinate
+ system of the ref_image volume.
+
+ Parameters
+ ----------
+ source_image : MRSBase
+ The spectroscopy volume from which to create the mask.
+ ref_image : ImageBase
+ The reference image volume which defines the coordinate system for
+ the mask.
+
+ Returns
+ -------
+ numpy.ndarray
+ Boolean array with the same shape as ref_image, True for all voxels
+ inside source_image, false for all others.
+ """
+
+ # create a grid of coordinates for all points in the ref_image
+ # the ref_image has coord index order [z, y, x] so we reverse the shape
+ # to get the indices in (x, y, z) format for the coordinate conversion
+ ref_coords = np.mgrid[[range(0, size) for size in ref_image.shape[::-1]]]
+
+ # mgrid puts the (x, y, z) tuple at the front, we want it at the back
+ ref_coords = np.moveaxis(ref_coords, 0, -1)
+
+ # now we can apply to_scanner and from_scanner to convert from ref coords
+ # into source coords
+ scanner_coords = ref_image.to_scanner(ref_coords)
+ source_coords = source_image.from_scanner(scanner_coords)
+
+ # now check whether the source_coords are in the selected voxel
+ # TODO for now, we assume single voxel data until issue 50 is resolved
+
+ # have to transpose the result to get it to match the shape of ref_image
+ return np.all((source_coords[..., 0] < 0.5,
+ source_coords[..., 0] >= -0.5,
+ source_coords[..., 1] >= -0.5,
+ source_coords[..., 2] >= -0.5,
+ source_coords[..., 1] < 0.5,
+ source_coords[..., 2] < 0.5), axis=0).T
| ENH: Mask function for voxel on image
It should be possible to call a `create_mask()` function which produces a binary mask showing which voxels of a reference image are inside a spectroscopy voxel. In the default case this would be for all the voxels in the spectroscopy, but it should also be possible to specify a specific voxel or list of voxels to be used instead, so that masks for individual CSI voxels are also possible. | openmrslab/suspect | diff --git a/tests/test_mrs/test_image.py b/tests/test_mrs/test_image.py
new file mode 100644
index 0000000..70fca64
--- /dev/null
+++ b/tests/test_mrs/test_image.py
@@ -0,0 +1,23 @@
+import suspect
+
+import suspect._transforms
+
+import numpy as np
+
+
+def test_simple_mask():
+ source_transform = suspect._transforms.transformation_matrix([1, 0, 0],
+ [0, 1, 0],
+ [5, 0, 0],
+ [10, 10, 10])
+ ref_transform = suspect._transforms.transformation_matrix([1, 0, 0],
+ [0, 1, 0],
+ [-10, -5, -5],
+ [1, 1, 1])
+ source_volume = suspect.MRSBase(np.ones(1024), 1e-3, 123, transform=source_transform)
+ ref_volume = suspect.base.ImageBase(np.zeros((20, 20, 20)), transform=ref_transform)
+ mask = suspect.image.create_mask(source_volume, ref_volume)
+ assert ref_volume.shape == mask.shape
+ mask_target = np.zeros_like(ref_volume)
+ mask_target[0:10, 0:10, 10:20] = 1
+ np.testing.assert_equal(mask_target.astype('bool'), mask)
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_added_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 1
},
"num_modified_files": 1
} | 0.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.5",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
asteval==0.9.26
async-generator==1.10
attrs==22.2.0
Babel==2.11.0
backcall==0.2.0
bleach==4.1.0
certifi==2021.5.30
charset-normalizer==2.0.12
coverage==6.2
decorator==5.1.1
defusedxml==0.7.1
docutils==0.18.1
entrypoints==0.4
future==1.0.0
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
iniconfig==1.1.1
ipykernel==5.5.6
ipython==7.16.3
ipython-genutils==0.2.0
jedi==0.17.2
Jinja2==3.0.3
jsonschema==3.2.0
jupyter-client==7.1.2
jupyter-core==4.9.2
jupyterlab-pygments==0.1.2
lmfit==1.0.3
MarkupSafe==2.0.1
mistune==0.8.4
nbclient==0.5.9
nbconvert==6.0.7
nbformat==5.1.3
nbsphinx==0.8.8
nest-asyncio==1.6.0
numpy==1.19.5
packaging==21.3
pandocfilters==1.5.1
parse==1.20.2
Parsley==1.3
parso==0.7.1
pexpect==4.9.0
pickleshare==0.7.5
pluggy==1.0.0
prompt-toolkit==3.0.36
ptyprocess==0.7.0
py==1.11.0
pydicom==2.3.1
Pygments==2.14.0
pyparsing==3.1.4
pyrsistent==0.18.0
pytest==7.0.1
pytest-cov==4.0.0
python-dateutil==2.9.0.post0
pytz==2025.2
PyWavelets==1.1.1
pyzmq==25.1.2
requests==2.27.1
scipy==1.5.4
six==1.17.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
-e git+https://github.com/openmrslab/suspect.git@a70490c09196d7996ecfd99401eb68765ae25a67#egg=suspect
testpath==0.6.0
tomli==1.2.3
tornado==6.1
traitlets==4.3.3
typing_extensions==4.1.1
uncertainties==3.1.7
urllib3==1.26.20
wcwidth==0.2.13
webencodings==0.5.1
zipp==3.6.0
| name: suspect
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- asteval==0.9.26
- async-generator==1.10
- attrs==22.2.0
- babel==2.11.0
- backcall==0.2.0
- bleach==4.1.0
- charset-normalizer==2.0.12
- coverage==6.2
- decorator==5.1.1
- defusedxml==0.7.1
- docutils==0.18.1
- entrypoints==0.4
- future==1.0.0
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- ipykernel==5.5.6
- ipython==7.16.3
- ipython-genutils==0.2.0
- jedi==0.17.2
- jinja2==3.0.3
- jsonschema==3.2.0
- jupyter-client==7.1.2
- jupyter-core==4.9.2
- jupyterlab-pygments==0.1.2
- lmfit==1.0.3
- markupsafe==2.0.1
- mistune==0.8.4
- nbclient==0.5.9
- nbconvert==6.0.7
- nbformat==5.1.3
- nbsphinx==0.8.8
- nest-asyncio==1.6.0
- numpy==1.19.5
- packaging==21.3
- pandocfilters==1.5.1
- parse==1.20.2
- parsley==1.3
- parso==0.7.1
- pexpect==4.9.0
- pickleshare==0.7.5
- pluggy==1.0.0
- prompt-toolkit==3.0.36
- ptyprocess==0.7.0
- py==1.11.0
- pydicom==2.3.1
- pygments==2.14.0
- pyparsing==3.1.4
- pyrsistent==0.18.0
- pytest==7.0.1
- pytest-cov==4.0.0
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pywavelets==1.1.1
- pyzmq==25.1.2
- requests==2.27.1
- scipy==1.5.4
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- testpath==0.6.0
- tomli==1.2.3
- tornado==6.1
- traitlets==4.3.3
- typing-extensions==4.1.1
- uncertainties==3.1.7
- urllib3==1.26.20
- wcwidth==0.2.13
- webencodings==0.5.1
- zipp==3.6.0
prefix: /opt/conda/envs/suspect
| [
"tests/test_mrs/test_image.py::test_simple_mask"
] | [] | [] | [] | MIT License | 1,431 | 675 | [
"suspect/image/__init__.py"
] |
inducer__pudb-260 | fef17b6f33da7d03758c150b37cd2f84754aa01d | 2017-07-05 00:13:12 | 3f627ce0f7370ab80bc2496cb3d2364686f10efe | diff --git a/pudb/ui_tools.py b/pudb/ui_tools.py
index 9997cad..a398865 100644
--- a/pudb/ui_tools.py
+++ b/pudb/ui_tools.py
@@ -1,6 +1,6 @@
from __future__ import absolute_import, division, print_function
import urwid
-from urwid.util import _target_encoding
+from urwid.util import _target_encoding, calc_width
# generic urwid helpers -------------------------------------------------------
@@ -14,7 +14,7 @@ def make_canvas(txt, attr, maxcol, fill_attr=None):
# filter out zero-length attrs
line_attr = [(aname, l) for aname, l in line_attr if l > 0]
- diff = maxcol - len(line)
+ diff = maxcol - calc_width(line, 0, len(line))
if diff > 0:
line += " "*diff
line_attr.append((fill_attr, diff))
| "Canvas text is wider than the maxcol specified" with Chinese
The full script is simple:
```
data = "中文"
```
Run it with
```
pudb3 a.py
```
And press "n" I got this:
```
Traceback (most recent call last):
File "/usr/lib/python3.6/site-packages/pudb/__init__.py", line 83, in runscript
dbg._runscript(mainpyfile)
File "/usr/lib/python3.6/site-packages/pudb/debugger.py", line 419, in _runscript
self.run(statement, globals=globals_, locals=locals_)
File "/usr/lib/python3.6/bdb.py", line 431, in run
exec(cmd, globals, locals)
File "<string>", line 1, in <module>
File "a.py", line 1, in <module>
data = "中文"
File "/usr/lib/python3.6/bdb.py", line 52, in trace_dispatch
return self.dispatch_return(frame, arg)
File "/usr/lib/python3.6/bdb.py", line 93, in dispatch_return
self.user_return(frame, arg)
File "/usr/lib/python3.6/site-packages/pudb/debugger.py", line 385, in user_return
self.interaction(frame)
File "/usr/lib/python3.6/site-packages/pudb/debugger.py", line 339, in interaction
show_exc_dialog=show_exc_dialog)
File "/usr/lib/python3.6/site-packages/pudb/debugger.py", line 2079, in call_with_ui
return f(*args, **kwargs)
File "/usr/lib/python3.6/site-packages/pudb/debugger.py", line 2307, in interaction
self.event_loop()
File "/usr/lib/python3.6/site-packages/pudb/debugger.py", line 2265, in event_loop
canvas = toplevel.render(self.size, focus=True)
File "/usr/lib/python3.6/site-packages/urwid/widget.py", line 141, in cached_render
canv = fn(self, size, focus=focus)
File "/usr/lib/python3.6/site-packages/urwid/widget.py", line 1750, in render
canv = get_delegate(self).render(size, focus=focus)
File "/usr/lib/python3.6/site-packages/urwid/widget.py", line 141, in cached_render
canv = fn(self, size, focus=focus)
File "/usr/lib/python3.6/site-packages/urwid/container.py", line 1083, in render
focus and self.focus_part == 'body')
File "/usr/lib/python3.6/site-packages/urwid/widget.py", line 141, in cached_render
canv = fn(self, size, focus=focus)
File "/usr/lib/python3.6/site-packages/urwid/decoration.py", line 225, in render
canv = self._original_widget.render(size, focus=focus)
File "/usr/lib/python3.6/site-packages/urwid/widget.py", line 141, in cached_render
canv = fn(self, size, focus=focus)
File "/usr/lib/python3.6/site-packages/urwid/container.py", line 2085, in render
focus = focus and self.focus_position == i)
File "/usr/lib/python3.6/site-packages/urwid/widget.py", line 141, in cached_render
canv = fn(self, size, focus=focus)
File "/usr/lib/python3.6/site-packages/urwid/widget.py", line 1750, in render
canv = get_delegate(self).render(size, focus=focus)
File "/usr/lib/python3.6/site-packages/urwid/widget.py", line 141, in cached_render
canv = fn(self, size, focus=focus)
File "/usr/lib/python3.6/site-packages/urwid/container.py", line 1526, in render
canv = w.render((maxcol, rows), focus=focus and item_focus)
File "/usr/lib/python3.6/site-packages/urwid/widget.py", line 141, in cached_render
canv = fn(self, size, focus=focus)
File "/usr/lib/python3.6/site-packages/urwid/decoration.py", line 225, in render
canv = self._original_widget.render(size, focus=focus)
File "/usr/lib/python3.6/site-packages/urwid/widget.py", line 141, in cached_render
canv = fn(self, size, focus=focus)
File "/usr/lib/python3.6/site-packages/urwid/container.py", line 1526, in render
canv = w.render((maxcol, rows), focus=focus and item_focus)
File "/usr/lib/python3.6/site-packages/urwid/widget.py", line 141, in cached_render
canv = fn(self, size, focus=focus)
File "/usr/lib/python3.6/site-packages/urwid/decoration.py", line 225, in render
canv = self._original_widget.render(size, focus=focus)
File "/usr/lib/python3.6/site-packages/urwid/widget.py", line 141, in cached_render
canv = fn(self, size, focus=focus)
File "/usr/lib/python3.6/site-packages/urwid/widget.py", line 1750, in render
canv = get_delegate(self).render(size, focus=focus)
File "/usr/lib/python3.6/site-packages/urwid/widget.py", line 141, in cached_render
canv = fn(self, size, focus=focus)
File "/usr/lib/python3.6/site-packages/urwid/listbox.py", line 485, in render
canvas = widget.render((maxcol,))
File "/usr/lib/python3.6/site-packages/urwid/widget.py", line 141, in cached_render
canv = fn(self, size, focus=focus)
File "/usr/lib/python3.6/site-packages/pudb/var_view.py", line 163, in render
return make_canvas(text, attr, maxcol, apfx+"value")
File "/usr/lib/python3.6/site-packages/pudb/ui_tools.py", line 48, in make_canvas
maxcol=maxcol)
File "/usr/lib/python3.6/site-packages/urwid/canvas.py", line 356, in __init__
raise CanvasError("Canvas text is wider than the maxcol specified \n%r\n%r\n%r"%(maxcol,widths,text))
urwid.canvas.CanvasError: Canvas text is wider than the maxcol specified
53
[55]
[b"data: '\xe4\xb8\xad\xe6\x96\x87' "]
```
This is a Python 3.6.0 on Arch Linux, with zh_CN.UTF-8 locale. And pudb is "python-pudb 2017.1.1-1" (the pudb3 script doesn't accept `--version` nor `-V` :-( ) | inducer/pudb | diff --git a/test/test_make_canvas.py b/test/test_make_canvas.py
index 093cd63..b1ed681 100644
--- a/test/test_make_canvas.py
+++ b/test/test_make_canvas.py
@@ -49,6 +49,19 @@ def test_byte_boundary():
)
assert list(canvas.content()) == [[('var value', None, b'aaaaaa\xc3\xa9')]]
+def test_wide_chars():
+ text = u"data: '中文'"
+ canvas = make_canvas(
+ txt=[text],
+ attr=[[('var label', 6), ('var value', 4)]],
+ maxcol=47,
+ )
+ assert list(canvas.content()) == [[
+ ('var label', None, b'data: '),
+ ('var value', None, u"'中文'".encode('utf-8')),
+ (None, None, b' '*(47 - 12)), # 10 chars, 2 of which are double width
+ ]]
+
if __name__ == "__main__":
import sys
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 1
} | 2017.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-mock"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements/dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
coverage==6.2
importlib-metadata==4.8.3
iniconfig==1.1.1
packaging==21.3
pluggy==1.0.0
-e git+https://github.com/inducer/pudb.git@fef17b6f33da7d03758c150b37cd2f84754aa01d#egg=pudb
py==1.11.0
Pygments==2.14.0
pyparsing==3.1.4
pytest==7.0.1
pytest-cov==4.0.0
pytest-mock==3.6.1
tomli==1.2.3
typing_extensions==4.1.1
urwid==2.1.2
zipp==3.6.0
| name: pudb
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- coverage==6.2
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pygments==2.14.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-cov==4.0.0
- pytest-mock==3.6.1
- tomli==1.2.3
- typing-extensions==4.1.1
- urwid==2.1.2
- zipp==3.6.0
prefix: /opt/conda/envs/pudb
| [
"test/test_make_canvas.py::test_wide_chars"
] | [] | [
"test/test_make_canvas.py::test_simple",
"test/test_make_canvas.py::test_multiple",
"test/test_make_canvas.py::test_boundary",
"test/test_make_canvas.py::test_byte_boundary"
] | [] | MIT/X Consortium license | 1,433 | 225 | [
"pudb/ui_tools.py"
] |
|
streamlink__streamlink-1070 | 4761570f479ba51ffeb099a4e8a2ed3fea6df72d | 2017-07-06 16:07:35 | 0521ae3ca127f7cc600f1adcbc18b302760889ab | Vangelis66: Many thanks, but where exactly has https://github.com/streamlink/streamlink/issues/1059 gone ???
unlocKing: Looks like #1059 and #1067 have both been deleted
beardypig: Oops forgot to update the tests too...
beardypig: Perhaps the user that created them has been blocked by github for posting URLs... | diff --git a/src/streamlink/plugins/app17.py b/src/streamlink/plugins/app17.py
index d3f7b075..691d8a2a 100644
--- a/src/streamlink/plugins/app17.py
+++ b/src/streamlink/plugins/app17.py
@@ -68,23 +68,26 @@ class App17(Plugin):
self.logger.info("Stream currently unavailable.")
return
- http_url = _rtmp_re.search(res.text).group(1)
- yield "live", HTTPStream(self.session, http_url)
-
- if 'pull-rtmp' in http_url:
- url = http_url.replace("http:", "rtmp:").replace(".flv", "")
+ url = _rtmp_re.search(res.text).group(1)
+ if 'rtmp:' in url:
stream = RTMPStream(self.session, {
"rtmp": url,
"live": True
})
yield "live", stream
-
- if 'wansu-global-pull-rtmp' in http_url:
- url = http_url.replace(".flv", "/playlist.m3u8")
+ else:
+ yield "live", HTTPStream(self.session, url)
+
+ if '17app.co' in url:
+ prefix = url.replace("rtmp:", "http:").replace(".flv", "/playlist.m3u8")
+ if '/playlist.m3u8' not in prefix:
+ url = prefix + "/playlist.m3u8"
+ else:
+ url = prefix
for stream in HLSStream.parse_variant_playlist(self.session, url).items():
yield stream
else:
- url = http_url.replace(".flv", ".m3u8")
+ url = url.replace(".flv", ".m3u8")
yield "live", HLSStream(self.session, url)
diff --git a/src/streamlink/plugins/hitbox.py b/src/streamlink/plugins/hitbox.py
index ddb45036..4d7a3e9b 100644
--- a/src/streamlink/plugins/hitbox.py
+++ b/src/streamlink/plugins/hitbox.py
@@ -178,7 +178,7 @@ class Hitbox(Plugin):
if not media_id:
res = http.get(LIVE_API.format(channel))
livestream = http.json(res, schema=_live_schema)
- if livestream.get("media_hosted_media"):
+ if livestream["media_hosted_media"]:
hosted = _live_schema.validate(livestream["media_hosted_media"])
self.logger.info("{0} is hosting {1}", livestream["media_user_name"], hosted["media_user_name"])
livestream = hosted
diff --git a/src/streamlink/plugins/npo.py b/src/streamlink/plugins/npo.py
index 840af212..f04aaade 100644
--- a/src/streamlink/plugins/npo.py
+++ b/src/streamlink/plugins/npo.py
@@ -1,13 +1,8 @@
"""Plugin for NPO: Nederlandse Publieke Omroep
Supports:
- VODs:
- - https://www.npo.nl/nos-journaal/07-07-2017/POW_03375651
- - https://www.zapp.nl/topdoks/gemist/VPWON_1276930
- - https://zappelin.nl/10-voor/gemist/VPWON_1271522
- Live:
- - https://www.npo.nl/live/npo-1
- - https://zappelin.nl/tv-kijken
+ VODs: http://www.npo.nl/het-zandkasteel/POMS_S_NTR_059963
+ Live: http://www.npo.nl/live/nederland-1
"""
import re
@@ -17,14 +12,12 @@ from streamlink.plugin.api import http
from streamlink.plugin.api import useragents
from streamlink.plugin.api import validate
from streamlink.stream import HLSStream
-from streamlink.stream import HTTPStream
from streamlink.utils import parse_json
class NPO(Plugin):
api_url = "http://ida.omroep.nl/app.php/{endpoint}"
- url_re = re.compile(r"https?://(\w+\.)?(npo\.nl|zapp\.nl|zappelin\.nl)/")
- media_id_re = re.compile(r'''<npo-player\smedia-id=["'](?P<media_id>[^"']+)["']''')
+ url_re = re.compile(r"https?://(\w+\.)?(npo.nl|zapp.nl|zappelin.nl)/")
prid_re = re.compile(r'''(?:data(-alt)?-)?prid\s*[=:]\s*(?P<q>["'])(\w+)(?P=q)''')
react_re = re.compile(r'''data-react-props\s*=\s*(?P<q>["'])(?P<data>.*?)(?P=q)''')
@@ -85,11 +78,6 @@ class NPO(Plugin):
data = parse_json(m.group("data").replace(""", '"'))
bprid = data.get("mid")
- if bprid is None:
- m = self.media_id_re.search(res.text)
- if m:
- bprid = m.group('media_id')
-
return bprid
def _get_streams(self):
@@ -103,7 +91,7 @@ class NPO(Plugin):
schema=self.streams_schema)
for stream in streams:
- if stream["format"] in ("adaptive", "hls", "mp4"):
+ if stream["format"] in ("adaptive", "hls"):
if stream["contentType"] == "url":
stream_url = stream["url"]
else:
@@ -114,10 +102,8 @@ class NPO(Plugin):
stream_url = http.json(http.get(info_url),
schema=self.stream_info_schema)
- if stream["format"] in ("adaptive", "hls"):
- for s in HLSStream.parse_variant_playlist(self.session, stream_url).items():
- yield s
- elif stream["format"] in ("mp3", "mp4"):
- yield "vod", HTTPStream(self.session, stream_url)
+ for s in HLSStream.parse_variant_playlist(self.session, stream_url).items():
+ yield s
+
__plugin__ = NPO
diff --git a/src/streamlink/plugins/tvplayer.py b/src/streamlink/plugins/tvplayer.py
index 249e85fa..f79474e6 100644
--- a/src/streamlink/plugins/tvplayer.py
+++ b/src/streamlink/plugins/tvplayer.py
@@ -15,7 +15,7 @@ class TVPlayer(Plugin):
dummy_postcode = "SE1 9LT" # location of ITV HQ in London
url_re = re.compile(r"https?://(?:www.)?tvplayer.com/(:?watch/?|watch/(.+)?)")
- stream_attrs_re = re.compile(r'data-(resource|token)\s*=\s*"(.*?)"', re.S)
+ stream_attrs_re = re.compile(r'data-(resource|token|channel-id)\s*=\s*"(.*?)"', re.S)
login_token_re = re.compile(r'input.*?name="token".*?value="(\w+)"')
stream_schema = validate.Schema({
"tvplayer": validate.Schema({
@@ -58,20 +58,22 @@ class TVPlayer(Plugin):
# there is a 302 redirect on a successful login
return res2.status_code == 302
- def _get_stream_data(self, resource, token, service=1):
+ def _get_stream_data(self, resource, channel_id, token, service=1):
# Get the context info (validation token and platform)
self.logger.debug("Getting stream information for resource={0}".format(resource))
context_res = http.get(self.context_url, params={"resource": resource,
"gen": token})
context_data = http.json(context_res, schema=self.context_schema)
+ self.logger.debug("Context data: {0}", str(context_data))
# get the stream urls
res = http.post(self.api_url, data=dict(
service=service,
- id=resource,
+ id=channel_id,
validate=context_data["validate"],
token=context_data.get("token"),
- platform=context_data["platform"]["key"]))
+ platform=context_data["platform"]["key"]),
+ raise_for_status=False)
return http.json(res, schema=self.stream_schema)
@@ -91,7 +93,8 @@ class TVPlayer(Plugin):
data=dict(postcode=self.dummy_postcode),
params=dict(return_url=self.url))
- stream_attrs = dict((k, v.strip('"')) for k, v in self.stream_attrs_re.findall(res.text))
+ stream_attrs = dict((k.replace("-", "_"), v.strip('"')) for k, v in self.stream_attrs_re.findall(res.text))
+ self.logger.debug("Got stream attrs: {0}", str(stream_attrs))
if "resource" in stream_attrs and "token" in stream_attrs:
stream_data = self._get_stream_data(**stream_attrs)
| tvplayer plugin broken
https://tvplayer.com/watch/bbcone
Unable to open URL: http://api.tvplayer.com/api/v2/stream/live (400 Client Error: Bad Request for url: http://api.tvplayer.com/api/v2/stream/live) | streamlink/streamlink | diff --git a/tests/test_plugin_tvplayer.py b/tests/test_plugin_tvplayer.py
index 52f27dc0..f9f13367 100644
--- a/tests/test_plugin_tvplayer.py
+++ b/tests/test_plugin_tvplayer.py
@@ -41,7 +41,7 @@ class TestPluginTVPlayer(unittest.TestCase):
page_resp = Mock()
page_resp.text = u"""
<div class="video-js theoplayer-skin theo-seekbar-above-controls content-box vjs-fluid"
- data-resource= "89"
+ data-resource= "bbcone"
data-token = "1324567894561268987948596154656418448489159"
data-content-type="live"
data-environment="live"
@@ -54,6 +54,7 @@ class TestPluginTVPlayer(unittest.TestCase):
mock_http.get.return_value = page_resp
hlsstream.parse_variant_playlist.return_value = {"test": HLSStream(self.session, "http://test.se/stream1")}
+ TVPlayer.bind(self.session, "test.plugin.tvplayer")
plugin = TVPlayer("http://tvplayer.com/watch/dave")
streams = plugin.get_streams()
@@ -63,7 +64,7 @@ class TestPluginTVPlayer(unittest.TestCase):
# test the url is used correctly
mock_http.get.assert_called_with("http://tvplayer.com/watch/dave")
# test that the correct API call is made
- mock_get_stream_data.assert_called_with(resource="89", token="1324567894561268987948596154656418448489159")
+ mock_get_stream_data.assert_called_with(resource="bbcone", channel_id="89", token="1324567894561268987948596154656418448489159")
# test that the correct URL is used for the HLSStream
hlsstream.parse_variant_playlist.assert_called_with(ANY, "http://test.se/stream1")
@@ -76,6 +77,7 @@ class TestPluginTVPlayer(unittest.TestCase):
"""
mock_http.get.return_value = page_resp
+ TVPlayer.bind(self.session, "test.plugin.tvplayer")
plugin = TVPlayer("http://tvplayer.com/watch/dave")
streams = plugin.get_streams()
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 4
} | 0.7 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"codecov",
"coverage",
"mock",
"pynsist",
"unittest2"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"dev-requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
charset-normalizer==2.0.12
codecov==2.1.13
coverage==6.2
distlib==0.3.9
idna==3.10
importlib-metadata==4.8.3
iniconfig==1.1.1
iso-639==0.4.5
iso3166==2.1.1
Jinja2==3.0.3
linecache2==1.0.0
MarkupSafe==2.0.1
mock==5.2.0
packaging==21.3
pluggy==1.0.0
py==1.11.0
pycryptodome==3.21.0
pynsist==2.8
pyparsing==3.1.4
pytest==7.0.1
pytest-cov==4.0.0
requests==2.27.1
requests_download==0.1.2
six==1.17.0
-e git+https://github.com/streamlink/streamlink.git@4761570f479ba51ffeb099a4e8a2ed3fea6df72d#egg=streamlink
tomli==1.2.3
traceback2==1.4.0
typing_extensions==4.1.1
unittest2==1.1.0
urllib3==1.26.20
yarg==0.1.10
zipp==3.6.0
| name: streamlink
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- argparse==1.4.0
- attrs==22.2.0
- charset-normalizer==2.0.12
- codecov==2.1.13
- coverage==6.2
- distlib==0.3.9
- idna==3.10
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- iso-639==0.4.5
- iso3166==2.1.1
- jinja2==3.0.3
- linecache2==1.0.0
- markupsafe==2.0.1
- mock==5.2.0
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pycryptodome==3.21.0
- pynsist==2.8
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-cov==4.0.0
- requests==2.27.1
- requests-download==0.1.2
- six==1.17.0
- tomli==1.2.3
- traceback2==1.4.0
- typing-extensions==4.1.1
- unittest2==1.1.0
- urllib3==1.26.20
- yarg==0.1.10
- zipp==3.6.0
prefix: /opt/conda/envs/streamlink
| [
"tests/test_plugin_tvplayer.py::TestPluginTVPlayer::test_get_streams"
] | [] | [
"tests/test_plugin_tvplayer.py::TestPluginTVPlayer::test_can_handle_url",
"tests/test_plugin_tvplayer.py::TestPluginTVPlayer::test_get_invalid_page"
] | [] | BSD 2-Clause "Simplified" License | 1,437 | 2,123 | [
"src/streamlink/plugins/app17.py",
"src/streamlink/plugins/hitbox.py",
"src/streamlink/plugins/npo.py",
"src/streamlink/plugins/tvplayer.py"
] |
vertexproject__synapse-331 | 178f474d2cb47ab261cb3cdb8249f0f353e8e1c9 | 2017-07-06 23:03:05 | 6f5fc661a88b8cc3f4befb2c9c7ddcebf0b89ba0 | diff --git a/synapse/lib/storm.py b/synapse/lib/storm.py
index 376aee35f..8f49cdd36 100644
--- a/synapse/lib/storm.py
+++ b/synapse/lib/storm.py
@@ -1154,20 +1154,7 @@ class Runtime(Configable):
raise s_common.BadSyntaxError(name=prop, mesg=mesg)
continue # pragma: no cover
- if prop.startswith(forms):
- valid = False
- for form in forms:
- if prop.startswith(form + ':') and core.isSetPropOk(prop):
- _prop = prop[len(form) + 1:]
- formprops[form][_prop] = valu
- valid = True
- break
- if not valid:
- mesg = 'Full prop is not valid on any lifted forms.'
- raise s_common.BadSyntaxError(name=prop, mesg=mesg)
- continue # pragma: no cover
-
- mesg = 'setprop operator requires props to start with relative or full prop names.'
+ mesg = 'setprop operator requires props to start with relative prop names.'
raise s_common.BadSyntaxError(name=prop, mesg=mesg)
for form, nodes in formnodes.items():
| setprop() should take both full props and rel props
```
setprop(foo:bar:baz=10)
- or -
setprop(:baz=10)
```
rather than
```
setprop(baz=10)
``` | vertexproject/synapse | diff --git a/synapse/tests/test_lib_storm.py b/synapse/tests/test_lib_storm.py
index 1cede3be2..e23984862 100644
--- a/synapse/tests/test_lib_storm.py
+++ b/synapse/tests/test_lib_storm.py
@@ -52,54 +52,23 @@ class StormTest(SynTest):
self.eq(node[1].get('inet:netuser'), 'vertex.link/pennywise')
self.eq(node[1].get('inet:netuser:realname'), 'robert gray')
- # Full prop val syntax
- node = core.eval('inet:netuser=vertex.link/pennywise setprop(inet:netuser:signup="1970-01-01")')[0]
- self.eq(node[1].get('inet:netuser'), 'vertex.link/pennywise')
- self.eq(node[1].get('inet:netuser:signup'), 0)
-
- # Combined syntax using both relative props and full props together
- cmd = 'inet:netuser=vertex.link/pennywise setprop(:seen:min="2000", :seen:max="2017", ' \
- 'inet:netuser:[email protected], inet:netuser:signup:ipv4="127.0.0.1")'
+ # Can set multiple props at once
+ cmd = 'inet:netuser=vertex.link/pennywise setprop(:seen:min="2000", :seen:max="2017")'
node = core.eval(cmd)[0]
self.nn(node[1].get('inet:netuser:seen:min'))
self.nn(node[1].get('inet:netuser:seen:max'))
- self.nn(node[1].get('inet:netuser:signup:ipv4'))
- self.eq(node[1].get('inet:netuser:email'), '[email protected]')
# old / bad syntax fails
# kwlist key/val syntax is no longer valid in setprop()
node = core.formTufoByProp('inet:fqdn', 'vertex.link')
bad_cmd = 'inet:fqdn=vertex.link setprop(created="2016-05-05",updated="2017/05/05")'
self.raises(BadSyntaxError, core.eval, bad_cmd)
- # a full prop which isn't valid for the node is bad
- bad_cmd = 'inet:fqdn=vertex.link setprop(inet:fqdn:typocreated="2016-05-05")'
- self.raises(BadSyntaxError, core.eval, bad_cmd)
# a rel prop which isn't valid for the node is bad
bad_cmd = 'inet:fqdn=vertex.link setprop(:typocreated="2016-05-05")'
self.raises(BadSyntaxError, core.eval, bad_cmd)
-
- # test possible form confusion
- modl = {
- 'types': (
- ('foo:bar', {'subof': 'str'}),
- ('foo:barbaz', {'subof': 'str'})
- ),
- 'forms': (
- ('foo:bar', {'ptype': 'str'}, [
- ('blah', {'ptype': 'str'})
- ]),
- ('foo:barbaz', {'ptype': 'str'}, [
- ('blah', {'ptype': 'str'})
- ]),
- )
- }
- core.addDataModel('form_confusion', modl)
- node = core.formTufoByProp('foo:bar', 'hehe')
- core.addTufoTag(node, 'confusion')
- node = core.formTufoByProp('foo:barbaz', 'haha')
- core.addTufoTag(node, 'confusion')
- node = core.eval('''#confusion setprop(foo:barbaz:blah=duck) +foo:barbaz''')[0]
- self.eq(node[1].get('foo:barbaz:blah'), 'duck')
+ # full prop syntax is not acceptable
+ bad_cmd = 'inet:netuser=vertex.link/pennywise setprop(inet:netuser:signup="1970-01-01")'
+ self.raises(BadSyntaxError, core.eval, bad_cmd)
def test_storm_filt_regex(self):
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 1
} | 0.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"nose",
"coverage",
"pytest"
],
"pre_install": null,
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
cffi==1.15.1
coverage==6.2
cryptography==40.0.2
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
lmdb==1.6.2
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
msgpack-python==0.5.6
nose==1.3.7
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pycparser==2.21
pyOpenSSL==23.2.0
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
-e git+https://github.com/vertexproject/synapse.git@178f474d2cb47ab261cb3cdb8249f0f353e8e1c9#egg=synapse
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
tornado==6.1
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
xxhash==3.2.0
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: synapse
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- cffi==1.15.1
- coverage==6.2
- cryptography==40.0.2
- lmdb==1.6.2
- msgpack-python==0.5.6
- nose==1.3.7
- pycparser==2.21
- pyopenssl==23.2.0
- tornado==6.1
- xxhash==3.2.0
prefix: /opt/conda/envs/synapse
| [
"synapse/tests/test_lib_storm.py::StormTest::test_storm_setprop"
] | [] | [
"synapse/tests/test_lib_storm.py::StormTest::test_storm_addnode",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_addtag",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_alltag",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_cmpr_norm",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_delnode",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_delnode_caching",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_deltag",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_edit_end",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_editmode",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_filt_regex",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_lift",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_lifts_by",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_pivot",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_refs",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_show_help",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_tag_fromtag",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_tag_glob",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_tag_ival",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_tag_jointag",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_tag_query",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_tag_totag",
"synapse/tests/test_lib_storm.py::LimitTest::test_limit_behavior",
"synapse/tests/test_lib_storm.py::LimitTest::test_limit_behavior_negatives",
"synapse/tests/test_lib_storm.py::LimitTest::test_limit_default"
] | [] | Apache License 2.0 | 1,439 | 304 | [
"synapse/lib/storm.py"
] |
|
pre-commit__pre-commit-556 | 853cbecd4e15aeb59e4730320dc90fe16afa219e | 2017-07-09 03:28:10 | ce7481f75b3ece0d6d88a04f62a4c51665e0efb8 | asottile: test failures are expected, I'll fix those up | diff --git a/pre_commit/languages/all.py b/pre_commit/languages/all.py
index f441ddd..5546025 100644
--- a/pre_commit/languages/all.py
+++ b/pre_commit/languages/all.py
@@ -10,16 +10,18 @@ from pre_commit.languages import script
from pre_commit.languages import swift
from pre_commit.languages import system
-# A language implements the following constant and two functions in its module:
+# A language implements the following constant and functions in its module:
#
# # Use None for no environment
# ENVIRONMENT_DIR = 'foo_env'
#
-# def install_environment(
-# repo_cmd_runner,
-# version='default',
-# additional_dependencies=(),
-# ):
+# def get_default_version():
+# """Return a value to replace the 'default' value for language_version.
+#
+# return 'default' if there is no better option.
+# """
+#
+# def install_environment(repo_cmd_runner, version, additional_dependencies):
# """Installs a repository in the given repository. Note that the current
# working directory will already be inside the repository.
#
diff --git a/pre_commit/languages/docker.py b/pre_commit/languages/docker.py
index 7d3f8d0..59dc1b4 100644
--- a/pre_commit/languages/docker.py
+++ b/pre_commit/languages/docker.py
@@ -14,6 +14,7 @@ from pre_commit.xargs import xargs
ENVIRONMENT_DIR = 'docker'
PRE_COMMIT_LABEL = 'PRE_COMMIT'
+get_default_version = helpers.basic_get_default_version
def md5(s): # pragma: windows no cover
@@ -55,9 +56,7 @@ def build_docker_image(repo_cmd_runner, **kwargs): # pragma: windows no cover
def install_environment(
- repo_cmd_runner,
- version='default',
- additional_dependencies=(),
+ repo_cmd_runner, version, additional_dependencies,
): # pragma: windows no cover
assert repo_cmd_runner.exists('Dockerfile'), (
'No Dockerfile was found in the hook repository'
diff --git a/pre_commit/languages/golang.py b/pre_commit/languages/golang.py
index c0bfbcb..ee04ca7 100644
--- a/pre_commit/languages/golang.py
+++ b/pre_commit/languages/golang.py
@@ -14,6 +14,7 @@ from pre_commit.xargs import xargs
ENVIRONMENT_DIR = 'golangenv'
+get_default_version = helpers.basic_get_default_version
def get_env_patch(venv):
@@ -44,11 +45,7 @@ def guess_go_dir(remote_url):
return 'unknown_src_dir'
-def install_environment(
- repo_cmd_runner,
- version='default',
- additional_dependencies=(),
-):
+def install_environment(repo_cmd_runner, version, additional_dependencies):
helpers.assert_version_default('golang', version)
directory = repo_cmd_runner.path(
helpers.environment_dir(ENVIRONMENT_DIR, 'default'),
diff --git a/pre_commit/languages/helpers.py b/pre_commit/languages/helpers.py
index a6c93de..6af77e3 100644
--- a/pre_commit/languages/helpers.py
+++ b/pre_commit/languages/helpers.py
@@ -33,3 +33,7 @@ def assert_no_additional_deps(lang, additional_deps):
'For now, pre-commit does not support '
'additional_dependencies for {}'.format(lang),
)
+
+
+def basic_get_default_version():
+ return 'default'
diff --git a/pre_commit/languages/node.py b/pre_commit/languages/node.py
index ef557a1..b5f7c56 100644
--- a/pre_commit/languages/node.py
+++ b/pre_commit/languages/node.py
@@ -12,6 +12,7 @@ from pre_commit.xargs import xargs
ENVIRONMENT_DIR = 'node_env'
+get_default_version = helpers.basic_get_default_version
def get_env_patch(venv): # pragma: windows no cover
@@ -34,9 +35,7 @@ def in_env(repo_cmd_runner, language_version): # pragma: windows no cover
def install_environment(
- repo_cmd_runner,
- version='default',
- additional_dependencies=(),
+ repo_cmd_runner, version, additional_dependencies,
): # pragma: windows no cover
additional_dependencies = tuple(additional_dependencies)
assert repo_cmd_runner.exists('package.json')
diff --git a/pre_commit/languages/pcre.py b/pre_commit/languages/pcre.py
index 314ea09..faba539 100644
--- a/pre_commit/languages/pcre.py
+++ b/pre_commit/languages/pcre.py
@@ -2,18 +2,16 @@ from __future__ import unicode_literals
import sys
+from pre_commit.languages import helpers
from pre_commit.xargs import xargs
ENVIRONMENT_DIR = None
GREP = 'ggrep' if sys.platform == 'darwin' else 'grep'
+get_default_version = helpers.basic_get_default_version
-def install_environment(
- repo_cmd_runner,
- version='default',
- additional_dependencies=(),
-):
+def install_environment(repo_cmd_runner, version, additional_dependencies):
"""Installation for pcre type is a noop."""
raise AssertionError('Cannot install pcre repo.')
diff --git a/pre_commit/languages/python.py b/pre_commit/languages/python.py
index 634abe5..715d585 100644
--- a/pre_commit/languages/python.py
+++ b/pre_commit/languages/python.py
@@ -1,7 +1,6 @@
from __future__ import unicode_literals
import contextlib
-import distutils.spawn
import os
import sys
@@ -9,11 +8,13 @@ from pre_commit.envcontext import envcontext
from pre_commit.envcontext import UNSET
from pre_commit.envcontext import Var
from pre_commit.languages import helpers
+from pre_commit.parse_shebang import find_executable
from pre_commit.util import clean_path_on_failure
from pre_commit.xargs import xargs
ENVIRONMENT_DIR = 'py_env'
+get_default_version = helpers.basic_get_default_version
def bin_dir(venv):
@@ -39,10 +40,53 @@ def in_env(repo_cmd_runner, language_version):
yield
+def _get_default_version(): # pragma: no cover (platform dependent)
+ def _norm(path):
+ _, exe = os.path.split(path.lower())
+ exe, _, _ = exe.partition('.exe')
+ if find_executable(exe) and exe not in {'python', 'pythonw'}:
+ return exe
+
+ # First attempt from `sys.executable` (or the realpath)
+ # On linux, I see these common sys.executables:
+ #
+ # system `python`: /usr/bin/python -> python2.7
+ # system `python2`: /usr/bin/python2 -> python2.7
+ # virtualenv v: v/bin/python (will not return from this loop)
+ # virtualenv v -ppython2: v/bin/python -> python2
+ # virtualenv v -ppython2.7: v/bin/python -> python2.7
+ # virtualenv v -ppypy: v/bin/python -> v/bin/pypy
+ for path in {sys.executable, os.path.realpath(sys.executable)}:
+ exe = _norm(path)
+ if exe:
+ return exe
+
+ # Next try the `pythonX.X` executable
+ exe = 'python{}.{}'.format(*sys.version_info)
+ if find_executable(exe):
+ return exe
+
+ # Give a best-effort try for windows
+ if os.path.exists(r'C:\{}\python.exe'.format(exe.replace('.', ''))):
+ return exe
+
+ # We tried!
+ return 'default'
+
+
+def get_default_version():
+ # TODO: when dropping python2, use `functools.lru_cache(maxsize=1)`
+ try:
+ return get_default_version.cached_version
+ except AttributeError:
+ get_default_version.cached_version = _get_default_version()
+ return get_default_version()
+
+
def norm_version(version):
if os.name == 'nt': # pragma: no cover (windows)
# Try looking up by name
- if distutils.spawn.find_executable(version):
+ if find_executable(version) and find_executable(version) != version:
return version
# If it is in the form pythonx.x search in the default
@@ -54,11 +98,7 @@ def norm_version(version):
return os.path.expanduser(version)
-def install_environment(
- repo_cmd_runner,
- version='default',
- additional_dependencies=(),
-):
+def install_environment(repo_cmd_runner, version, additional_dependencies):
additional_dependencies = tuple(additional_dependencies)
directory = helpers.environment_dir(ENVIRONMENT_DIR, version)
diff --git a/pre_commit/languages/ruby.py b/pre_commit/languages/ruby.py
index d3896d9..26e303c 100644
--- a/pre_commit/languages/ruby.py
+++ b/pre_commit/languages/ruby.py
@@ -16,6 +16,7 @@ from pre_commit.xargs import xargs
ENVIRONMENT_DIR = 'rbenv'
+get_default_version = helpers.basic_get_default_version
def get_env_patch(venv, language_version): # pragma: windows no cover
@@ -97,9 +98,7 @@ def _install_ruby(runner, version): # pragma: windows no cover
def install_environment(
- repo_cmd_runner,
- version='default',
- additional_dependencies=(),
+ repo_cmd_runner, version, additional_dependencies,
): # pragma: windows no cover
additional_dependencies = tuple(additional_dependencies)
directory = helpers.environment_dir(ENVIRONMENT_DIR, version)
diff --git a/pre_commit/languages/script.py b/pre_commit/languages/script.py
index 762ae76..c4b6593 100644
--- a/pre_commit/languages/script.py
+++ b/pre_commit/languages/script.py
@@ -5,13 +5,10 @@ from pre_commit.xargs import xargs
ENVIRONMENT_DIR = None
+get_default_version = helpers.basic_get_default_version
-def install_environment(
- repo_cmd_runner,
- version='default',
- additional_dependencies=(),
-):
+def install_environment(repo_cmd_runner, version, additional_dependencies):
"""Installation for script type is a noop."""
raise AssertionError('Cannot install script repo.')
diff --git a/pre_commit/languages/swift.py b/pre_commit/languages/swift.py
index 4d171c5..a27dfac 100644
--- a/pre_commit/languages/swift.py
+++ b/pre_commit/languages/swift.py
@@ -10,6 +10,7 @@ from pre_commit.util import clean_path_on_failure
from pre_commit.xargs import xargs
ENVIRONMENT_DIR = 'swift_env'
+get_default_version = helpers.basic_get_default_version
BUILD_DIR = '.build'
BUILD_CONFIG = 'release'
@@ -29,9 +30,7 @@ def in_env(repo_cmd_runner): # pragma: windows no cover
def install_environment(
- repo_cmd_runner,
- version='default',
- additional_dependencies=(),
+ repo_cmd_runner, version, additional_dependencies,
): # pragma: windows no cover
helpers.assert_version_default('swift', version)
helpers.assert_no_additional_deps('swift', additional_dependencies)
diff --git a/pre_commit/languages/system.py b/pre_commit/languages/system.py
index c9e1c5d..3148079 100644
--- a/pre_commit/languages/system.py
+++ b/pre_commit/languages/system.py
@@ -5,13 +5,10 @@ from pre_commit.xargs import xargs
ENVIRONMENT_DIR = None
+get_default_version = helpers.basic_get_default_version
-def install_environment(
- repo_cmd_runner,
- version='default',
- additional_dependencies=(),
-):
+def install_environment(repo_cmd_runner, version, additional_dependencies):
"""Installation for system type is a noop."""
raise AssertionError('Cannot install system repo.')
diff --git a/pre_commit/manifest.py b/pre_commit/manifest.py
index 888ad6d..081f3c6 100644
--- a/pre_commit/manifest.py
+++ b/pre_commit/manifest.py
@@ -7,6 +7,7 @@ from cached_property import cached_property
import pre_commit.constants as C
from pre_commit.clientlib import load_manifest
+from pre_commit.languages.all import languages
logger = logging.getLogger('pre_commit')
@@ -38,4 +39,10 @@ class Manifest(object):
@cached_property
def hooks(self):
- return {hook['id']: hook for hook in self.manifest_contents}
+ ret = {}
+ for hook in self.manifest_contents:
+ if hook['language_version'] == 'default':
+ language = languages[hook['language']]
+ hook['language_version'] = language.get_default_version()
+ ret[hook['id']] = hook
+ return ret
| Detect the python version when creating `default` py_envs
Given two separate virtualenvs, one running with python3 and the other running with python2, and each with the appropriate pre-commit installed (`pip install pre-commit` in the first, `pip3 install pre-commit` in the second), the check-ast plugin fails to parse Python 2 (or 3) syntax correctly, depending on which virtualenv pre-commit was installed in first.
Suggestions:
- the ~/.pre-commit cache should instead be a per virtualenv cache, rather than per user.
- instead of maintaining the cache, consider just using pip dependencies instead.
Example failure in Python 2 venv if Python 3 venv installs pre-commit first:
```
p2/main.py: failed parsing with python3.4:
Traceback (most recent call last):
File "/home/ubuntu/.pre-commit/repo_3mr61_f/py_env-default/lib/python3.4/site-packages/pre_commit_hooks/check_ast.py", line 23, in check_ast
ast.parse(open(filename, 'rb').read(), filename=filename)
File "/usr/lib/python3.4/ast.py", line 35, in parse
return compile(source, filename, mode, PyCF_ONLY_AST)
File "p2/main.py", line 178
except socket.error, e:
^
SyntaxError: invalid syntax
```
Example failure in Python 3 venv if Python 2 venv installs pre-commit first:
```
p3/__main__.py: failed parsing with python:
Traceback (most recent call last):
File "/home/ubuntu/.pre-commit/repoCzM4lg/py_env-default/local/lib/python2.7/site-packages/pre_commit_hooks/check_ast.py", line 23, in check_ast
ast.parse(open(filename, 'rb').read(), filename=filename)
File "/usr/lib/python2.7/ast.py", line 37, in parse
return compile(source, filename, mode, PyCF_ONLY_AST)
File "p3/__main__.py", line 30
"c=%s" % args.c, file=sys.stderr)
^
SyntaxError: invalid syntax
```
`rm -fr ~/.pre-commit` was run between each test.
| pre-commit/pre-commit | diff --git a/tests/languages/all_test.py b/tests/languages/all_test.py
index 73b89cb..dd1ed27 100644
--- a/tests/languages/all_test.py
+++ b/tests/languages/all_test.py
@@ -12,9 +12,7 @@ from pre_commit.languages.all import languages
def test_install_environment_argspec(language):
expected_argspec = inspect.ArgSpec(
args=['repo_cmd_runner', 'version', 'additional_dependencies'],
- varargs=None,
- keywords=None,
- defaults=('default', ()),
+ varargs=None, keywords=None, defaults=None,
)
argspec = inspect.getargspec(languages[language].install_environment)
assert argspec == expected_argspec
@@ -33,3 +31,12 @@ def test_run_hook_argpsec(language):
)
argspec = inspect.getargspec(languages[language].run_hook)
assert argspec == expected_argspec
+
+
[email protected]('language', all_languages)
+def test_get_default_version_argspec(language):
+ expected_argspec = inspect.ArgSpec(
+ args=[], varargs=None, keywords=None, defaults=None,
+ )
+ argspec = inspect.getargspec(languages[language].get_default_version)
+ assert argspec == expected_argspec
diff --git a/tests/manifest_test.py b/tests/manifest_test.py
index 7db886c..ada004f 100644
--- a/tests/manifest_test.py
+++ b/tests/manifest_test.py
@@ -11,8 +11,7 @@ from testing.util import get_head_sha
@pytest.yield_fixture
def manifest(store, tempdir_factory):
path = make_repo(tempdir_factory, 'script_hooks_repo')
- head_sha = get_head_sha(path)
- repo_path = store.clone(path, head_sha)
+ repo_path = store.clone(path, get_head_sha(path))
yield Manifest(repo_path, path)
@@ -76,3 +75,13 @@ def test_legacy_manifest_warn(store, tempdir_factory, log_warning_mock):
'If `pre-commit autoupdate` does not silence this warning consider '
'making an issue / pull request.'.format(path)
)
+
+
+def test_default_python_language_version(store, tempdir_factory):
+ path = make_repo(tempdir_factory, 'python_hooks_repo')
+ repo_path = store.clone(path, get_head_sha(path))
+ manifest = Manifest(repo_path, path)
+
+ # This assertion is difficult as it is version dependent, just assert
+ # that it is *something*
+ assert manifest.hooks['foo']['language_version'] != 'default'
diff --git a/tests/repository_test.py b/tests/repository_test.py
index f91642e..7131d75 100644
--- a/tests/repository_test.py
+++ b/tests/repository_test.py
@@ -442,7 +442,7 @@ def test_venvs(tempdir_factory, store):
config = make_config_from_repo(path)
repo = Repository.create(config, store)
venv, = repo._venvs
- assert venv == (mock.ANY, 'python', 'default', [])
+ assert venv == (mock.ANY, 'python', python.get_default_version(), [])
@pytest.mark.integration
@@ -452,7 +452,7 @@ def test_additional_dependencies(tempdir_factory, store):
config['hooks'][0]['additional_dependencies'] = ['pep8']
repo = Repository.create(config, store)
venv, = repo._venvs
- assert venv == (mock.ANY, 'python', 'default', ['pep8'])
+ assert venv == (mock.ANY, 'python', python.get_default_version(), ['pep8'])
@pytest.mark.integration
@@ -591,7 +591,8 @@ def test_control_c_control_c_on_install(tempdir_factory, store):
repo.run_hook(hook, [])
# Should have made an environment, however this environment is broken!
- assert os.path.exists(repo._cmd_runner.path('py_env-default'))
+ envdir = 'py_env-{}'.format(python.get_default_version())
+ assert repo._cmd_runner.exists(envdir)
# However, it should be perfectly runnable (reinstall after botched
# install)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 12
} | 0.15 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements-dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | aspy.yaml==1.3.0
cached-property==2.0.1
coverage==7.8.0
distlib==0.3.9
exceptiongroup==1.2.2
filelock==3.18.0
flake8==7.2.0
identify==2.6.9
iniconfig==2.1.0
mccabe==0.7.0
mock==5.2.0
nodeenv==1.9.1
packaging==24.2
platformdirs==4.3.7
pluggy==1.5.0
-e git+https://github.com/pre-commit/pre-commit.git@853cbecd4e15aeb59e4730320dc90fe16afa219e#egg=pre_commit
pycodestyle==2.13.0
pyflakes==3.3.1
pytest==8.3.5
pytest-env==1.1.5
PyYAML==6.0.2
six==1.17.0
tomli==2.2.1
virtualenv==20.29.3
| name: pre-commit
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- aspy-yaml==1.3.0
- cached-property==2.0.1
- coverage==7.8.0
- distlib==0.3.9
- exceptiongroup==1.2.2
- filelock==3.18.0
- flake8==7.2.0
- identify==2.6.9
- iniconfig==2.1.0
- mccabe==0.7.0
- mock==5.2.0
- nodeenv==1.9.1
- packaging==24.2
- platformdirs==4.3.7
- pluggy==1.5.0
- pycodestyle==2.13.0
- pyflakes==3.3.1
- pytest==8.3.5
- pytest-env==1.1.5
- pyyaml==6.0.2
- setuptools==18.4
- six==1.17.0
- tomli==2.2.1
- virtualenv==20.29.3
prefix: /opt/conda/envs/pre-commit
| [
"tests/languages/all_test.py::test_install_environment_argspec[docker]",
"tests/languages/all_test.py::test_install_environment_argspec[golang]",
"tests/languages/all_test.py::test_install_environment_argspec[node]",
"tests/languages/all_test.py::test_install_environment_argspec[pcre]",
"tests/languages/all_test.py::test_install_environment_argspec[python]",
"tests/languages/all_test.py::test_install_environment_argspec[ruby]",
"tests/languages/all_test.py::test_install_environment_argspec[script]",
"tests/languages/all_test.py::test_install_environment_argspec[swift]",
"tests/languages/all_test.py::test_install_environment_argspec[system]",
"tests/languages/all_test.py::test_get_default_version_argspec[docker]",
"tests/languages/all_test.py::test_get_default_version_argspec[golang]",
"tests/languages/all_test.py::test_get_default_version_argspec[node]",
"tests/languages/all_test.py::test_get_default_version_argspec[pcre]",
"tests/languages/all_test.py::test_get_default_version_argspec[python]",
"tests/languages/all_test.py::test_get_default_version_argspec[ruby]",
"tests/languages/all_test.py::test_get_default_version_argspec[script]",
"tests/languages/all_test.py::test_get_default_version_argspec[swift]",
"tests/languages/all_test.py::test_get_default_version_argspec[system]",
"tests/manifest_test.py::test_default_python_language_version",
"tests/repository_test.py::test_venvs",
"tests/repository_test.py::test_additional_dependencies",
"tests/repository_test.py::test_control_c_control_c_on_install"
] | [
"tests/repository_test.py::test_switch_language_versions_doesnt_clobber",
"tests/repository_test.py::test_versioned_python_hook",
"tests/repository_test.py::test_run_a_ruby_hook",
"tests/repository_test.py::test_run_versioned_ruby_hook",
"tests/repository_test.py::test_run_ruby_hook_with_disable_shared_gems",
"tests/repository_test.py::test_golang_hook",
"tests/repository_test.py::test_additional_ruby_dependencies_installed",
"tests/repository_test.py::test_additional_golang_dependencies_installed"
] | [
"tests/languages/all_test.py::test_ENVIRONMENT_DIR[docker]",
"tests/languages/all_test.py::test_ENVIRONMENT_DIR[golang]",
"tests/languages/all_test.py::test_ENVIRONMENT_DIR[node]",
"tests/languages/all_test.py::test_ENVIRONMENT_DIR[pcre]",
"tests/languages/all_test.py::test_ENVIRONMENT_DIR[python]",
"tests/languages/all_test.py::test_ENVIRONMENT_DIR[ruby]",
"tests/languages/all_test.py::test_ENVIRONMENT_DIR[script]",
"tests/languages/all_test.py::test_ENVIRONMENT_DIR[swift]",
"tests/languages/all_test.py::test_ENVIRONMENT_DIR[system]",
"tests/languages/all_test.py::test_run_hook_argpsec[docker]",
"tests/languages/all_test.py::test_run_hook_argpsec[golang]",
"tests/languages/all_test.py::test_run_hook_argpsec[node]",
"tests/languages/all_test.py::test_run_hook_argpsec[pcre]",
"tests/languages/all_test.py::test_run_hook_argpsec[python]",
"tests/languages/all_test.py::test_run_hook_argpsec[ruby]",
"tests/languages/all_test.py::test_run_hook_argpsec[script]",
"tests/languages/all_test.py::test_run_hook_argpsec[swift]",
"tests/languages/all_test.py::test_run_hook_argpsec[system]",
"tests/manifest_test.py::test_manifest_contents",
"tests/manifest_test.py::test_hooks",
"tests/manifest_test.py::test_legacy_manifest_warn",
"tests/repository_test.py::test_python_hook",
"tests/repository_test.py::test_python_hook_args_with_spaces",
"tests/repository_test.py::test_python_hook_weird_setup_cfg",
"tests/repository_test.py::test_run_a_node_hook",
"tests/repository_test.py::test_run_versioned_node_hook",
"tests/repository_test.py::test_system_hook_with_spaces",
"tests/repository_test.py::test_repo_with_legacy_hooks_yaml",
"tests/repository_test.py::test_missing_executable",
"tests/repository_test.py::test_missing_pcre_support",
"tests/repository_test.py::test_run_a_script_hook",
"tests/repository_test.py::test_run_hook_with_spaced_args",
"tests/repository_test.py::test_run_hook_with_curly_braced_arguments",
"tests/repository_test.py::test_pcre_hook_no_match",
"tests/repository_test.py::test_pcre_hook_matching",
"tests/repository_test.py::test_pcre_hook_case_insensitive_option",
"tests/repository_test.py::test_pcre_many_files",
"tests/repository_test.py::test_cwd_of_hook",
"tests/repository_test.py::test_lots_of_files",
"tests/repository_test.py::test_additional_dependencies_duplicated",
"tests/repository_test.py::test_additional_python_dependencies_installed",
"tests/repository_test.py::test_additional_dependencies_roll_forward",
"tests/repository_test.py::test_additional_node_dependencies_installed",
"tests/repository_test.py::test_reinstall",
"tests/repository_test.py::test_really_long_file_paths",
"tests/repository_test.py::test_config_overrides_repo_specifics",
"tests/repository_test.py::test_tags_on_repositories",
"tests/repository_test.py::test_local_repository",
"tests/repository_test.py::test_local_python_repo",
"tests/repository_test.py::test_hook_id_not_present",
"tests/repository_test.py::test_too_new_version",
"tests/repository_test.py::test_versions_ok[0.1.0]",
"tests/repository_test.py::test_versions_ok[0.15.0]"
] | [] | MIT License | 1,443 | 2,990 | [
"pre_commit/languages/all.py",
"pre_commit/languages/docker.py",
"pre_commit/languages/golang.py",
"pre_commit/languages/helpers.py",
"pre_commit/languages/node.py",
"pre_commit/languages/pcre.py",
"pre_commit/languages/python.py",
"pre_commit/languages/ruby.py",
"pre_commit/languages/script.py",
"pre_commit/languages/swift.py",
"pre_commit/languages/system.py",
"pre_commit/manifest.py"
] |
google__mobly-248 | 31dcff279d4808e011f6af8ab0661b9750357cda | 2017-07-11 11:28:03 | 31dcff279d4808e011f6af8ab0661b9750357cda | dthkao: fixes #247 | diff --git a/mobly/records.py b/mobly/records.py
index 6c5efe2..0b67c9d 100644
--- a/mobly/records.py
+++ b/mobly/records.py
@@ -14,6 +14,7 @@
"""This module is where all the record definitions and record containers live.
"""
+import itertools
import json
import logging
import pprint
@@ -283,7 +284,7 @@ class TestResult(object):
return
self.controller_info[name] = info
- def fail_class(self, test_record):
+ def add_class_error(self, test_record):
"""Add a record to indicate a test class has failed before any test
could execute.
@@ -337,7 +338,9 @@ class TestResult(object):
"""
d = {}
d['ControllerInfo'] = self.controller_info
- d['Results'] = [record.to_dict() for record in self.executed]
+ records_to_write = itertools.chain(self.passed, self.failed,
+ self.skipped, self.error)
+ d['Results'] = [record.to_dict() for record in records_to_write]
d['Summary'] = self.summary_dict()
json_str = json.dumps(d, indent=4, sort_keys=True)
return json_str
| Stacktrace is lost in test_summary.json
The reraise in base_test.py in 349-352 here loses the stacktrace:
https://github.com/google/mobly/pull/241
| google/mobly | diff --git a/mobly/base_test.py b/mobly/base_test.py
index 355603e..e85551a 100644
--- a/mobly/base_test.py
+++ b/mobly/base_test.py
@@ -343,12 +343,12 @@ class BaseTestClass(object):
test_method(*args, **kwargs)
else:
test_method()
- except signals.TestPass as e:
- raise e
- except Exception as e:
+ except signals.TestPass:
+ raise
+ except Exception:
logging.exception('Exception occurred in %s.',
self.current_test_name)
- raise e
+ raise
finally:
try:
self._teardown_test(test_name)
@@ -531,8 +531,8 @@ class BaseTestClass(object):
class_record = records.TestResultRecord('setup_generated_tests',
self.TAG)
class_record.test_begin()
- class_record.test_fail(e)
- self.results.fail_class(class_record)
+ class_record.test_error(e)
+ self.results.add_class_error(class_record)
return self.results
logging.info('==========> %s <==========', self.TAG)
# Devise the actual test methods to run in the test class.
@@ -551,18 +551,18 @@ class BaseTestClass(object):
except signals.TestAbortClass as e:
# The test class is intentionally aborted.
# Skip all tests peacefully.
- e.details = 'Test class aborted due to: %s' % e.details
+ e.details = 'setup_class aborted due to: %s' % e.details
self._skip_remaining_tests(e)
return self.results
except Exception as e:
# Setup class failed for unknown reasons.
# Fail the class and skip all tests.
- logging.exception('Failed to setup %s.', self.TAG)
+ logging.exception('Error in setup_class %s.', self.TAG)
class_record = records.TestResultRecord('setup_class', self.TAG)
class_record.test_begin()
- class_record.test_fail(e)
+ class_record.test_error(e)
self._exec_procedure_func(self._on_fail, class_record)
- self.results.fail_class(class_record)
+ self.results.add_class_error(class_record)
self._skip_remaining_tests(e)
return self.results
finally:
diff --git a/tests/mobly/base_test_test.py b/tests/mobly/base_test_test.py
index 65caf6f..da036ea 100755
--- a/tests/mobly/base_test_test.py
+++ b/tests/mobly/base_test_test.py
@@ -20,6 +20,10 @@ from mobly import base_test
from mobly import config_parser
from mobly import signals
+from tests.mobly import records_test
+
+validate_test_result = records_test.validate_test_result
+
MSG_EXPECTED_EXCEPTION = "This is an expected exception."
MSG_EXPECTED_TEST_FAILURE = "This is an expected test failure."
MSG_UNEXPECTED_EXCEPTION = "Unexpected exception!"
@@ -187,7 +191,9 @@ class BaseTestTest(unittest.TestCase):
bt_cls = MockBaseTest(self.mock_test_cls_configs)
bt_cls.run()
actual_record = bt_cls.results.error[0]
+ validate_test_result(bt_cls.results)
self.assertEqual(actual_record.test_name, "setup_class")
+
self.assertEqual(actual_record.details, MSG_EXPECTED_EXCEPTION)
self.assertIsNone(actual_record.extras)
expected_summary = ("Error 1, Executed 0, Failed 0, Passed 0, "
@@ -540,6 +546,7 @@ class BaseTestTest(unittest.TestCase):
signal for the entire class, which is different from raising other
exceptions in `setup_class`.
"""
+
class MockBaseTest(base_test.BaseTestClass):
def setup_class(self):
asserts.abort_class(MSG_EXPECTED_EXCEPTION)
@@ -555,6 +562,7 @@ class BaseTestTest(unittest.TestCase):
bt_cls = MockBaseTest(self.mock_test_cls_configs)
bt_cls.run(test_names=["test_1", "test_2", "test_3"])
+ self.assertEqual(len(bt_cls.results.skipped), 3)
self.assertEqual(bt_cls.results.summary_str(),
("Error 0, Executed 0, Failed 0, Passed 0, "
"Requested 3, Skipped 3"))
@@ -966,6 +974,7 @@ class BaseTestTest(unittest.TestCase):
bt_cls = MockBaseTest(self.mock_test_cls_configs)
bt_cls.run()
actual_record = bt_cls.results.error[0]
+ validate_test_result(bt_cls.results)
self.assertEqual(actual_record.test_name, "test_ha")
self.assertEqual(
actual_record.details,
diff --git a/tests/mobly/records_test.py b/tests/mobly/records_test.py
index 9500d1d..f1ee1ed 100755
--- a/tests/mobly/records_test.py
+++ b/tests/mobly/records_test.py
@@ -18,6 +18,26 @@ from mobly import records
from mobly import signals
+def validate_test_result(result):
+ """Validate basic properties of a test result.
+
+ The records in each bucket of the test result should have the corresponding
+ result enum.
+
+ Args:
+ result: The TestResult object to validate.
+ """
+ buckets = [
+ (result.passed, records.TestResultEnums.TEST_RESULT_PASS),
+ (result.failed, records.TestResultEnums.TEST_RESULT_FAIL),
+ (result.error, records.TestResultEnums.TEST_RESULT_ERROR),
+ (result.skipped, records.TestResultEnums.TEST_RESULT_SKIP),
+ ]
+ for bucket_list, expected_enum in buckets:
+ for record in bucket_list:
+ assert record.result == expected_enum
+
+
class RecordsTest(unittest.TestCase):
"""This test class tests the implementation of classes in mobly.records.
"""
@@ -208,7 +228,7 @@ class RecordsTest(unittest.TestCase):
with self.assertRaisesRegexp(TypeError, expected_msg):
tr1 += "haha"
- def test_result_fail_class_with_test_signal(self):
+ def test_result_add_class_error_with_test_signal(self):
record1 = records.TestResultRecord(self.tn)
record1.test_begin()
s = signals.TestPass(self.details, self.float_extra)
@@ -217,13 +237,13 @@ class RecordsTest(unittest.TestCase):
tr.add_record(record1)
s = signals.TestFailure(self.details, self.float_extra)
record2 = records.TestResultRecord("SomeTest", s)
- tr.fail_class(record2)
+ tr.add_class_error(record2)
self.assertEqual(len(tr.passed), 1)
self.assertEqual(len(tr.error), 1)
self.assertEqual(len(tr.executed), 1)
- def test_result_fail_class_with_special_error(self):
- """Call TestResult.fail_class with an error class that requires more
+ def test_result_add_class_error_with_special_error(self):
+ """Call TestResult.add_class_error with an error class that requires more
than one arg to instantiate.
"""
record1 = records.TestResultRecord(self.tn)
@@ -239,7 +259,7 @@ class RecordsTest(unittest.TestCase):
se = SpecialError("haha", 42)
record2 = records.TestResultRecord("SomeTest", se)
- tr.fail_class(record2)
+ tr.add_class_error(record2)
self.assertEqual(len(tr.passed), 1)
self.assertEqual(len(tr.error), 1)
self.assertEqual(len(tr.executed), 1)
@@ -271,17 +291,18 @@ class RecordsTest(unittest.TestCase):
tr = records.TestResult()
tr.add_record(record1)
tr.add_record(record2)
+ validate_test_result(tr)
self.assertFalse(tr.is_all_pass)
- def test_is_all_pass_with_fail_class(self):
- """Verifies that is_all_pass yields correct value when fail_class is
+ def test_is_all_pass_with_add_class_error(self):
+ """Verifies that is_all_pass yields correct value when add_class_error is
used.
"""
record1 = records.TestResultRecord(self.tn)
record1.test_begin()
record1.test_fail(Exception("haha"))
tr = records.TestResult()
- tr.fail_class(record1)
+ tr.add_class_error(record1)
self.assertFalse(tr.is_all_pass)
def test_is_test_executed(self):
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 1
} | 1.4 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
future==1.0.0
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
-e git+https://github.com/google/mobly.git@31dcff279d4808e011f6af8ab0661b9750357cda#egg=mobly
mock==1.0.1
packaging @ file:///croot/packaging_1734472117206/work
pluggy @ file:///croot/pluggy_1733169602837/work
portpicker==1.6.0
psutil==7.0.0
pytest @ file:///croot/pytest_1738938843180/work
pytz==2025.2
PyYAML==6.0.2
timeout-decorator==0.5.0
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
| name: mobly
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- future==1.0.0
- mock==1.0.1
- portpicker==1.6.0
- psutil==7.0.0
- pytz==2025.2
- pyyaml==6.0.2
- timeout-decorator==0.5.0
prefix: /opt/conda/envs/mobly
| [
"tests/mobly/base_test_test.py::BaseTestTest::test_generate_tests_dup_test_name",
"tests/mobly/base_test_test.py::BaseTestTest::test_setup_class_fail_by_exception",
"tests/mobly/records_test.py::RecordsTest::test_is_all_pass_with_add_class_error",
"tests/mobly/records_test.py::RecordsTest::test_result_add_class_error_with_special_error",
"tests/mobly/records_test.py::RecordsTest::test_result_add_class_error_with_test_signal"
] | [] | [
"tests/mobly/base_test_test.py::BaseTestTest::test_abort_class_in_test",
"tests/mobly/base_test_test.py::BaseTestTest::test_abort_setup_class",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_equal_fail",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_equal_fail_with_msg",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_equal_pass",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_fail_with_noop",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_fail_with_wrong_error",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_fail_with_wrong_regex",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_pass",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_regex_fail_with_noop",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_regex_fail_with_wrong_error",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_regex_pass",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_true",
"tests/mobly/base_test_test.py::BaseTestTest::test_both_teardown_and_test_body_raise_exceptions",
"tests/mobly/base_test_test.py::BaseTestTest::test_cli_test_selection_fail_by_convention",
"tests/mobly/base_test_test.py::BaseTestTest::test_cli_test_selection_override_self_tests_list",
"tests/mobly/base_test_test.py::BaseTestTest::test_current_test_name",
"tests/mobly/base_test_test.py::BaseTestTest::test_default_execution_of_all_tests",
"tests/mobly/base_test_test.py::BaseTestTest::test_explicit_pass",
"tests/mobly/base_test_test.py::BaseTestTest::test_explicit_pass_but_teardown_test_raises_an_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_fail",
"tests/mobly/base_test_test.py::BaseTestTest::test_failure_in_procedure_functions_is_recorded",
"tests/mobly/base_test_test.py::BaseTestTest::test_failure_to_call_procedure_function_is_recorded",
"tests/mobly/base_test_test.py::BaseTestTest::test_generate_tests_call_outside_of_setup_generated_tests",
"tests/mobly/base_test_test.py::BaseTestTest::test_generate_tests_run",
"tests/mobly/base_test_test.py::BaseTestTest::test_generate_tests_selected_run",
"tests/mobly/base_test_test.py::BaseTestTest::test_implicit_pass",
"tests/mobly/base_test_test.py::BaseTestTest::test_missing_requested_test_func",
"tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_executed_if_teardown_test_fails",
"tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_executed_if_test_fails",
"tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_executed_if_test_setup_fails_by_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_raise_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_on_pass_raise_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_self_tests_list",
"tests/mobly/base_test_test.py::BaseTestTest::test_self_tests_list_fail_by_convention",
"tests/mobly/base_test_test.py::BaseTestTest::test_setup_test_fail_by_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_setup_test_fail_by_test_signal",
"tests/mobly/base_test_test.py::BaseTestTest::test_skip",
"tests/mobly/base_test_test.py::BaseTestTest::test_skip_if",
"tests/mobly/base_test_test.py::BaseTestTest::test_teardown_test_assert_fail",
"tests/mobly/base_test_test.py::BaseTestTest::test_teardown_test_executed_if_setup_test_fails",
"tests/mobly/base_test_test.py::BaseTestTest::test_teardown_test_executed_if_test_fails",
"tests/mobly/base_test_test.py::BaseTestTest::test_teardown_test_executed_if_test_pass",
"tests/mobly/base_test_test.py::BaseTestTest::test_teardown_test_raise_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_uncaught_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_basic",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_default_None",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_default_overwrite",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_default_overwrite_by_optional_param_list",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_default_overwrite_by_required_param_list",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_optional",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_optional_missing",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_optional_with_default",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_required",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_required_missing",
"tests/mobly/records_test.py::RecordsTest::test_is_all_pass",
"tests/mobly/records_test.py::RecordsTest::test_is_all_pass_negative",
"tests/mobly/records_test.py::RecordsTest::test_is_test_executed",
"tests/mobly/records_test.py::RecordsTest::test_result_add_operator_success",
"tests/mobly/records_test.py::RecordsTest::test_result_add_operator_type_mismatch",
"tests/mobly/records_test.py::RecordsTest::test_result_record_fail_none",
"tests/mobly/records_test.py::RecordsTest::test_result_record_fail_stacktrace",
"tests/mobly/records_test.py::RecordsTest::test_result_record_fail_with_float_extra",
"tests/mobly/records_test.py::RecordsTest::test_result_record_fail_with_json_extra",
"tests/mobly/records_test.py::RecordsTest::test_result_record_pass_none",
"tests/mobly/records_test.py::RecordsTest::test_result_record_pass_with_float_extra",
"tests/mobly/records_test.py::RecordsTest::test_result_record_pass_with_json_extra",
"tests/mobly/records_test.py::RecordsTest::test_result_record_skip_none",
"tests/mobly/records_test.py::RecordsTest::test_result_record_skip_with_float_extra",
"tests/mobly/records_test.py::RecordsTest::test_result_record_skip_with_json_extra"
] | [] | Apache License 2.0 | 1,449 | 302 | [
"mobly/records.py"
] |
asottile__add-trailing-comma-4 | 9ce37f20c644269487c52030912e20a75cc191c1 | 2017-07-11 20:46:37 | 9ce37f20c644269487c52030912e20a75cc191c1 | diff --git a/add_trailing_comma.py b/add_trailing_comma.py
index 9efea83..736fa7d 100644
--- a/add_trailing_comma.py
+++ b/add_trailing_comma.py
@@ -179,13 +179,16 @@ def _fix_call(call, i, tokens):
#
# func_name(arg, arg, arg)
# ^ outer paren
+ brace_start, brace_end = '(', ')'
first_paren = None
paren_stack = []
for i in range(i, len(tokens)):
token = tokens[i]
- if token.src == '(':
+ if token.src == brace_start:
paren_stack.append(i)
- elif token.src == ')':
+ # the ast lies to us about the beginning of parenthesized functions.
+ # See #3. (why we make sure there's something to pop here)
+ elif token.src == brace_end and paren_stack:
paren_stack.pop()
if (token.line, token.utf8_byte_offset) in call.arg_offsets:
@@ -194,7 +197,7 @@ def _fix_call(call, i, tokens):
else:
raise AssertionError('Past end?')
- _fix_inner('(', ')', first_paren, tokens)
+ _fix_inner(brace_start, brace_end, first_paren, tokens)
def _fix_literal(literal, i, tokens):
| "IndexError: pop from empty list" when processing valid file
(Wasn't sure how to describe this scenario better, sorry for the lame title.)
Here's a fairly minimal example:
```python
(
a
).thing(b)
```
Produces an error when processing with add-trailing-comma v0.2.0:
```python
Traceback (most recent call last):
File "/nail/home/ckuehl/.pre-commit/repojgy42wl2/py_env-python2.7/bin/add-trailing-comma", line 9, in <module>
load_entry_point('add-trailing-comma==0.2.0', 'console_scripts', 'add-trailing-comma')()
File "/nail/home/ckuehl/.pre-commit/repojgy42wl2/py_env-python2.7/local/lib/python2.7/site-packages/add_trailing_comma.py", line 269, in main
ret |= fix_file(filename, args)
File "/nail/home/ckuehl/.pre-commit/repojgy42wl2/py_env-python2.7/local/lib/python2.7/site-packages/add_trailing_comma.py", line 249, in fix_file
contents_text = _fix_commas(contents_text, args.py35_plus)
File "/nail/home/ckuehl/.pre-commit/repojgy42wl2/py_env-python2.7/local/lib/python2.7/site-packages/add_trailing_comma.py", line 232, in _fix_commas
_fix_call(call, i, tokens)
File "/nail/home/ckuehl/.pre-commit/repojgy42wl2/py_env-python2.7/local/lib/python2.7/site-packages/add_trailing_comma.py", line 189, in _fix_call
paren_stack.pop()
IndexError: pop from empty list
``` | asottile/add-trailing-comma | diff --git a/tests/add_trailing_comma_test.py b/tests/add_trailing_comma_test.py
index ddc0bc3..5d1c798 100644
--- a/tests/add_trailing_comma_test.py
+++ b/tests/add_trailing_comma_test.py
@@ -42,6 +42,10 @@ xfailif_lt_py35 = pytest.mark.xfail(sys.version_info < (3, 5), reason='py35+')
'x((\n'
' 1,\n'
'))',
+ # regression test for #3
+ '(\n'
+ ' a\n'
+ ').f(b)',
),
)
def test_fix_calls_noops(src):
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 1
} | 0.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [],
"python": "3.6",
"reqs_path": [
"requirements-dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/asottile/add-trailing-comma.git@9ce37f20c644269487c52030912e20a75cc191c1#egg=add_trailing_comma
attrs==22.2.0
certifi==2021.5.30
cfgv==3.3.1
coverage==6.2
distlib==0.3.9
filelock==3.4.1
flake8==5.0.4
identify==2.4.4
importlib-metadata==4.2.0
importlib-resources==5.2.3
iniconfig==1.1.1
mccabe==0.7.0
nodeenv==1.6.0
packaging==21.3
platformdirs==2.4.0
pluggy==1.0.0
pre-commit==2.17.0
py==1.11.0
pycodestyle==2.9.1
pyflakes==2.5.0
pyparsing==3.1.4
pytest==7.0.1
PyYAML==6.0.1
tokenize-rt==4.2.1
toml==0.10.2
tomli==1.2.3
typing_extensions==4.1.1
virtualenv==20.16.2
zipp==3.6.0
| name: add-trailing-comma
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- cfgv==3.3.1
- coverage==6.2
- distlib==0.3.9
- filelock==3.4.1
- flake8==5.0.4
- identify==2.4.4
- importlib-metadata==4.2.0
- importlib-resources==5.2.3
- iniconfig==1.1.1
- mccabe==0.7.0
- nodeenv==1.6.0
- packaging==21.3
- platformdirs==2.4.0
- pluggy==1.0.0
- pre-commit==2.17.0
- py==1.11.0
- pycodestyle==2.9.1
- pyflakes==2.5.0
- pyparsing==3.1.4
- pytest==7.0.1
- pyyaml==6.0.1
- tokenize-rt==4.2.1
- toml==0.10.2
- tomli==1.2.3
- typing-extensions==4.1.1
- virtualenv==20.16.2
- zipp==3.6.0
prefix: /opt/conda/envs/add-trailing-comma
| [
"tests/add_trailing_comma_test.py::test_fix_calls_noops[(\\n"
] | [] | [
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x(1)]",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[tuple(\\n",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x(\\n",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x((\\n",
"tests/add_trailing_comma_test.py::test_ignores_invalid_ast_node",
"tests/add_trailing_comma_test.py::test_py35_plus_rewrite",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[(1,",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[[1,",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[{1,",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[{1:",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[y(*args1,",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[y(**kwargs1,",
"tests/add_trailing_comma_test.py::test_fixes_calls[x(\\n",
"tests/add_trailing_comma_test.py::test_fixes_calls[foo()(\\n",
"tests/add_trailing_comma_test.py::test_noop_one_line_literals[(1,",
"tests/add_trailing_comma_test.py::test_noop_one_line_literals[[1,",
"tests/add_trailing_comma_test.py::test_noop_one_line_literals[{1,",
"tests/add_trailing_comma_test.py::test_noop_one_line_literals[{1:",
"tests/add_trailing_comma_test.py::test_fixes_literals[x",
"tests/add_trailing_comma_test.py::test_fixes_py35_plus_literals[x",
"tests/add_trailing_comma_test.py::test_noop_tuple_literal_without_braces",
"tests/add_trailing_comma_test.py::test_main_trivial",
"tests/add_trailing_comma_test.py::test_main_noop",
"tests/add_trailing_comma_test.py::test_main_changes_a_file",
"tests/add_trailing_comma_test.py::test_main_syntax_error",
"tests/add_trailing_comma_test.py::test_main_non_utf8_bytes",
"tests/add_trailing_comma_test.py::test_main_py35_plus_argument_star_args",
"tests/add_trailing_comma_test.py::test_main_py35_plus_argument_star_star_kwargs"
] | [] | MIT License | 1,450 | 326 | [
"add_trailing_comma.py"
] |
|
google__mobly-258 | c9ba28477626c6f7d5365bec019646f915a5bd2d | 2017-07-12 18:30:24 | 9bb2ab41518a2f037178888f9e606fc42394ffb0 | diff --git a/mobly/controllers/android_device_lib/callback_handler.py b/mobly/controllers/android_device_lib/callback_handler.py
index 4207f47..1ab67d8 100644
--- a/mobly/controllers/android_device_lib/callback_handler.py
+++ b/mobly/controllers/android_device_lib/callback_handler.py
@@ -83,13 +83,14 @@ class CallbackHandler(object):
(timeout, MAX_TIMEOUT))
timeout *= 1000 # convert to milliseconds for java side
try:
- raw_event = self._event_client.eventWaitAndGet(self._id,
- event_name, timeout)
+ raw_event = self._event_client.eventWaitAndGet(
+ self._id, event_name, timeout)
except Exception as e:
if 'EventSnippetException: timeout.' in str(e):
raise TimeoutError(
- 'Timeout waiting for event "%s" triggered by %s (%s).' %
- (event_name, self._method_name, self._id))
+ 'Timed out after waiting %ss for event "%s" triggered by'
+ ' %s (%s).' % (timeout, event_name, self._method_name,
+ self._id))
raise
return snippet_event.from_dict(raw_event)
diff --git a/mobly/controllers/android_device_lib/snippet_client.py b/mobly/controllers/android_device_lib/snippet_client.py
index 3d85e40..f7f473b 100644
--- a/mobly/controllers/android_device_lib/snippet_client.py
+++ b/mobly/controllers/android_device_lib/snippet_client.py
@@ -24,15 +24,27 @@ _INSTRUMENTATION_RUNNER_PACKAGE = (
'com.google.android.mobly.snippet.SnippetRunner')
# TODO(adorokhine): delete this in Mobly 1.6 when snippet v0 support is removed.
-_LAUNCH_CMD_V0 = ('am instrument -w -e action start -e port %s %s/' +
+_LAUNCH_CMD_V0 = ('%s am instrument -w -e action start -e port %s %s/' +
_INSTRUMENTATION_RUNNER_PACKAGE)
_LAUNCH_CMD_V1 = (
- 'am instrument -w -e action start %s/' + _INSTRUMENTATION_RUNNER_PACKAGE)
+ '%s am instrument -w -e action start %s/' + _INSTRUMENTATION_RUNNER_PACKAGE)
_STOP_CMD = (
'am instrument -w -e action stop %s/' + _INSTRUMENTATION_RUNNER_PACKAGE)
+# Test that uses UiAutomation requires the shell session to be maintained while
+# test is in progress. However, this requirement does not hold for the test that
+# deals with device USB disconnection (Once device disconnects, the shell
+# session that started the instrument ends, and UiAutomation fails with error:
+# "UiAutomation not connected"). To keep the shell session and redirect
+# stdin/stdout/stderr, use "setsid" or "nohup" while launching the
+# instrumentation test. Because these commands may not be available in every
+# android system, try to use them only if exists.
+_SETSID_COMMAND = 'setsid'
+
+_NOHUP_COMMAND = 'nohup'
+
# Maximum time to wait for a v0 snippet to start on the device (10 minutes).
# TODO(adorokhine): delete this in Mobly 1.6 when snippet v0 support is removed.
_APP_START_WAIT_TIME_V0 = 10 * 60
@@ -60,7 +72,7 @@ class SnippetClient(jsonrpc_client_base.JsonRpcClientBase):
def __init__(self, package, adb_proxy, log=logging.getLogger()):
"""Initializes a SnippetClient.
-
+
Args:
package: (str) The package name of the apk where the snippets are
defined.
@@ -77,13 +89,14 @@ class SnippetClient(jsonrpc_client_base.JsonRpcClientBase):
"""Overrides superclass. Launches a snippet app and connects to it."""
self._check_app_installed()
+ persists_shell_cmd = self._get_persist_command()
# Try launching the app with the v1 protocol. If that fails, fall back
# to v0 for compatibility. Use info here so people know exactly what's
# happening here, which is helpful since they need to create their own
# instrumentations and manifest.
self.log.info('Launching snippet apk %s with protocol v1',
self.package)
- cmd = _LAUNCH_CMD_V1 % self.package
+ cmd = _LAUNCH_CMD_V1 % (persists_shell_cmd, self.package)
start_time = time.time()
self._proc = self._do_start_app(cmd)
@@ -106,7 +119,7 @@ class SnippetClient(jsonrpc_client_base.JsonRpcClientBase):
# Reuse the host port as the device port in v0 snippet. This isn't
# safe in general, but the protocol is deprecated.
self.device_port = self.host_port
- cmd = _LAUNCH_CMD_V0 % (self.device_port, self.package)
+ cmd = _LAUNCH_CMD_V0 % (persists_shell_cmd, self.device_port, self.package)
self._proc = self._do_start_app(cmd)
self._connect_to_v0()
self._launch_version = 'v0'
@@ -291,3 +304,17 @@ class SnippetClient(jsonrpc_client_base.JsonRpcClientBase):
return line
self.log.debug('Discarded line from instrumentation output: "%s"',
line)
+
+ def _get_persist_command(self):
+ """Check availability and return path of command if available."""
+ for command in [_SETSID_COMMAND, _NOHUP_COMMAND]:
+ try:
+ if command in self._adb.shell('which %s' % command):
+ return command
+ except adb.AdbError:
+ continue
+ self.log.warning('No %s and %s commands available to launch instrument '
+ 'persistently, tests that depend on UiAutomator and '
+ 'at the same time performs USB disconnection may fail',
+ _SETSID_COMMAND, _NOHUP_COMMAND)
+ return ''
| Exceptions from `CallbackHandler` should include timeout value
Right now some timeout exceptions thrown by `CallbackHandler` do not include how long the timeout was, making debugging more difficult. | google/mobly | diff --git a/mobly/base_test.py b/mobly/base_test.py
index e85551a..b3ccf43 100644
--- a/mobly/base_test.py
+++ b/mobly/base_test.py
@@ -553,6 +553,7 @@ class BaseTestClass(object):
# Skip all tests peacefully.
e.details = 'setup_class aborted due to: %s' % e.details
self._skip_remaining_tests(e)
+ self._safe_exec_func(self.teardown_class)
return self.results
except Exception as e:
# Setup class failed for unknown reasons.
@@ -564,9 +565,8 @@ class BaseTestClass(object):
self._exec_procedure_func(self._on_fail, class_record)
self.results.add_class_error(class_record)
self._skip_remaining_tests(e)
- return self.results
- finally:
self._safe_exec_func(self.teardown_class)
+ return self.results
# Run tests in order.
try:
for test_name, test_method in tests:
diff --git a/tests/mobly/base_test_test.py b/tests/mobly/base_test_test.py
index 725dcda..db615dd 100755
--- a/tests/mobly/base_test_test.py
+++ b/tests/mobly/base_test_test.py
@@ -38,7 +38,6 @@ class SomeError(Exception):
class BaseTestTest(unittest.TestCase):
-
def setUp(self):
self.mock_test_cls_configs = config_parser.TestRunConfig()
self.mock_test_cls_configs.log_path = '/tmp'
@@ -566,6 +565,25 @@ class BaseTestTest(unittest.TestCase):
("Error 0, Executed 0, Failed 0, Passed 0, "
"Requested 3, Skipped 3"))
+ def test_setup_and_teardown_execution_count(self):
+ class MockBaseTest(base_test.BaseTestClass):
+ def test_func(self):
+ pass
+
+ def test_func2(self):
+ pass
+
+ bt_cls = MockBaseTest(self.mock_test_cls_configs)
+ bt_cls.setup_class = mock.Mock()
+ bt_cls.teardown_class = mock.Mock()
+ bt_cls.setup_test = mock.Mock()
+ bt_cls.teardown_test = mock.Mock()
+ bt_cls.run()
+ self.assertEqual(bt_cls.setup_class.call_count, 1)
+ self.assertEqual(bt_cls.teardown_class.call_count, 1)
+ self.assertEqual(bt_cls.setup_test.call_count, 2)
+ self.assertEqual(bt_cls.teardown_test.call_count, 2)
+
def test_abort_class_in_test(self):
class MockBaseTest(base_test.BaseTestClass):
def test_1(self):
diff --git a/tests/mobly/controllers/android_device_lib/callback_handler_test.py b/tests/mobly/controllers/android_device_lib/callback_handler_test.py
index a701d51..f288ef3 100755
--- a/tests/mobly/controllers/android_device_lib/callback_handler_test.py
+++ b/tests/mobly/controllers/android_device_lib/callback_handler_test.py
@@ -34,6 +34,7 @@ MOCK_RAW_EVENT = {
class CallbackHandlerTest(unittest.TestCase):
"""Unit tests for mobly.controllers.android_device_lib.callback_handler.
"""
+
def test_timeout_value(self):
self.assertGreaterEqual(jsonrpc_client_base._SOCKET_READ_TIMEOUT,
callback_handler.MAX_TIMEOUT)
@@ -64,9 +65,9 @@ class CallbackHandlerTest(unittest.TestCase):
event_client=mock_event_client,
ret_value=None,
method_name=None)
- expected_msg = 'Timeout waiting for event "ha" .*'
+ expected_msg = 'Timed out after waiting .*s for event "ha" .*'
with self.assertRaisesRegex(callback_handler.TimeoutError,
- expected_msg):
+ expected_msg):
handler.waitAndGet('ha')
def test_wait_for_event(self):
@@ -101,7 +102,7 @@ class CallbackHandlerTest(unittest.TestCase):
return False
with self.assertRaisesRegex(callback_handler.TimeoutError,
- expected_msg):
+ expected_msg):
handler.waitForEvent('AsyncTaskResult', some_condition, 0.01)
diff --git a/tests/mobly/controllers/android_device_lib/snippet_client_test.py b/tests/mobly/controllers/android_device_lib/snippet_client_test.py
index 010064c..beb9262 100755
--- a/tests/mobly/controllers/android_device_lib/snippet_client_test.py
+++ b/tests/mobly/controllers/android_device_lib/snippet_client_test.py
@@ -18,6 +18,7 @@ from builtins import bytes
import mock
from future.tests.base import unittest
+from mobly.controllers.android_device_lib import adb
from mobly.controllers.android_device_lib import jsonrpc_client_base
from mobly.controllers.android_device_lib import snippet_client
from tests.lib import jsonrpc_client_test_base
@@ -51,6 +52,8 @@ class MockAdbProxy(object):
return bytes('instrumentation:{p}/{r} (target={p})'.format(
p=MOCK_PACKAGE_NAME,
r=snippet_client._INSTRUMENTATION_RUNNER_PACKAGE), 'utf-8')
+ elif 'which' in params:
+ return ''
def __getattr__(self, name):
"""All calls to the none-existent functions in adb proxy would
@@ -175,6 +178,73 @@ class SnippetClientTest(jsonrpc_client_test_base.JsonRpcClientTestBase):
client.start_app_and_connect()
self.assertEqual(123, client.device_port)
+ @mock.patch('mobly.controllers.android_device_lib.snippet_client.'
+ 'SnippetClient._do_start_app')
+ @mock.patch('mobly.controllers.android_device_lib.snippet_client.'
+ 'SnippetClient._check_app_installed')
+ @mock.patch('mobly.controllers.android_device_lib.snippet_client.'
+ 'SnippetClient._read_protocol_line')
+ @mock.patch('mobly.controllers.android_device_lib.snippet_client.'
+ 'SnippetClient._connect_to_v1')
+ @mock.patch('mobly.controllers.android_device_lib.snippet_client.'
+ 'utils.get_available_host_port')
+ def test_snippet_start_app_and_connect_v1_persistent_session(
+ self, mock_get_port, mock_connect_to_v1, mock_read_protocol_line,
+ mock_check_app_installed, mock_do_start_app):
+
+ def _mocked_shell(arg):
+ if 'setsid' in arg:
+ raise adb.AdbError('cmd', 'stdout', 'stderr', 'ret_code')
+ else:
+ return 'nohup'
+
+ mock_get_port.return_value = 123
+ mock_read_protocol_line.side_effect = [
+ 'SNIPPET START, PROTOCOL 1 234',
+ 'SNIPPET SERVING, PORT 1234',
+ 'SNIPPET START, PROTOCOL 1 234',
+ 'SNIPPET SERVING, PORT 1234',
+ 'SNIPPET START, PROTOCOL 1 234',
+ 'SNIPPET SERVING, PORT 1234',
+ ]
+
+ # Test 'setsid' exists
+ client = self._make_client()
+ client._adb.shell = mock.Mock(return_value='setsid')
+ client.start_app_and_connect()
+ cmd_setsid = '%s am instrument -w -e action start %s/%s' % (
+ snippet_client._SETSID_COMMAND,
+ MOCK_PACKAGE_NAME,
+ snippet_client._INSTRUMENTATION_RUNNER_PACKAGE)
+ mock_do_start_app.assert_has_calls(mock.call(cmd_setsid))
+
+ # Test 'setsid' does not exist, but 'nohup' exsits
+ client = self._make_client()
+ client._adb.shell = _mocked_shell
+ client.start_app_and_connect()
+ cmd_nohup = '%s am instrument -w -e action start %s/%s' % (
+ snippet_client._NOHUP_COMMAND,
+ MOCK_PACKAGE_NAME,
+ snippet_client._INSTRUMENTATION_RUNNER_PACKAGE)
+ mock_do_start_app.assert_has_calls([
+ mock.call(cmd_setsid),
+ mock.call(cmd_nohup)
+ ])
+
+ # Test both 'setsid' and 'nohup' do not exist
+ client._adb.shell = mock.Mock(
+ side_effect=adb.AdbError('cmd', 'stdout', 'stderr', 'ret_code'))
+ client = self._make_client()
+ client.start_app_and_connect()
+ cmd_not_persist = ' am instrument -w -e action start %s/%s' % (
+ MOCK_PACKAGE_NAME,
+ snippet_client._INSTRUMENTATION_RUNNER_PACKAGE)
+ mock_do_start_app.assert_has_calls([
+ mock.call(cmd_setsid),
+ mock.call(cmd_nohup),
+ mock.call(cmd_not_persist)
+ ])
+
@mock.patch('socket.create_connection')
@mock.patch('mobly.controllers.android_device_lib.snippet_client.'
'utils.start_standing_subprocess')
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 2
} | 1.5 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest pytest-cov pytest-xdist pytest-mock pytest-asyncio",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y adb python3-setuptools"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | coverage==7.8.0
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
execnet==2.1.1
future==1.0.0
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
-e git+https://github.com/google/mobly.git@c9ba28477626c6f7d5365bec019646f915a5bd2d#egg=mobly
mock==1.0.1
packaging @ file:///croot/packaging_1734472117206/work
pluggy @ file:///croot/pluggy_1733169602837/work
portpicker==1.6.0
psutil==7.0.0
pytest @ file:///croot/pytest_1738938843180/work
pytest-asyncio==0.26.0
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-xdist==3.6.1
pytz==2025.2
PyYAML==6.0.2
timeout-decorator==0.5.0
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
typing_extensions==4.13.0
| name: mobly
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- coverage==7.8.0
- execnet==2.1.1
- future==1.0.0
- mock==1.0.1
- portpicker==1.6.0
- psutil==7.0.0
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-xdist==3.6.1
- pytz==2025.2
- pyyaml==6.0.2
- timeout-decorator==0.5.0
- typing-extensions==4.13.0
prefix: /opt/conda/envs/mobly
| [
"tests/mobly/controllers/android_device_lib/callback_handler_test.py::CallbackHandlerTest::test_wait_and_get_timeout",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start_app_and_connect_v1_persistent_session"
] | [] | [
"tests/mobly/base_test_test.py::BaseTestTest::test_abort_class_in_test",
"tests/mobly/base_test_test.py::BaseTestTest::test_abort_setup_class",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_equal_fail",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_equal_fail_with_msg",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_equal_pass",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_fail_with_noop",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_fail_with_wrong_error",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_fail_with_wrong_regex",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_pass",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_regex_fail_with_noop",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_regex_fail_with_wrong_error",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_regex_pass",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_true",
"tests/mobly/base_test_test.py::BaseTestTest::test_both_teardown_and_test_body_raise_exceptions",
"tests/mobly/base_test_test.py::BaseTestTest::test_cli_test_selection_fail_by_convention",
"tests/mobly/base_test_test.py::BaseTestTest::test_cli_test_selection_override_self_tests_list",
"tests/mobly/base_test_test.py::BaseTestTest::test_current_test_name",
"tests/mobly/base_test_test.py::BaseTestTest::test_default_execution_of_all_tests",
"tests/mobly/base_test_test.py::BaseTestTest::test_explicit_pass",
"tests/mobly/base_test_test.py::BaseTestTest::test_explicit_pass_but_teardown_test_raises_an_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_fail",
"tests/mobly/base_test_test.py::BaseTestTest::test_failure_in_procedure_functions_is_recorded",
"tests/mobly/base_test_test.py::BaseTestTest::test_failure_to_call_procedure_function_is_recorded",
"tests/mobly/base_test_test.py::BaseTestTest::test_generate_tests_call_outside_of_setup_generated_tests",
"tests/mobly/base_test_test.py::BaseTestTest::test_generate_tests_dup_test_name",
"tests/mobly/base_test_test.py::BaseTestTest::test_generate_tests_run",
"tests/mobly/base_test_test.py::BaseTestTest::test_generate_tests_selected_run",
"tests/mobly/base_test_test.py::BaseTestTest::test_implicit_pass",
"tests/mobly/base_test_test.py::BaseTestTest::test_missing_requested_test_func",
"tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_executed_if_teardown_test_fails",
"tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_executed_if_test_fails",
"tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_executed_if_test_setup_fails_by_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_raise_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_on_pass_raise_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_self_tests_list",
"tests/mobly/base_test_test.py::BaseTestTest::test_self_tests_list_fail_by_convention",
"tests/mobly/base_test_test.py::BaseTestTest::test_setup_and_teardown_execution_count",
"tests/mobly/base_test_test.py::BaseTestTest::test_setup_class_fail_by_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_setup_test_fail_by_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_setup_test_fail_by_test_signal",
"tests/mobly/base_test_test.py::BaseTestTest::test_skip",
"tests/mobly/base_test_test.py::BaseTestTest::test_skip_if",
"tests/mobly/base_test_test.py::BaseTestTest::test_teardown_test_assert_fail",
"tests/mobly/base_test_test.py::BaseTestTest::test_teardown_test_executed_if_setup_test_fails",
"tests/mobly/base_test_test.py::BaseTestTest::test_teardown_test_executed_if_test_fails",
"tests/mobly/base_test_test.py::BaseTestTest::test_teardown_test_executed_if_test_pass",
"tests/mobly/base_test_test.py::BaseTestTest::test_teardown_test_raise_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_uncaught_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_basic",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_default_None",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_default_overwrite",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_default_overwrite_by_optional_param_list",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_default_overwrite_by_required_param_list",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_optional",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_optional_missing",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_optional_with_default",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_required",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_required_missing",
"tests/mobly/controllers/android_device_lib/callback_handler_test.py::CallbackHandlerTest::test_event_dict_to_snippet_event",
"tests/mobly/controllers/android_device_lib/callback_handler_test.py::CallbackHandlerTest::test_timeout_value",
"tests/mobly/controllers/android_device_lib/callback_handler_test.py::CallbackHandlerTest::test_wait_for_event",
"tests/mobly/controllers/android_device_lib/callback_handler_test.py::CallbackHandlerTest::test_wait_for_event_negative",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_check_app_installed_fail_app_not_installed",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_check_app_installed_fail_not_instrumented",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_check_app_installed_fail_target_not_installed",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_check_app_installed_normal",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_restore_event_client",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start_app_and_connect_no_valid_line",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start_app_and_connect_unknown_protocol",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start_app_and_connect_v0",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start_app_and_connect_v0_header_junk",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start_app_and_connect_v1",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start_app_and_connect_v1_header_junk",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start_event_client"
] | [] | Apache License 2.0 | 1,454 | 1,393 | [
"mobly/controllers/android_device_lib/callback_handler.py",
"mobly/controllers/android_device_lib/snippet_client.py"
] |
|
google__mobly-263 | 067621c8fbb771b35cf0a7c6de6c42a6c452321f | 2017-07-13 23:05:56 | 9bb2ab41518a2f037178888f9e606fc42394ffb0 | dthkao:
Review status: 0 of 2 files reviewed at latest revision, 2 unresolved discussions.
---
*[mobly/base_test.py, line 359 at r1](https://reviewable.io:443/reviews/google/mobly/263#-Kp0u61nmBZ4dqzUVPiF:-Kp0u61oruK5KEpK27tn:b982v3x) ([raw file](https://github.com/google/mobly/blob/581a59e46299149554ab6e9c94da99a3868d0258/mobly/base_test.py#L359)):*
> ```Python
> except Exception as e:
> logging.exception(e)
> tr_record.add_error('teardown_test', e)
> ```
So this is a deeper issue, but if a test fails in the test case, and is marked as fail, and then teardown throws an error because part of the test wasn't executed, the result now becomes an error. Is that intended?
---
*[mobly/base_test.py, line 378 at r1](https://reviewable.io:443/reviews/google/mobly/263#-Kp0sYZQ6VbNc8OP1KcQ:-Kp0sYZQ6VbNc8OP1KcR:bgoqojn) ([raw file](https://github.com/google/mobly/blob/581a59e46299149554ab6e9c94da99a3868d0258/mobly/base_test.py#L378)):*
> ```Python
> tr_record.test_pass()
> finally:
> if tr_record.result in (records.TestResultEnums.TEST_RESULT_ERROR,
> ```
Tangential, but is there any need for a distinction between what should be done if an error occurred vs a failure? Do we need a self.on_error()?
---
*Comments from [Reviewable](https://reviewable.io:443/reviews/google/mobly/263)*
<!-- Sent from Reviewable.io -->
xpconanfan:
Review status: 0 of 2 files reviewed at latest revision, 2 unresolved discussions.
---
*[mobly/base_test.py, line 359 at r1](https://reviewable.io:443/reviews/google/mobly/263#-Kp0u61nmBZ4dqzUVPiF:-Kp0zsfLmcc91mE7p0S_:b-vlp9v4) ([raw file](https://github.com/google/mobly/blob/581a59e46299149554ab6e9c94da99a3868d0258/mobly/base_test.py#L359)):*
<details><summary><i>Previously, dthkao (David T.H. Kao) wrote…</i></summary><blockquote>
So this is a deeper issue, but if a test fails in the test case, and is marked as fail, and then teardown throws an error because part of the test wasn't executed, the result now becomes an error. Is that intended?
</blockquote></details>
This has always been the expected behavior and is not changed in this PR.
We specifically did this to be consistent with pyunit.
---
*[mobly/base_test.py, line 378 at r1](https://reviewable.io:443/reviews/google/mobly/263#-Kp0sYZQ6VbNc8OP1KcQ:-Kp1-4WKVsMsfpArDYuc:b7r6m37) ([raw file](https://github.com/google/mobly/blob/581a59e46299149554ab6e9c94da99a3868d0258/mobly/base_test.py#L378)):*
<details><summary><i>Previously, dthkao (David T.H. Kao) wrote…</i></summary><blockquote>
Tangential, but is there any need for a distinction between what should be done if an error occurred vs a failure? Do we need a self.on_error()?
</blockquote></details>
We specifically removed `on_error` at one point...
Because we found out that everybdoy just duped the code between on_fail and on_error with no difference...
---
*Comments from [Reviewable](https://reviewable.io:443/reviews/google/mobly/263)*
<!-- Sent from Reviewable.io -->
| diff --git a/mobly/controllers/android_device_lib/callback_handler.py b/mobly/controllers/android_device_lib/callback_handler.py
index 4207f47..1ab67d8 100644
--- a/mobly/controllers/android_device_lib/callback_handler.py
+++ b/mobly/controllers/android_device_lib/callback_handler.py
@@ -83,13 +83,14 @@ class CallbackHandler(object):
(timeout, MAX_TIMEOUT))
timeout *= 1000 # convert to milliseconds for java side
try:
- raw_event = self._event_client.eventWaitAndGet(self._id,
- event_name, timeout)
+ raw_event = self._event_client.eventWaitAndGet(
+ self._id, event_name, timeout)
except Exception as e:
if 'EventSnippetException: timeout.' in str(e):
raise TimeoutError(
- 'Timeout waiting for event "%s" triggered by %s (%s).' %
- (event_name, self._method_name, self._id))
+ 'Timed out after waiting %ss for event "%s" triggered by'
+ ' %s (%s).' % (timeout, event_name, self._method_name,
+ self._id))
raise
return snippet_event.from_dict(raw_event)
diff --git a/mobly/controllers/android_device_lib/snippet_client.py b/mobly/controllers/android_device_lib/snippet_client.py
index 3d85e40..f7f473b 100644
--- a/mobly/controllers/android_device_lib/snippet_client.py
+++ b/mobly/controllers/android_device_lib/snippet_client.py
@@ -24,15 +24,27 @@ _INSTRUMENTATION_RUNNER_PACKAGE = (
'com.google.android.mobly.snippet.SnippetRunner')
# TODO(adorokhine): delete this in Mobly 1.6 when snippet v0 support is removed.
-_LAUNCH_CMD_V0 = ('am instrument -w -e action start -e port %s %s/' +
+_LAUNCH_CMD_V0 = ('%s am instrument -w -e action start -e port %s %s/' +
_INSTRUMENTATION_RUNNER_PACKAGE)
_LAUNCH_CMD_V1 = (
- 'am instrument -w -e action start %s/' + _INSTRUMENTATION_RUNNER_PACKAGE)
+ '%s am instrument -w -e action start %s/' + _INSTRUMENTATION_RUNNER_PACKAGE)
_STOP_CMD = (
'am instrument -w -e action stop %s/' + _INSTRUMENTATION_RUNNER_PACKAGE)
+# Test that uses UiAutomation requires the shell session to be maintained while
+# test is in progress. However, this requirement does not hold for the test that
+# deals with device USB disconnection (Once device disconnects, the shell
+# session that started the instrument ends, and UiAutomation fails with error:
+# "UiAutomation not connected"). To keep the shell session and redirect
+# stdin/stdout/stderr, use "setsid" or "nohup" while launching the
+# instrumentation test. Because these commands may not be available in every
+# android system, try to use them only if exists.
+_SETSID_COMMAND = 'setsid'
+
+_NOHUP_COMMAND = 'nohup'
+
# Maximum time to wait for a v0 snippet to start on the device (10 minutes).
# TODO(adorokhine): delete this in Mobly 1.6 when snippet v0 support is removed.
_APP_START_WAIT_TIME_V0 = 10 * 60
@@ -60,7 +72,7 @@ class SnippetClient(jsonrpc_client_base.JsonRpcClientBase):
def __init__(self, package, adb_proxy, log=logging.getLogger()):
"""Initializes a SnippetClient.
-
+
Args:
package: (str) The package name of the apk where the snippets are
defined.
@@ -77,13 +89,14 @@ class SnippetClient(jsonrpc_client_base.JsonRpcClientBase):
"""Overrides superclass. Launches a snippet app and connects to it."""
self._check_app_installed()
+ persists_shell_cmd = self._get_persist_command()
# Try launching the app with the v1 protocol. If that fails, fall back
# to v0 for compatibility. Use info here so people know exactly what's
# happening here, which is helpful since they need to create their own
# instrumentations and manifest.
self.log.info('Launching snippet apk %s with protocol v1',
self.package)
- cmd = _LAUNCH_CMD_V1 % self.package
+ cmd = _LAUNCH_CMD_V1 % (persists_shell_cmd, self.package)
start_time = time.time()
self._proc = self._do_start_app(cmd)
@@ -106,7 +119,7 @@ class SnippetClient(jsonrpc_client_base.JsonRpcClientBase):
# Reuse the host port as the device port in v0 snippet. This isn't
# safe in general, but the protocol is deprecated.
self.device_port = self.host_port
- cmd = _LAUNCH_CMD_V0 % (self.device_port, self.package)
+ cmd = _LAUNCH_CMD_V0 % (persists_shell_cmd, self.device_port, self.package)
self._proc = self._do_start_app(cmd)
self._connect_to_v0()
self._launch_version = 'v0'
@@ -291,3 +304,17 @@ class SnippetClient(jsonrpc_client_base.JsonRpcClientBase):
return line
self.log.debug('Discarded line from instrumentation output: "%s"',
line)
+
+ def _get_persist_command(self):
+ """Check availability and return path of command if available."""
+ for command in [_SETSID_COMMAND, _NOHUP_COMMAND]:
+ try:
+ if command in self._adb.shell('which %s' % command):
+ return command
+ except adb.AdbError:
+ continue
+ self.log.warning('No %s and %s commands available to launch instrument '
+ 'persistently, tests that depend on UiAutomator and '
+ 'at the same time performs USB disconnection may fail',
+ _SETSID_COMMAND, _NOHUP_COMMAND)
+ return ''
| Procedure functions triggered incorrectly
If a test throws an exception, and the teardown also throws an exception, `on_fail` is executed twice. | google/mobly | diff --git a/mobly/base_test.py b/mobly/base_test.py
index b3ccf43..7353afd 100644
--- a/mobly/base_test.py
+++ b/mobly/base_test.py
@@ -357,14 +357,11 @@ class BaseTestClass(object):
except Exception as e:
logging.exception(e)
tr_record.add_error('teardown_test', e)
- self._exec_procedure_func(self._on_fail, tr_record)
except (signals.TestFailure, AssertionError) as e:
tr_record.test_fail(e)
- self._exec_procedure_func(self._on_fail, tr_record)
except signals.TestSkip as e:
# Test skipped.
tr_record.test_skip(e)
- self._exec_procedure_func(self._on_skip, tr_record)
except (signals.TestAbortClass, signals.TestAbortAll) as e:
# Abort signals, pass along.
tr_record.test_fail(e)
@@ -372,15 +369,19 @@ class BaseTestClass(object):
except signals.TestPass as e:
# Explicit test pass.
tr_record.test_pass(e)
- self._exec_procedure_func(self._on_pass, tr_record)
except Exception as e:
# Exception happened during test.
tr_record.test_error(e)
- self._exec_procedure_func(self._on_fail, tr_record)
else:
tr_record.test_pass()
- self._exec_procedure_func(self._on_pass, tr_record)
finally:
+ if tr_record.result in (records.TestResultEnums.TEST_RESULT_ERROR,
+ records.TestResultEnums.TEST_RESULT_FAIL):
+ self._exec_procedure_func(self._on_fail, tr_record)
+ elif tr_record.result == records.TestResultEnums.TEST_RESULT_PASS:
+ self._exec_procedure_func(self._on_pass, tr_record)
+ elif tr_record.result == records.TestResultEnums.TEST_RESULT_SKIP:
+ self._exec_procedure_func(self._on_skip, tr_record)
self.results.add_record(tr_record)
def _assert_function_name_in_stack(self, expected_func_name):
diff --git a/tests/mobly/base_test_test.py b/tests/mobly/base_test_test.py
index db615dd..35c29f2 100755
--- a/tests/mobly/base_test_test.py
+++ b/tests/mobly/base_test_test.py
@@ -201,7 +201,12 @@ class BaseTestTest(unittest.TestCase):
on_fail_call_check.assert_called_once_with("haha")
def test_setup_test_fail_by_exception(self):
+ mock_on_fail = mock.Mock()
+
class MockBaseTest(base_test.BaseTestClass):
+ def on_fail(self, *args):
+ mock_on_fail('on_fail')
+
def setup_test(self):
raise Exception(MSG_EXPECTED_EXCEPTION)
@@ -211,6 +216,7 @@ class BaseTestTest(unittest.TestCase):
bt_cls = MockBaseTest(self.mock_test_cls_configs)
bt_cls.run(test_names=["test_something"])
+ mock_on_fail.assert_called_once_with('on_fail')
actual_record = bt_cls.results.error[0]
self.assertEqual(actual_record.test_name, self.mock_test_name)
self.assertEqual(actual_record.details, MSG_EXPECTED_EXCEPTION)
@@ -328,6 +334,9 @@ class BaseTestTest(unittest.TestCase):
def teardown_test(self):
my_mock("teardown_test")
+ def on_pass(self, test_name, begin_time):
+ never_call()
+
def test_something(self):
raise Exception(MSG_EXPECTED_EXCEPTION)
@@ -349,6 +358,9 @@ class BaseTestTest(unittest.TestCase):
def on_fail(self, test_name, begin_time):
my_mock("on_fail")
+ def on_pass(self, test_name, begin_time):
+ never_call()
+
def teardown_test(self):
raise Exception(MSG_EXPECTED_EXCEPTION)
@@ -373,6 +385,9 @@ class BaseTestTest(unittest.TestCase):
def on_fail(self, test_name, begin_time):
my_mock("on_fail")
+ def on_pass(self, test_name, begin_time):
+ never_call()
+
def test_something(self):
asserts.assert_true(False, MSG_EXPECTED_EXCEPTION)
@@ -387,6 +402,35 @@ class BaseTestTest(unittest.TestCase):
"Requested 1, Skipped 0")
self.assertEqual(bt_cls.results.summary_str(), expected_summary)
+ def test_on_fail_executed_if_both_test_and_teardown_test_fails(self):
+ on_fail_mock = mock.MagicMock()
+
+ class MockBaseTest(base_test.BaseTestClass):
+ def on_fail(self, test_name, begin_time):
+ on_fail_mock("on_fail")
+
+ def on_pass(self, test_name, begin_time):
+ never_call()
+
+ def teardown_test(self):
+ raise Exception(MSG_EXPECTED_EXCEPTION + 'ha')
+
+ def test_something(self):
+ raise Exception(MSG_EXPECTED_EXCEPTION)
+
+ bt_cls = MockBaseTest(self.mock_test_cls_configs)
+ bt_cls.run()
+ on_fail_mock.assert_called_once_with("on_fail")
+ actual_record = bt_cls.results.error[0]
+ self.assertEqual(actual_record.test_name, self.mock_test_name)
+ self.assertEqual(actual_record.details, MSG_EXPECTED_EXCEPTION)
+ self.assertEqual(actual_record.extra_errors,
+ {'teardown_test': 'This is an expected exception.ha'})
+ self.assertIsNone(actual_record.extras)
+ expected_summary = ("Error 1, Executed 1, Failed 0, Passed 0, "
+ "Requested 1, Skipped 0")
+ self.assertEqual(bt_cls.results.summary_str(), expected_summary)
+
def test_on_fail_executed_if_test_setup_fails_by_exception(self):
my_mock = mock.MagicMock()
diff --git a/tests/mobly/controllers/android_device_lib/callback_handler_test.py b/tests/mobly/controllers/android_device_lib/callback_handler_test.py
index a701d51..f288ef3 100755
--- a/tests/mobly/controllers/android_device_lib/callback_handler_test.py
+++ b/tests/mobly/controllers/android_device_lib/callback_handler_test.py
@@ -34,6 +34,7 @@ MOCK_RAW_EVENT = {
class CallbackHandlerTest(unittest.TestCase):
"""Unit tests for mobly.controllers.android_device_lib.callback_handler.
"""
+
def test_timeout_value(self):
self.assertGreaterEqual(jsonrpc_client_base._SOCKET_READ_TIMEOUT,
callback_handler.MAX_TIMEOUT)
@@ -64,9 +65,9 @@ class CallbackHandlerTest(unittest.TestCase):
event_client=mock_event_client,
ret_value=None,
method_name=None)
- expected_msg = 'Timeout waiting for event "ha" .*'
+ expected_msg = 'Timed out after waiting .*s for event "ha" .*'
with self.assertRaisesRegex(callback_handler.TimeoutError,
- expected_msg):
+ expected_msg):
handler.waitAndGet('ha')
def test_wait_for_event(self):
@@ -101,7 +102,7 @@ class CallbackHandlerTest(unittest.TestCase):
return False
with self.assertRaisesRegex(callback_handler.TimeoutError,
- expected_msg):
+ expected_msg):
handler.waitForEvent('AsyncTaskResult', some_condition, 0.01)
diff --git a/tests/mobly/controllers/android_device_lib/snippet_client_test.py b/tests/mobly/controllers/android_device_lib/snippet_client_test.py
index 010064c..beb9262 100755
--- a/tests/mobly/controllers/android_device_lib/snippet_client_test.py
+++ b/tests/mobly/controllers/android_device_lib/snippet_client_test.py
@@ -18,6 +18,7 @@ from builtins import bytes
import mock
from future.tests.base import unittest
+from mobly.controllers.android_device_lib import adb
from mobly.controllers.android_device_lib import jsonrpc_client_base
from mobly.controllers.android_device_lib import snippet_client
from tests.lib import jsonrpc_client_test_base
@@ -51,6 +52,8 @@ class MockAdbProxy(object):
return bytes('instrumentation:{p}/{r} (target={p})'.format(
p=MOCK_PACKAGE_NAME,
r=snippet_client._INSTRUMENTATION_RUNNER_PACKAGE), 'utf-8')
+ elif 'which' in params:
+ return ''
def __getattr__(self, name):
"""All calls to the none-existent functions in adb proxy would
@@ -175,6 +178,73 @@ class SnippetClientTest(jsonrpc_client_test_base.JsonRpcClientTestBase):
client.start_app_and_connect()
self.assertEqual(123, client.device_port)
+ @mock.patch('mobly.controllers.android_device_lib.snippet_client.'
+ 'SnippetClient._do_start_app')
+ @mock.patch('mobly.controllers.android_device_lib.snippet_client.'
+ 'SnippetClient._check_app_installed')
+ @mock.patch('mobly.controllers.android_device_lib.snippet_client.'
+ 'SnippetClient._read_protocol_line')
+ @mock.patch('mobly.controllers.android_device_lib.snippet_client.'
+ 'SnippetClient._connect_to_v1')
+ @mock.patch('mobly.controllers.android_device_lib.snippet_client.'
+ 'utils.get_available_host_port')
+ def test_snippet_start_app_and_connect_v1_persistent_session(
+ self, mock_get_port, mock_connect_to_v1, mock_read_protocol_line,
+ mock_check_app_installed, mock_do_start_app):
+
+ def _mocked_shell(arg):
+ if 'setsid' in arg:
+ raise adb.AdbError('cmd', 'stdout', 'stderr', 'ret_code')
+ else:
+ return 'nohup'
+
+ mock_get_port.return_value = 123
+ mock_read_protocol_line.side_effect = [
+ 'SNIPPET START, PROTOCOL 1 234',
+ 'SNIPPET SERVING, PORT 1234',
+ 'SNIPPET START, PROTOCOL 1 234',
+ 'SNIPPET SERVING, PORT 1234',
+ 'SNIPPET START, PROTOCOL 1 234',
+ 'SNIPPET SERVING, PORT 1234',
+ ]
+
+ # Test 'setsid' exists
+ client = self._make_client()
+ client._adb.shell = mock.Mock(return_value='setsid')
+ client.start_app_and_connect()
+ cmd_setsid = '%s am instrument -w -e action start %s/%s' % (
+ snippet_client._SETSID_COMMAND,
+ MOCK_PACKAGE_NAME,
+ snippet_client._INSTRUMENTATION_RUNNER_PACKAGE)
+ mock_do_start_app.assert_has_calls(mock.call(cmd_setsid))
+
+ # Test 'setsid' does not exist, but 'nohup' exsits
+ client = self._make_client()
+ client._adb.shell = _mocked_shell
+ client.start_app_and_connect()
+ cmd_nohup = '%s am instrument -w -e action start %s/%s' % (
+ snippet_client._NOHUP_COMMAND,
+ MOCK_PACKAGE_NAME,
+ snippet_client._INSTRUMENTATION_RUNNER_PACKAGE)
+ mock_do_start_app.assert_has_calls([
+ mock.call(cmd_setsid),
+ mock.call(cmd_nohup)
+ ])
+
+ # Test both 'setsid' and 'nohup' do not exist
+ client._adb.shell = mock.Mock(
+ side_effect=adb.AdbError('cmd', 'stdout', 'stderr', 'ret_code'))
+ client = self._make_client()
+ client.start_app_and_connect()
+ cmd_not_persist = ' am instrument -w -e action start %s/%s' % (
+ MOCK_PACKAGE_NAME,
+ snippet_client._INSTRUMENTATION_RUNNER_PACKAGE)
+ mock_do_start_app.assert_has_calls([
+ mock.call(cmd_setsid),
+ mock.call(cmd_nohup),
+ mock.call(cmd_not_persist)
+ ])
+
@mock.patch('socket.create_connection')
@mock.patch('mobly.controllers.android_device_lib.snippet_client.'
'utils.start_standing_subprocess')
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 2
} | 1.5 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | coverage==7.8.0
exceptiongroup==1.2.2
execnet==2.1.1
future==1.0.0
iniconfig==2.1.0
-e git+https://github.com/google/mobly.git@067621c8fbb771b35cf0a7c6de6c42a6c452321f#egg=mobly
mock==1.0.1
packaging==24.2
pluggy==1.5.0
portpicker==1.6.0
psutil==7.0.0
pytest==8.3.5
pytest-asyncio==0.26.0
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-xdist==3.6.1
pytz==2025.2
PyYAML==6.0.2
timeout-decorator==0.5.0
tomli==2.2.1
typing_extensions==4.13.0
| name: mobly
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- coverage==7.8.0
- exceptiongroup==1.2.2
- execnet==2.1.1
- future==1.0.0
- iniconfig==2.1.0
- mock==1.0.1
- packaging==24.2
- pluggy==1.5.0
- portpicker==1.6.0
- psutil==7.0.0
- pytest==8.3.5
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-xdist==3.6.1
- pytz==2025.2
- pyyaml==6.0.2
- timeout-decorator==0.5.0
- tomli==2.2.1
- typing-extensions==4.13.0
prefix: /opt/conda/envs/mobly
| [
"tests/mobly/controllers/android_device_lib/callback_handler_test.py::CallbackHandlerTest::test_wait_and_get_timeout",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start_app_and_connect_v1_persistent_session"
] | [] | [
"tests/mobly/base_test_test.py::BaseTestTest::test_abort_class_in_test",
"tests/mobly/base_test_test.py::BaseTestTest::test_abort_setup_class",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_equal_fail",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_equal_fail_with_msg",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_equal_pass",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_fail_with_noop",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_fail_with_wrong_error",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_fail_with_wrong_regex",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_pass",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_regex_fail_with_noop",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_regex_fail_with_wrong_error",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_regex_pass",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_true",
"tests/mobly/base_test_test.py::BaseTestTest::test_both_teardown_and_test_body_raise_exceptions",
"tests/mobly/base_test_test.py::BaseTestTest::test_cli_test_selection_fail_by_convention",
"tests/mobly/base_test_test.py::BaseTestTest::test_cli_test_selection_override_self_tests_list",
"tests/mobly/base_test_test.py::BaseTestTest::test_current_test_name",
"tests/mobly/base_test_test.py::BaseTestTest::test_default_execution_of_all_tests",
"tests/mobly/base_test_test.py::BaseTestTest::test_explicit_pass",
"tests/mobly/base_test_test.py::BaseTestTest::test_explicit_pass_but_teardown_test_raises_an_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_fail",
"tests/mobly/base_test_test.py::BaseTestTest::test_failure_in_procedure_functions_is_recorded",
"tests/mobly/base_test_test.py::BaseTestTest::test_failure_to_call_procedure_function_is_recorded",
"tests/mobly/base_test_test.py::BaseTestTest::test_generate_tests_call_outside_of_setup_generated_tests",
"tests/mobly/base_test_test.py::BaseTestTest::test_generate_tests_dup_test_name",
"tests/mobly/base_test_test.py::BaseTestTest::test_generate_tests_run",
"tests/mobly/base_test_test.py::BaseTestTest::test_generate_tests_selected_run",
"tests/mobly/base_test_test.py::BaseTestTest::test_implicit_pass",
"tests/mobly/base_test_test.py::BaseTestTest::test_missing_requested_test_func",
"tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_executed_if_both_test_and_teardown_test_fails",
"tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_executed_if_teardown_test_fails",
"tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_executed_if_test_fails",
"tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_executed_if_test_setup_fails_by_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_raise_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_on_pass_raise_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_self_tests_list",
"tests/mobly/base_test_test.py::BaseTestTest::test_self_tests_list_fail_by_convention",
"tests/mobly/base_test_test.py::BaseTestTest::test_setup_and_teardown_execution_count",
"tests/mobly/base_test_test.py::BaseTestTest::test_setup_class_fail_by_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_setup_test_fail_by_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_setup_test_fail_by_test_signal",
"tests/mobly/base_test_test.py::BaseTestTest::test_skip",
"tests/mobly/base_test_test.py::BaseTestTest::test_skip_if",
"tests/mobly/base_test_test.py::BaseTestTest::test_teardown_test_assert_fail",
"tests/mobly/base_test_test.py::BaseTestTest::test_teardown_test_executed_if_setup_test_fails",
"tests/mobly/base_test_test.py::BaseTestTest::test_teardown_test_executed_if_test_fails",
"tests/mobly/base_test_test.py::BaseTestTest::test_teardown_test_executed_if_test_pass",
"tests/mobly/base_test_test.py::BaseTestTest::test_teardown_test_raise_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_uncaught_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_basic",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_default_None",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_default_overwrite",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_default_overwrite_by_optional_param_list",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_default_overwrite_by_required_param_list",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_optional",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_optional_missing",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_optional_with_default",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_required",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_required_missing",
"tests/mobly/controllers/android_device_lib/callback_handler_test.py::CallbackHandlerTest::test_event_dict_to_snippet_event",
"tests/mobly/controllers/android_device_lib/callback_handler_test.py::CallbackHandlerTest::test_timeout_value",
"tests/mobly/controllers/android_device_lib/callback_handler_test.py::CallbackHandlerTest::test_wait_for_event",
"tests/mobly/controllers/android_device_lib/callback_handler_test.py::CallbackHandlerTest::test_wait_for_event_negative",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_check_app_installed_fail_app_not_installed",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_check_app_installed_fail_not_instrumented",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_check_app_installed_fail_target_not_installed",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_check_app_installed_normal",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_restore_event_client",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start_app_and_connect_no_valid_line",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start_app_and_connect_unknown_protocol",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start_app_and_connect_v0",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start_app_and_connect_v0_header_junk",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start_app_and_connect_v1",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start_app_and_connect_v1_header_junk",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start_event_client"
] | [] | Apache License 2.0 | 1,459 | 1,393 | [
"mobly/controllers/android_device_lib/callback_handler.py",
"mobly/controllers/android_device_lib/snippet_client.py"
] |
asottile__add-trailing-comma-8 | 8d87f678b13ac1497b688173e94d21d8371746dc | 2017-07-14 02:56:11 | 49a0d757435b4962c58f8d4f48ba85c7f2f5256f | diff --git a/add_trailing_comma.py b/add_trailing_comma.py
index 49e98c7..bd16709 100644
--- a/add_trailing_comma.py
+++ b/add_trailing_comma.py
@@ -15,9 +15,10 @@ from tokenize_rt import UNIMPORTANT_WS
Offset = collections.namedtuple('Offset', ('line', 'utf8_byte_offset'))
Call = collections.namedtuple('Call', ('node', 'star_args', 'arg_offsets'))
-Func = collections.namedtuple('Func', ('node', 'arg_offsets'))
+Func = collections.namedtuple('Func', ('node', 'star_args', 'arg_offsets'))
Literal = collections.namedtuple('Literal', ('node', 'braces', 'backtrack'))
Literal.__new__.__defaults__ = (False,)
+Fix = collections.namedtuple('Fix', ('braces', 'initial_indent'))
NEWLINES = frozenset(('NEWLINE', 'NL'))
NON_CODING_TOKENS = frozenset(('COMMENT', 'NL', UNIMPORTANT_WS))
@@ -141,28 +142,39 @@ class FindNodes(ast.NodeVisitor):
self.generic_visit(node)
def visit_FunctionDef(self, node):
- has_starargs = (
- node.args.vararg or node.args.kwarg or
- # python 3 only
- getattr(node.args, 'kwonlyargs', None)
- )
+ has_starargs = False
+ args = list(node.args.args)
+
+ if node.args.vararg:
+ if isinstance(node.args.vararg, ast.AST): # pragma: no cover (py3)
+ args.append(node.args.vararg)
+ has_starargs = True
+ if node.args.kwarg:
+ if isinstance(node.args.kwarg, ast.AST): # pragma: no cover (py3)
+ args.append(node.args.kwarg)
+ has_starargs = True
+ py3_kwonlyargs = getattr(node.args, 'kwonlyargs', None)
+ if py3_kwonlyargs: # pragma: no cover (py3)
+ args.extend(py3_kwonlyargs)
+ has_starargs = True
+
orig = node.lineno
is_multiline = False
offsets = set()
- for argnode in node.args.args:
+ for argnode in args:
offset = _to_offset(argnode)
if offset.line > orig:
is_multiline = True
offsets.add(offset)
- if is_multiline and not has_starargs:
+ if is_multiline:
key = Offset(node.lineno, node.col_offset)
- self.funcs[key] = Func(node, offsets)
+ self.funcs[key] = Func(node, has_starargs, offsets)
self.generic_visit(node)
-def _fix_inner(brace_start, brace_end, first_brace, tokens):
+def _find_simple(brace_start, brace_end, first_brace, tokens):
brace_stack = [first_brace]
for i in range(first_brace + 1, len(tokens)):
@@ -183,12 +195,6 @@ def _fix_inner(brace_start, brace_end, first_brace, tokens):
if tokens[first_brace].line == tokens[last_brace].line:
return
- # Figure out if either of the braces are "hugging"
- hug_open = tokens[first_brace + 1].name not in NON_CODING_TOKENS
- hug_close = tokens[last_brace - 1].name not in NON_CODING_TOKENS
- if hug_open and tokens[last_brace - 1].src in END_BRACES:
- hug_open = hug_close = False
-
# determine the initial indentation
i = first_brace
while i >= 0 and tokens[i].name not in NEWLINES:
@@ -199,51 +205,10 @@ def _fix_inner(brace_start, brace_end, first_brace, tokens):
else:
initial_indent = 0
- # fix open hugging
- if hug_open:
- new_indent = initial_indent + 4
-
- tokens[first_brace + 1:first_brace + 1] = [
- Token('NL', '\n'), Token(UNIMPORTANT_WS, ' ' * new_indent),
- ]
- last_brace += 2
-
- # Adust indentation for the rest of the things
- min_indent = None
- indents = []
- for i in range(first_brace + 3, last_brace):
- if tokens[i - 1].name == 'NL' and tokens[i].name == UNIMPORTANT_WS:
- if min_indent is None:
- min_indent = len(tokens[i].src)
- elif len(tokens[i].src) < min_indent:
- min_indent = len(tokens[i].src)
-
- indents.append(i)
+ return Fix(braces=(first_brace, last_brace), initial_indent=initial_indent)
- for i in indents:
- oldlen = len(tokens[i].src)
- newlen = oldlen - min_indent + new_indent
- tokens[i] = tokens[i]._replace(src=' ' * newlen)
- # fix close hugging
- if hug_close:
- tokens[last_brace:last_brace] = [
- Token('NL', '\n'),
- Token(UNIMPORTANT_WS, ' ' * initial_indent),
- ]
- last_brace += 2
-
- # From there, we can walk backwards and decide whether a comma is needed
- i = last_brace - 1
- while tokens[i].name in NON_CODING_TOKENS:
- i -= 1
-
- # If we're not a hugging paren, we can insert a comma
- if tokens[i].src != ',' and i + 1 != last_brace:
- tokens.insert(i + 1, Token('OP', ','))
-
-
-def _fix_call(call, i, tokens):
+def _find_call(call, i, tokens):
# When we get a `call` object, the ast refers to it as this:
#
# func_name(arg, arg, arg)
@@ -273,10 +238,10 @@ def _fix_call(call, i, tokens):
else:
raise AssertionError('Past end?')
- _fix_inner(brace_start, brace_end, first_brace, tokens)
+ return _find_simple(brace_start, brace_end, first_brace, tokens)
-def _fix_literal(literal, i, tokens):
+def _find_literal(literal, i, tokens):
brace_start, brace_end = literal.braces
# tuples are evil, we need to backtrack to find the opening paren
@@ -289,7 +254,60 @@ def _fix_literal(literal, i, tokens):
if tokens[i].src != brace_start:
return
- _fix_inner(brace_start, brace_end, i, tokens)
+ return _find_simple(brace_start, brace_end, i, tokens)
+
+
+def _fix_comma_and_unhug(fix_data, add_comma, tokens):
+ first_brace, last_brace = fix_data.braces
+
+ # Figure out if either of the braces are "hugging"
+ hug_open = tokens[first_brace + 1].name not in NON_CODING_TOKENS
+ hug_close = tokens[last_brace - 1].name not in NON_CODING_TOKENS
+ if hug_open and tokens[last_brace - 1].src in END_BRACES:
+ hug_open = hug_close = False
+
+ # fix open hugging
+ if hug_open:
+ new_indent = fix_data.initial_indent + 4
+
+ tokens[first_brace + 1:first_brace + 1] = [
+ Token('NL', '\n'), Token(UNIMPORTANT_WS, ' ' * new_indent),
+ ]
+ last_brace += 2
+
+ # Adust indentation for the rest of the things
+ min_indent = None
+ indents = []
+ for i in range(first_brace + 3, last_brace):
+ if tokens[i - 1].name == 'NL' and tokens[i].name == UNIMPORTANT_WS:
+ if min_indent is None:
+ min_indent = len(tokens[i].src)
+ elif len(tokens[i].src) < min_indent:
+ min_indent = len(tokens[i].src)
+
+ indents.append(i)
+
+ for i in indents:
+ oldlen = len(tokens[i].src)
+ newlen = oldlen - min_indent + new_indent
+ tokens[i] = tokens[i]._replace(src=' ' * newlen)
+
+ # fix close hugging
+ if hug_close:
+ tokens[last_brace:last_brace] = [
+ Token('NL', '\n'),
+ Token(UNIMPORTANT_WS, ' ' * fix_data.initial_indent),
+ ]
+ last_brace += 2
+
+ # From there, we can walk backwards and decide whether a comma is needed
+ i = last_brace - 1
+ while tokens[i].name in NON_CODING_TOKENS:
+ i -= 1
+
+ # If we're not a hugging paren, we can insert a comma
+ if add_comma and tokens[i].src != ',' and i + 1 != last_brace:
+ tokens.insert(i + 1, Token('OP', ','))
def _fix_src(contents_text, py35_plus):
@@ -305,16 +323,25 @@ def _fix_src(contents_text, py35_plus):
tokens = src_to_tokens(contents_text)
for i, token in reversed(tuple(enumerate(tokens))):
key = Offset(token.line, token.utf8_byte_offset)
+ add_comma = True
+ fix_data = None
+
if key in visitor.calls:
call = visitor.calls[key]
# Only fix stararg calls if asked to
- if not call.star_args or py35_plus:
- _fix_call(call, i, tokens)
- elif key in visitor.literals:
- _fix_literal(visitor.literals[key], i, tokens)
+ add_comma = not call.star_args or py35_plus
+ fix_data = _find_call(call, i, tokens)
elif key in visitor.funcs:
+ func = visitor.funcs[key]
+ # any amount of starargs excludes adding a comma for defs
+ add_comma = not func.star_args
# functions can be treated as calls
- _fix_call(visitor.funcs[key], i, tokens)
+ fix_data = _find_call(func, i, tokens)
+ elif key in visitor.literals:
+ fix_data = _find_literal(visitor.literals[key], i, tokens)
+
+ if fix_data is not None:
+ _fix_comma_and_unhug(fix_data, add_comma, tokens)
return tokens_to_src(tokens)
| microbug: `def f(*args):` and `f(*args)` are not unhugged
### minimal example
```python
def f(
*args): pass
f(
*args)
```
### expected
```python
def f(
*args
): pass
f(
*args
)
```
### actual
no change
### explain
these two types are pruned during the trailing comma determination, but should still apply unhugging | asottile/add-trailing-comma | diff --git a/tests/add_trailing_comma_test.py b/tests/add_trailing_comma_test.py
index 810b741..c016b12 100644
--- a/tests/add_trailing_comma_test.py
+++ b/tests/add_trailing_comma_test.py
@@ -11,6 +11,7 @@ from add_trailing_comma import _fix_src
from add_trailing_comma import main
+xfailif_py2 = pytest.mark.xfail(sys.version_info < (3,), reason='py3+')
xfailif_lt_py35 = pytest.mark.xfail(sys.version_info < (3, 5), reason='py35+')
@@ -264,7 +265,7 @@ def test_noop_tuple_literal_without_braces():
# *args forbid trailing commas
'def f(\n'
' *args\n'
- '): pass'
+ '): pass',
# **kwargs forbid trailing commas
'def f(\n'
' **kwargs\n'
@@ -415,12 +416,56 @@ def test_noop_unhugs(src):
' 1,\n'
')',
),
+ (
+ 'f(\n'
+ ' *args)',
+
+ 'f(\n'
+ ' *args\n'
+ ')',
+ ),
),
)
def test_fix_unhugs(src, expected):
assert _fix_src(src, py35_plus=False) == expected
+@xfailif_py2
[email protected](
+ ('src', 'expected'),
+ (
+ # python 2 doesn't give offset information for starargs
+ (
+ 'def f(\n'
+ ' *args): pass',
+
+ 'def f(\n'
+ ' *args\n'
+ '): pass',
+ ),
+ (
+ 'def f(\n'
+ ' **kwargs): pass',
+
+ 'def f(\n'
+ ' **kwargs\n'
+ '): pass',
+ ),
+ # python 2 doesn't kwonlyargs
+ (
+ 'def f(\n'
+ ' *, kw=1, kw2=2): pass',
+
+ 'def f(\n'
+ ' *, kw=1, kw2=2\n'
+ '): pass',
+ ),
+ ),
+)
+def test_fix_unhugs_py3_only(src, expected):
+ assert _fix_src(src, py35_plus=False) == expected
+
+
def test_main_trivial():
assert main(()) == 0
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | 0.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -r requirements-dev.txt && pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements-dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/asottile/add-trailing-comma.git@8d87f678b13ac1497b688173e94d21d8371746dc#egg=add_trailing_comma
cfgv==3.4.0
coverage==7.8.0
distlib==0.3.9
exceptiongroup==1.2.2
filelock==3.18.0
flake8==7.2.0
identify==2.6.9
iniconfig==2.1.0
mccabe==0.7.0
nodeenv==1.9.1
packaging==24.2
platformdirs==4.3.7
pluggy==1.5.0
pre_commit==4.2.0
pycodestyle==2.13.0
pyflakes==3.3.2
pytest==8.3.5
PyYAML==6.0.2
tokenize_rt==6.1.0
tomli==2.2.1
virtualenv==20.29.3
| name: add-trailing-comma
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- cfgv==3.4.0
- coverage==7.8.0
- distlib==0.3.9
- exceptiongroup==1.2.2
- filelock==3.18.0
- flake8==7.2.0
- identify==2.6.9
- iniconfig==2.1.0
- mccabe==0.7.0
- nodeenv==1.9.1
- packaging==24.2
- platformdirs==4.3.7
- pluggy==1.5.0
- pre-commit==4.2.0
- pycodestyle==2.13.0
- pyflakes==3.3.2
- pytest==8.3.5
- pyyaml==6.0.2
- tokenize-rt==6.1.0
- tomli==2.2.1
- virtualenv==20.29.3
prefix: /opt/conda/envs/add-trailing-comma
| [
"tests/add_trailing_comma_test.py::test_fix_unhugs[f(\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs_py3_only[def"
] | [
"tests/add_trailing_comma_test.py::test_fixes_literals[x",
"tests/add_trailing_comma_test.py::test_fixes_py35_plus_literals[x"
] | [
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x(1)]",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[tuple(\\n",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x(\\n",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x((\\n",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[(\\n",
"tests/add_trailing_comma_test.py::test_py35_plus_rewrite",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[(1,",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[[1,",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[{1,",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[{1:",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[y(*args1,",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[y(**kwargs1,",
"tests/add_trailing_comma_test.py::test_fixes_calls[x(\\n",
"tests/add_trailing_comma_test.py::test_fixes_calls[foo()(\\n",
"tests/add_trailing_comma_test.py::test_noop_one_line_literals[(1,",
"tests/add_trailing_comma_test.py::test_noop_one_line_literals[[1,",
"tests/add_trailing_comma_test.py::test_noop_one_line_literals[{1,",
"tests/add_trailing_comma_test.py::test_noop_one_line_literals[{1:",
"tests/add_trailing_comma_test.py::test_noop_tuple_literal_without_braces",
"tests/add_trailing_comma_test.py::test_noop_function_defs[def",
"tests/add_trailing_comma_test.py::test_fixes_defs[def",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f(x,",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f(\\n",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f((\\n",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f([\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[f(a,\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[with",
"tests/add_trailing_comma_test.py::test_fix_unhugs[if",
"tests/add_trailing_comma_test.py::test_fix_unhugs[{'foo':",
"tests/add_trailing_comma_test.py::test_fix_unhugs[f(g(\\n",
"tests/add_trailing_comma_test.py::test_main_trivial",
"tests/add_trailing_comma_test.py::test_main_noop",
"tests/add_trailing_comma_test.py::test_main_changes_a_file",
"tests/add_trailing_comma_test.py::test_main_syntax_error",
"tests/add_trailing_comma_test.py::test_main_non_utf8_bytes",
"tests/add_trailing_comma_test.py::test_main_py35_plus_argument_star_args",
"tests/add_trailing_comma_test.py::test_main_py35_plus_argument_star_star_kwargs"
] | [] | MIT License | 1,460 | 2,475 | [
"add_trailing_comma.py"
] |
|
asottile__add-trailing-comma-12 | 49a0d757435b4962c58f8d4f48ba85c7f2f5256f | 2017-07-14 15:44:54 | 49a0d757435b4962c58f8d4f48ba85c7f2f5256f | diff --git a/add_trailing_comma.py b/add_trailing_comma.py
index 7a571a7..882cb9d 100644
--- a/add_trailing_comma.py
+++ b/add_trailing_comma.py
@@ -7,6 +7,7 @@ import collections
import io
import sys
+from tokenize_rt import ESCAPED_NL
from tokenize_rt import src_to_tokens
from tokenize_rt import Token
from tokenize_rt import tokens_to_src
@@ -20,8 +21,8 @@ Literal = collections.namedtuple('Literal', ('node', 'backtrack'))
Literal.__new__.__defaults__ = (False,)
Fix = collections.namedtuple('Fix', ('braces', 'multi_arg', 'initial_indent'))
-NEWLINES = frozenset(('NEWLINE', 'NL'))
-NON_CODING_TOKENS = frozenset(('COMMENT', 'NL', UNIMPORTANT_WS))
+NEWLINES = frozenset((ESCAPED_NL, 'NEWLINE', 'NL'))
+NON_CODING_TOKENS = frozenset(('COMMENT', ESCAPED_NL, 'NL', UNIMPORTANT_WS))
INDENT_TOKENS = frozenset(('INDENT', UNIMPORTANT_WS))
START_BRACES = frozenset(('(', '{', '['))
END_BRACES = frozenset((')', '}', ']'))
diff --git a/setup.py b/setup.py
index 39c86ff..828d6a9 100644
--- a/setup.py
+++ b/setup.py
@@ -17,7 +17,7 @@ setup(
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
],
- install_requires=['tokenize-rt'],
+ install_requires=['tokenize-rt>=2'],
py_modules=['add_trailing_comma'],
entry_points={
'console_scripts': ['add-trailing-comma = add_trailing_comma:main'],
| escaped newlines are throwing off indent detection
This should be a noop:
```python
x = y.\
foo(
bar,
)
```
However, this is the current behaviour:
```diff
x = y.\
foo(
bar,
- )
+)
```
Might need help from https://github.com/asottile/tokenize-rt/issues/1 | asottile/add-trailing-comma | diff --git a/tests/add_trailing_comma_test.py b/tests/add_trailing_comma_test.py
index 9f67fb3..d41af5c 100644
--- a/tests/add_trailing_comma_test.py
+++ b/tests/add_trailing_comma_test.py
@@ -501,6 +501,11 @@ def test_fix_unhugs_py3_only(src, expected):
' 1, 2, 3, 4,\n'
' ],\n'
']',
+ # Regression test for #11
+ 'foo.\\\n'
+ ' bar(\n'
+ ' 5,\n'
+ ' )',
),
)
def test_noop_trailing_brace(src):
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 2
} | 0.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements-dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/asottile/add-trailing-comma.git@49a0d757435b4962c58f8d4f48ba85c7f2f5256f#egg=add_trailing_comma
attrs==22.2.0
certifi==2021.5.30
cfgv==3.3.1
coverage==6.2
distlib==0.3.9
filelock==3.4.1
flake8==5.0.4
identify==2.4.4
importlib-metadata==4.2.0
importlib-resources==5.2.3
iniconfig==1.1.1
mccabe==0.7.0
nodeenv==1.6.0
packaging==21.3
platformdirs==2.4.0
pluggy==1.0.0
pre-commit==2.17.0
py==1.11.0
pycodestyle==2.9.1
pyflakes==2.5.0
pyparsing==3.1.4
pytest==7.0.1
PyYAML==6.0.1
tokenize-rt==4.2.1
toml==0.10.2
tomli==1.2.3
typing_extensions==4.1.1
virtualenv==20.16.2
zipp==3.6.0
| name: add-trailing-comma
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- cfgv==3.3.1
- coverage==6.2
- distlib==0.3.9
- filelock==3.4.1
- flake8==5.0.4
- identify==2.4.4
- importlib-metadata==4.2.0
- importlib-resources==5.2.3
- iniconfig==1.1.1
- mccabe==0.7.0
- nodeenv==1.6.0
- packaging==21.3
- platformdirs==2.4.0
- pluggy==1.0.0
- pre-commit==2.17.0
- py==1.11.0
- pycodestyle==2.9.1
- pyflakes==2.5.0
- pyparsing==3.1.4
- pytest==7.0.1
- pyyaml==6.0.1
- tokenize-rt==4.2.1
- toml==0.10.2
- tomli==1.2.3
- typing-extensions==4.1.1
- virtualenv==20.16.2
- zipp==3.6.0
prefix: /opt/conda/envs/add-trailing-comma
| [
"tests/add_trailing_comma_test.py::test_noop_trailing_brace[foo.\\\\\\n"
] | [] | [
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x(1)]",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[tuple(\\n",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x(\\n",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x((\\n",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[(\\n",
"tests/add_trailing_comma_test.py::test_ignores_invalid_ast_node",
"tests/add_trailing_comma_test.py::test_py35_plus_rewrite",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[(1,",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[[1,",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[{1,",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[{1:",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[y(*args1,",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[y(**kwargs1,",
"tests/add_trailing_comma_test.py::test_fixes_calls[x(\\n",
"tests/add_trailing_comma_test.py::test_fixes_calls[foo()(\\n",
"tests/add_trailing_comma_test.py::test_noop_one_line_literals[(1,",
"tests/add_trailing_comma_test.py::test_noop_one_line_literals[[1,",
"tests/add_trailing_comma_test.py::test_noop_one_line_literals[{1,",
"tests/add_trailing_comma_test.py::test_noop_one_line_literals[{1:",
"tests/add_trailing_comma_test.py::test_fixes_literals[x",
"tests/add_trailing_comma_test.py::test_fixes_py35_plus_literals[x",
"tests/add_trailing_comma_test.py::test_noop_tuple_literal_without_braces",
"tests/add_trailing_comma_test.py::test_noop_function_defs[def",
"tests/add_trailing_comma_test.py::test_fixes_defs[def",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f(x,",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f(\\n",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f((\\n",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f([\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[f(\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[f(a,\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[with",
"tests/add_trailing_comma_test.py::test_fix_unhugs[if",
"tests/add_trailing_comma_test.py::test_fix_unhugs[{'foo':",
"tests/add_trailing_comma_test.py::test_fix_unhugs[f(g(\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[{\"foo\":",
"tests/add_trailing_comma_test.py::test_fix_unhugs[x",
"tests/add_trailing_comma_test.py::test_fix_unhugs_py3_only[def",
"tests/add_trailing_comma_test.py::test_noop_trailing_brace[[]]",
"tests/add_trailing_comma_test.py::test_noop_trailing_brace[x",
"tests/add_trailing_comma_test.py::test_noop_trailing_brace[y",
"tests/add_trailing_comma_test.py::test_fix_trailing_brace[x",
"tests/add_trailing_comma_test.py::test_main_trivial",
"tests/add_trailing_comma_test.py::test_main_noop",
"tests/add_trailing_comma_test.py::test_main_changes_a_file",
"tests/add_trailing_comma_test.py::test_main_syntax_error",
"tests/add_trailing_comma_test.py::test_main_non_utf8_bytes",
"tests/add_trailing_comma_test.py::test_main_py35_plus_argument_star_args",
"tests/add_trailing_comma_test.py::test_main_py35_plus_argument_star_star_kwargs"
] | [] | MIT License | 1,463 | 435 | [
"add_trailing_comma.py",
"setup.py"
] |
|
tox-dev__tox-554 | 682b96094b971b294c931c7464fbafe846308d4d | 2017-07-15 13:23:25 | e374ce61bf101fb2cc2eddd955f57048df153017 | diff --git a/tox/_quickstart.py b/tox/_quickstart.py
index 37c48ddc..bc283a8a 100644
--- a/tox/_quickstart.py
+++ b/tox/_quickstart.py
@@ -40,6 +40,7 @@
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
+import argparse
import sys
from os import path
from codecs import open
@@ -224,19 +225,24 @@ def generate(d, overwrite=True, silent=False):
def write_file(fpath, mode, content):
print('Creating file %s.' % fpath)
- f = open(fpath, mode, encoding='utf-8')
try:
- f.write(content)
- finally:
- f.close()
+ with open(fpath, mode, encoding='utf-8') as f:
+ f.write(content)
+ except IOError:
+ print('Error writing file.')
+ raise
sys.stdout.write('\n')
- fpath = 'tox.ini'
+ fpath = path.join(d.get('path', ''), 'tox.ini')
if path.isfile(fpath) and not overwrite:
print('File %s already exists.' % fpath)
- do_prompt(d, 'fpath', 'Alternative path to write tox.ini contents to', 'tox-generated.ini')
+ do_prompt(
+ d,
+ 'fpath',
+ 'Alternative path to write tox.ini contents to',
+ path.join(d.get('path', ''), 'tox-generated.ini'))
fpath = d['fpath']
write_file(fpath, 'w', conf_text)
@@ -251,14 +257,25 @@ Execute `tox` to test your project.
''')
+def parse_args(argv):
+ parser = argparse.ArgumentParser(
+ description='Command-line script to quickly setup tox.ini for a Python project.'
+ )
+ parser.add_argument(
+ 'root', type=str, nargs='?', default='.',
+ help='Custom root directory to write tox.ini to. Defaults to current directory.'
+ )
+ parser.add_argument('--version', action='version', version='%(prog)s ' + __version__)
+
+ args = argv[1:]
+ return parser.parse_args(args)
+
+
def main(argv=sys.argv):
- d = {}
+ args = parse_args(argv)
- if len(argv) > 3:
- print('Usage: tox-quickstart [root]')
- sys.exit(1)
- elif len(argv) == 2:
- d['path'] = argv[1]
+ d = {}
+ d['path'] = args.root
try:
ask_user(d)
@@ -268,8 +285,13 @@ def main(argv=sys.argv):
return
d = process_input(d)
- generate(d, overwrite=False)
+ try:
+ generate(d, overwrite=False)
+ except Exception:
+ return 2
+
+ return 0
if __name__ == '__main__':
- main()
+ sys.exit(main())
| tox-quickstart should have a --help
- Bitbucket: https://bitbucket.org/hpk42/tox/issue/315
- Originally reported by: @warsaw
- Originally created at: 2016-02-16T16:16:47.537
and probably a --version too.
| tox-dev/tox | diff --git a/tests/test_quickstart.py b/tests/test_quickstart.py
index 49b2b0c0..76551008 100644
--- a/tests/test_quickstart.py
+++ b/tests/test_quickstart.py
@@ -379,6 +379,106 @@ deps =
result = read_tox('tox-generated.ini')
assert(result == expected_tox_ini)
+ def test_quickstart_main_tox_ini_location_can_be_overridden(
+ self,
+ tmpdir,
+ monkeypatch):
+ monkeypatch.setattr(
+ tox._quickstart, 'term_input',
+ self.get_mock_term_input(
+ [
+ '1', # py27 and py33
+ 'py.test', # command to run tests
+ '', # test dependencies
+ ]
+ )
+ )
+
+ root_dir = tmpdir.mkdir('alt-root')
+ tox_ini_path = root_dir.join('tox.ini')
+
+ tox._quickstart.main(argv=['tox-quickstart', root_dir.basename])
+
+ assert tox_ini_path.isfile()
+
+ expected_tox_ini = """
+# Tox (https://tox.readthedocs.io/) is a tool for running tests
+# in multiple virtualenvs. This configuration file will run the
+# test suite on all supported python versions. To use it, "pip install tox"
+# and then run "tox" from this directory.
+
+[tox]
+envlist = py27
+
+[testenv]
+commands = py.test
+deps =
+ pytest
+""".lstrip()
+ result = read_tox(fname=tox_ini_path.strpath)
+ assert(result == expected_tox_ini)
+
+ def test_quickstart_main_custom_tox_ini_location_with_existing_tox_ini(
+ self,
+ tmpdir,
+ monkeypatch):
+ monkeypatch.setattr(
+ tox._quickstart, 'term_input',
+ self.get_mock_term_input(
+ [
+ '1', # py27 and py33
+ 'py.test', # command to run tests
+ '', # test dependencies
+ '', # tox.ini already exists; overwrite?
+ ]
+ )
+ )
+
+ root_dir = tmpdir.mkdir('alt-root')
+ tox_ini_path = root_dir.join('tox.ini')
+ tox_ini_path.write('foo\nbar\n')
+
+ tox._quickstart.main(argv=['tox-quickstart', root_dir.basename])
+ tox_ini_path = root_dir.join('tox-generated.ini')
+
+ assert tox_ini_path.isfile()
+
+ expected_tox_ini = """
+# Tox (https://tox.readthedocs.io/) is a tool for running tests
+# in multiple virtualenvs. This configuration file will run the
+# test suite on all supported python versions. To use it, "pip install tox"
+# and then run "tox" from this directory.
+
+[tox]
+envlist = py27
+
+[testenv]
+commands = py.test
+deps =
+ pytest
+""".lstrip()
+ result = read_tox(fname=tox_ini_path.strpath)
+ assert(result == expected_tox_ini)
+
+ def test_quickstart_main_custom_nonexistent_tox_ini_location(
+ self,
+ tmpdir,
+ monkeypatch):
+ monkeypatch.setattr(
+ tox._quickstart, 'term_input',
+ self.get_mock_term_input(
+ [
+ '1', # py27 and py33
+ 'py.test', # command to run tests
+ '', # test dependencies
+ ]
+ )
+ )
+
+ root_dir = tmpdir.join('nonexistent-root')
+
+ assert tox._quickstart.main(argv=['tox-quickstart', root_dir.basename]) == 2
+
class TestToxQuickstart(object):
def test_pytest(self):
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | 2.7 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"pytest-timeout"
],
"pre_install": null,
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
distlib==0.3.9
filelock==3.4.1
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
platformdirs==2.4.0
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
pytest-timeout==2.1.0
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
-e git+https://github.com/tox-dev/tox.git@682b96094b971b294c931c7464fbafe846308d4d#egg=tox
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
virtualenv==20.17.1
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: tox
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- distlib==0.3.9
- filelock==3.4.1
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- platformdirs==2.4.0
- pytest-timeout==2.1.0
- virtualenv==20.17.1
prefix: /opt/conda/envs/tox
| [
"tests/test_quickstart.py::TestToxQuickstartMain::test_quickstart_main_tox_ini_location_can_be_overridden",
"tests/test_quickstart.py::TestToxQuickstartMain::test_quickstart_main_custom_tox_ini_location_with_existing_tox_ini",
"tests/test_quickstart.py::TestToxQuickstartMain::test_quickstart_main_custom_nonexistent_tox_ini_location"
] | [] | [
"tests/test_quickstart.py::TestToxQuickstartMain::test_quickstart_main_choose_individual_pythons_and_pytest",
"tests/test_quickstart.py::TestToxQuickstartMain::test_quickstart_main_choose_individual_pythons_and_nose_adds_deps",
"tests/test_quickstart.py::TestToxQuickstartMain::test_quickstart_main_choose_individual_pythons_and_trial_adds_deps",
"tests/test_quickstart.py::TestToxQuickstartMain::test_quickstart_main_choose_individual_pythons_and_pytest_adds_deps",
"tests/test_quickstart.py::TestToxQuickstartMain::test_quickstart_main_choose_py27_and_pytest_adds_deps",
"tests/test_quickstart.py::TestToxQuickstartMain::test_quickstart_main_choose_py27_and_py33_and_pytest_adds_deps",
"tests/test_quickstart.py::TestToxQuickstartMain::test_quickstart_main_choose_all_pythons_and_pytest_adds_deps",
"tests/test_quickstart.py::TestToxQuickstartMain::test_quickstart_main_choose_individual_pythons_and_defaults",
"tests/test_quickstart.py::TestToxQuickstartMain::test_quickstart_main_existing_tox_ini",
"tests/test_quickstart.py::TestToxQuickstart::test_pytest",
"tests/test_quickstart.py::TestToxQuickstart::test_setup_py_test",
"tests/test_quickstart.py::TestToxQuickstart::test_trial",
"tests/test_quickstart.py::TestToxQuickstart::test_nosetests"
] | [] | MIT License | 1,467 | 710 | [
"tox/_quickstart.py"
] |
|
asottile__add-trailing-comma-18 | f1666043a4ef3aabec4021acd8946b36209d546e | 2017-07-15 19:42:04 | e6cfc6a9976fc305b0054b30995b5407fea833a5 | diff --git a/add_trailing_comma.py b/add_trailing_comma.py
index 0f86212..2884adf 100644
--- a/add_trailing_comma.py
+++ b/add_trailing_comma.py
@@ -271,7 +271,7 @@ def _fix_brace(fix_data, add_comma, tokens):
indents = []
insert_indents = []
for i in range(first_brace + 3, last_brace):
- if tokens[i - 1].name == 'NL':
+ if tokens[i - 1].name == 'NL' and tokens[i].name != 'NL':
if tokens[i].name != UNIMPORTANT_WS:
min_indent = 0
insert_indents.append(i)
| Blank lines may be considered as "minimum" indentation while unhugging
They should be ignored, this currently introduces trailing whitespace:
```python
x('foo', (
'bar',
'baz',
))
``` | asottile/add-trailing-comma | diff --git a/tests/add_trailing_comma_test.py b/tests/add_trailing_comma_test.py
index b8b6f73..1cd26c9 100644
--- a/tests/add_trailing_comma_test.py
+++ b/tests/add_trailing_comma_test.py
@@ -473,6 +473,22 @@ def test_noop_unhugs(src):
' "bar"\n'
')',
),
+ # Regression test for #17
+ (
+ 'x("foo", (\n'
+ ' "bar",\n'
+ '\n'
+ ' "baz",\n'
+ '))',
+
+ 'x(\n'
+ ' "foo", (\n'
+ ' "bar",\n'
+ '\n'
+ ' "baz",\n'
+ ' ),\n'
+ ')',
+ ),
),
)
def test_fix_unhugs(src, expected):
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 1
} | 0.4 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.6",
"reqs_path": [
"requirements-dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/asottile/add-trailing-comma.git@f1666043a4ef3aabec4021acd8946b36209d546e#egg=add_trailing_comma
attrs==22.2.0
certifi==2021.5.30
cfgv==3.3.1
coverage==6.2
distlib==0.3.9
filelock==3.4.1
flake8==5.0.4
identify==2.4.4
importlib-metadata==4.2.0
importlib-resources==5.2.3
iniconfig==1.1.1
mccabe==0.7.0
nodeenv==1.6.0
packaging==21.3
platformdirs==2.4.0
pluggy==1.0.0
pre-commit==2.17.0
py==1.11.0
pycodestyle==2.9.1
pyflakes==2.5.0
pyparsing==3.1.4
pytest==7.0.1
PyYAML==6.0.1
tokenize-rt==4.2.1
toml==0.10.2
tomli==1.2.3
typing_extensions==4.1.1
virtualenv==20.16.2
zipp==3.6.0
| name: add-trailing-comma
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- cfgv==3.3.1
- coverage==6.2
- distlib==0.3.9
- filelock==3.4.1
- flake8==5.0.4
- identify==2.4.4
- importlib-metadata==4.2.0
- importlib-resources==5.2.3
- iniconfig==1.1.1
- mccabe==0.7.0
- nodeenv==1.6.0
- packaging==21.3
- platformdirs==2.4.0
- pluggy==1.0.0
- pre-commit==2.17.0
- py==1.11.0
- pycodestyle==2.9.1
- pyflakes==2.5.0
- pyparsing==3.1.4
- pytest==7.0.1
- pyyaml==6.0.1
- tokenize-rt==4.2.1
- toml==0.10.2
- tomli==1.2.3
- typing-extensions==4.1.1
- virtualenv==20.16.2
- zipp==3.6.0
prefix: /opt/conda/envs/add-trailing-comma
| [
"tests/add_trailing_comma_test.py::test_fix_unhugs[x(\"foo\","
] | [] | [
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x(1)]",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[tuple(\\n",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x(\\n",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x((\\n",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[(\\n",
"tests/add_trailing_comma_test.py::test_ignores_invalid_ast_node",
"tests/add_trailing_comma_test.py::test_py35_plus_rewrite",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[(1,",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[[1,",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[{1,",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[{1:",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[y(*args1,",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[y(**kwargs1,",
"tests/add_trailing_comma_test.py::test_fixes_calls[x(\\n",
"tests/add_trailing_comma_test.py::test_fixes_calls[foo()(\\n",
"tests/add_trailing_comma_test.py::test_noop_one_line_literals[(1,",
"tests/add_trailing_comma_test.py::test_noop_one_line_literals[[1,",
"tests/add_trailing_comma_test.py::test_noop_one_line_literals[{1,",
"tests/add_trailing_comma_test.py::test_noop_one_line_literals[{1:",
"tests/add_trailing_comma_test.py::test_fixes_literals[x",
"tests/add_trailing_comma_test.py::test_fixes_py35_plus_literals[x",
"tests/add_trailing_comma_test.py::test_noop_tuple_literal_without_braces",
"tests/add_trailing_comma_test.py::test_noop_function_defs[def",
"tests/add_trailing_comma_test.py::test_fixes_defs[def",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f(x,",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f(\\n",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f((\\n",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f([\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[f(\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[f(a,\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[with",
"tests/add_trailing_comma_test.py::test_fix_unhugs[if",
"tests/add_trailing_comma_test.py::test_fix_unhugs[{'foo':",
"tests/add_trailing_comma_test.py::test_fix_unhugs[f(g(\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[{\"foo\":",
"tests/add_trailing_comma_test.py::test_fix_unhugs[x",
"tests/add_trailing_comma_test.py::test_fix_unhugs_py3_only[def",
"tests/add_trailing_comma_test.py::test_noop_trailing_brace[[]]",
"tests/add_trailing_comma_test.py::test_noop_trailing_brace[x",
"tests/add_trailing_comma_test.py::test_noop_trailing_brace[y",
"tests/add_trailing_comma_test.py::test_noop_trailing_brace[foo.\\\\\\n",
"tests/add_trailing_comma_test.py::test_fix_trailing_brace[x",
"tests/add_trailing_comma_test.py::test_main_trivial",
"tests/add_trailing_comma_test.py::test_main_noop",
"tests/add_trailing_comma_test.py::test_main_changes_a_file",
"tests/add_trailing_comma_test.py::test_main_syntax_error",
"tests/add_trailing_comma_test.py::test_main_non_utf8_bytes",
"tests/add_trailing_comma_test.py::test_main_py35_plus_argument_star_args",
"tests/add_trailing_comma_test.py::test_main_py35_plus_argument_star_star_kwargs"
] | [] | MIT License | 1,468 | 177 | [
"add_trailing_comma.py"
] |
|
asottile__add-trailing-comma-20 | e6cfc6a9976fc305b0054b30995b5407fea833a5 | 2017-07-16 21:34:30 | e6cfc6a9976fc305b0054b30995b5407fea833a5 | asottile: This also removes the early-pruning of things that the ast tells us aren't multiline -- this potentially increases the runtime as it'll now run on more things -- but it makes everything more consistent and should ensure everything resolves in a single pass. | diff --git a/add_trailing_comma.py b/add_trailing_comma.py
index d8f1939..9ef7e2f 100644
--- a/add_trailing_comma.py
+++ b/add_trailing_comma.py
@@ -64,16 +64,12 @@ class FindNodes(ast.NodeVisitor):
self.literals = {}
self.has_new_syntax = False
- def _visit_literal(self, node, key='elts', is_multiline=False, **kwargs):
- orig = node.lineno
-
+ def _visit_literal(self, node, key='elts', **kwargs):
for elt in getattr(node, key):
- if elt.lineno > orig:
- is_multiline = True
if _is_star_arg(elt): # pragma: no cover (PY35+)
self.has_new_syntax = True
- if is_multiline:
+ if getattr(node, key):
key = Offset(node.lineno, node.col_offset)
self.literals[key] = Literal(node, **kwargs)
self.generic_visit(node)
@@ -87,13 +83,10 @@ class FindNodes(ast.NodeVisitor):
self._visit_literal(node, key='values')
def visit_Tuple(self, node):
- # tuples lie about things, so we pretend they are all multiline
- # and tell the later machinery to backtrack
- self._visit_literal(node, is_multiline=True, backtrack=True)
+ # tuples lie about things so we tell the later machiner to backtrack
+ self._visit_literal(node, backtrack=True)
def visit_Call(self, node):
- orig = node.lineno
-
argnodes = node.args + node.keywords
py2_starargs = getattr(node, 'starargs', None)
if py2_starargs: # pragma: no cover (<PY35)
@@ -103,7 +96,6 @@ class FindNodes(ast.NodeVisitor):
argnodes.append(py2_kwargs)
arg_offsets = set()
- is_multiline = False
has_starargs = bool(py2_starargs or py2_kwargs)
for argnode in argnodes:
if (
@@ -115,8 +107,6 @@ class FindNodes(ast.NodeVisitor):
offset = _to_offset(argnode)
# multiline strings have invalid position, ignore them
if offset.utf8_byte_offset != -1: # pragma: no branch (cpy bug)
- if offset.line > orig:
- is_multiline = True
arg_offsets.add(offset)
# If the sole argument is a generator, don't add a trailing comma as
@@ -125,7 +115,7 @@ class FindNodes(ast.NodeVisitor):
len(argnodes) == 1 and isinstance(argnodes[0], ast.GeneratorExp)
)
- if is_multiline and not only_a_generator:
+ if arg_offsets and not only_a_generator:
key = Offset(node.lineno, node.col_offset)
self.calls[key] = Call(node, has_starargs, arg_offsets)
@@ -144,16 +134,12 @@ class FindNodes(ast.NodeVisitor):
getattr(node.args, 'kwonlyargs', None)
)
- orig = node.lineno
- is_multiline = False
offsets = set()
for argnode in node.args.args:
offset = _to_offset(argnode)
- if offset.line > orig:
- is_multiline = True
offsets.add(offset)
- if is_multiline and not has_starargs:
+ if offsets and not has_starargs:
key = Offset(node.lineno, node.col_offset)
self.funcs[key] = Func(node, offsets)
@@ -181,7 +167,7 @@ def _find_simple(first_brace, tokens):
last_brace = i
- # This was not actually a multi-line call, despite the ast telling us that
+ # Check if we're actually multi-line
if tokens[first_brace].line == tokens[last_brace].line:
return
| Two iterations are required to resolve func(multi line string literal)
### input
```python
f('long'
'literal')
```
### output 1
```python
f(
'long'
'literal'
)
```
### output 2
```python
f(
'long'
'literal',
)
```
This _should_ resolve in a single pass | asottile/add-trailing-comma | diff --git a/tests/add_trailing_comma_test.py b/tests/add_trailing_comma_test.py
index 01ee421..450e3a0 100644
--- a/tests/add_trailing_comma_test.py
+++ b/tests/add_trailing_comma_test.py
@@ -257,6 +257,7 @@ def test_noop_tuple_literal_without_braces():
@pytest.mark.parametrize(
'src',
(
+ 'def f(): pass',
'def f(arg1, arg2): pass',
'def f(\n'
' arg1,\n'
@@ -354,6 +355,22 @@ def test_noop_unhugs(src):
' c,\n'
')',
),
+ (
+ 'def f(\n'
+ ' *args): pass',
+
+ 'def f(\n'
+ ' *args\n'
+ '): pass',
+ ),
+ (
+ 'def f(\n'
+ ' **kwargs): pass',
+
+ 'def f(\n'
+ ' **kwargs\n'
+ '): pass',
+ ),
# if there's already a trailing comma, don't add a new one
(
'f(\n'
@@ -493,6 +510,16 @@ def test_noop_unhugs(src):
' ),\n'
')',
),
+ # Regression test for #16
+ (
+ 'x("foo"\n'
+ ' "bar")',
+
+ 'x(\n'
+ ' "foo"\n'
+ ' "bar",\n'
+ ')',
+ ),
),
)
def test_fix_unhugs(src, expected):
@@ -503,23 +530,6 @@ def test_fix_unhugs(src, expected):
@pytest.mark.parametrize(
('src', 'expected'),
(
- # python 2 doesn't give offset information for starargs
- (
- 'def f(\n'
- ' *args): pass',
-
- 'def f(\n'
- ' *args\n'
- '): pass',
- ),
- (
- 'def f(\n'
- ' **kwargs): pass',
-
- 'def f(\n'
- ' **kwargs\n'
- '): pass',
- ),
# python 2 doesn't kwonlyargs
(
'def f(\n'
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 1
},
"num_modified_files": 1
} | 0.4 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"requirements-dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/asottile/add-trailing-comma.git@e6cfc6a9976fc305b0054b30995b5407fea833a5#egg=add_trailing_comma
cfgv==3.4.0
coverage==7.8.0
distlib==0.3.9
exceptiongroup==1.2.2
filelock==3.18.0
flake8==7.2.0
identify==2.6.9
iniconfig==2.1.0
mccabe==0.7.0
nodeenv==1.9.1
packaging==24.2
platformdirs==4.3.7
pluggy==1.5.0
pre_commit==4.2.0
pycodestyle==2.13.0
pyflakes==3.3.2
pytest==8.3.5
PyYAML==6.0.2
tokenize_rt==6.1.0
tomli==2.2.1
virtualenv==20.29.3
| name: add-trailing-comma
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- cfgv==3.4.0
- coverage==7.8.0
- distlib==0.3.9
- exceptiongroup==1.2.2
- filelock==3.18.0
- flake8==7.2.0
- identify==2.6.9
- iniconfig==2.1.0
- mccabe==0.7.0
- nodeenv==1.9.1
- packaging==24.2
- platformdirs==4.3.7
- pluggy==1.5.0
- pre-commit==4.2.0
- pycodestyle==2.13.0
- pyflakes==3.3.2
- pytest==8.3.5
- pyyaml==6.0.2
- tokenize-rt==6.1.0
- tomli==2.2.1
- virtualenv==20.29.3
prefix: /opt/conda/envs/add-trailing-comma
| [
"tests/add_trailing_comma_test.py::test_fix_unhugs[x(\"foo\",",
"tests/add_trailing_comma_test.py::test_fix_unhugs[x(\"foo\"\\n"
] | [
"tests/add_trailing_comma_test.py::test_fixes_literals[x",
"tests/add_trailing_comma_test.py::test_fixes_py35_plus_literals[x",
"tests/add_trailing_comma_test.py::test_fix_unhugs[x"
] | [
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x(1)]",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[tuple(\\n",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x(\\n",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x((\\n",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[(\\n",
"tests/add_trailing_comma_test.py::test_py35_plus_rewrite",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[(1,",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[[1,",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[{1,",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[{1:",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[y(*args1,",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[y(**kwargs1,",
"tests/add_trailing_comma_test.py::test_fixes_calls[x(\\n",
"tests/add_trailing_comma_test.py::test_fixes_calls[foo()(\\n",
"tests/add_trailing_comma_test.py::test_noop_one_line_literals[(1,",
"tests/add_trailing_comma_test.py::test_noop_one_line_literals[[1,",
"tests/add_trailing_comma_test.py::test_noop_one_line_literals[{1,",
"tests/add_trailing_comma_test.py::test_noop_one_line_literals[{1:",
"tests/add_trailing_comma_test.py::test_noop_tuple_literal_without_braces",
"tests/add_trailing_comma_test.py::test_noop_function_defs[def",
"tests/add_trailing_comma_test.py::test_fixes_defs[def",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f(x,",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f(\\n",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f((\\n",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f([\\n",
"tests/add_trailing_comma_test.py::test_noop_unhugs[textwrap.dedent(\"\"\"\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[f(\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[f(a,\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[def",
"tests/add_trailing_comma_test.py::test_fix_unhugs[with",
"tests/add_trailing_comma_test.py::test_fix_unhugs[if",
"tests/add_trailing_comma_test.py::test_fix_unhugs[{'foo':",
"tests/add_trailing_comma_test.py::test_fix_unhugs[f(g(\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[{\"foo\":",
"tests/add_trailing_comma_test.py::test_fix_unhugs_py3_only[def",
"tests/add_trailing_comma_test.py::test_noop_trailing_brace[[]]",
"tests/add_trailing_comma_test.py::test_noop_trailing_brace[x",
"tests/add_trailing_comma_test.py::test_noop_trailing_brace[y",
"tests/add_trailing_comma_test.py::test_noop_trailing_brace[foo.\\\\\\n",
"tests/add_trailing_comma_test.py::test_fix_trailing_brace[x",
"tests/add_trailing_comma_test.py::test_main_trivial",
"tests/add_trailing_comma_test.py::test_main_noop",
"tests/add_trailing_comma_test.py::test_main_changes_a_file",
"tests/add_trailing_comma_test.py::test_main_syntax_error",
"tests/add_trailing_comma_test.py::test_main_non_utf8_bytes",
"tests/add_trailing_comma_test.py::test_main_py35_plus_argument_star_args",
"tests/add_trailing_comma_test.py::test_main_py35_plus_argument_star_star_kwargs"
] | [] | MIT License | 1,474 | 912 | [
"add_trailing_comma.py"
] |
asottile__add-trailing-comma-25 | c7da498ebb0549a0925b8f4c3502d8fd27f554b8 | 2017-07-17 18:06:47 | 3343fe9ba1b396342d27a73fafa88807b47fc254 | diff --git a/add_trailing_comma.py b/add_trailing_comma.py
index da4d733..70cb166 100644
--- a/add_trailing_comma.py
+++ b/add_trailing_comma.py
@@ -63,11 +63,12 @@ class FindNodes(ast.NodeVisitor):
self.calls = collections.defaultdict(list)
self.funcs = {}
self.literals = {}
+ self.tuples = {}
- def _visit_literal(self, node, key='elts', **kwargs):
+ def _visit_literal(self, node, key='elts'):
if getattr(node, key):
key = Offset(node.lineno, node.col_offset)
- self.literals[key] = Literal(node, **kwargs)
+ self.literals[key] = Literal(node)
self.generic_visit(node)
visit_Set = visit_List = _visit_literal
@@ -76,8 +77,11 @@ class FindNodes(ast.NodeVisitor):
self._visit_literal(node, key='values')
def visit_Tuple(self, node):
- # tuples lie about things so we tell the later machiner to backtrack
- self._visit_literal(node, backtrack=True)
+ if node.elts:
+ key = Offset(node.lineno, node.col_offset)
+ # tuples lie about offset -- tell the later machinery to backtrack
+ self.tuples[key] = Literal(node, backtrack=True)
+ self.generic_visit(node)
def visit_Call(self, node):
argnodes = node.args + node.keywords
@@ -200,16 +204,15 @@ def _find_call(call, i, tokens):
return _find_simple(first_brace, tokens)
-def _find_literal(literal, i, tokens):
+def _find_tuple(i, tokens):
# tuples are evil, we need to backtrack to find the opening paren
- if literal.backtrack:
+ i -= 1
+ while tokens[i].name in NON_CODING_TOKENS:
i -= 1
- while tokens[i].name in NON_CODING_TOKENS:
- i -= 1
- # Sometimes tuples don't even have a paren!
- # x = 1, 2, 3
- if tokens[i].src != '(':
- return
+ # Sometimes tuples don't even have a paren!
+ # x = 1, 2, 3
+ if tokens[i].src != '(':
+ return
return _find_simple(i, tokens)
@@ -326,13 +329,14 @@ def _fix_src(contents_text, py35_plus):
# Handle parenthesized things
elif token.src == '(':
fixes.append((False, _find_simple(i, tokens)))
+ elif key in visitor.literals:
+ fixes.append((True, _find_simple(i, tokens)))
# need to additionally handle literals afterwards as tuples report
# their starting index as the first element, which may be one of the
# above things.
- if key in visitor.literals:
- fix_data = _find_literal(visitor.literals[key], i, tokens)
- fixes.append((True, fix_data))
+ if key in visitor.tuples:
+ fixes.append((True, _find_tuple(i, tokens)))
for add_comma, fix_data in fixes:
if fix_data is not None:
| Regression ({}, ()) is not adding a trailing comma
Similar to #22
```python
(
{k: v},
()
)
``
Is not adding a trailing comma after the second tuple | asottile/add-trailing-comma | diff --git a/tests/add_trailing_comma_test.py b/tests/add_trailing_comma_test.py
index 5229737..a7e4abe 100644
--- a/tests/add_trailing_comma_test.py
+++ b/tests/add_trailing_comma_test.py
@@ -124,6 +124,18 @@ def test_py35_plus_rewrite():
' x,\n'
')',
),
+ # Regression test for #23
+ (
+ '(\n'
+ ' {k: v},\n'
+ ' ()\n'
+ ')',
+
+ '(\n'
+ ' {k: v},\n'
+ ' (),\n'
+ ')',
+ ),
),
)
def test_fixes_calls(src, expected):
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_issue_reference",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 0.5 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.6",
"reqs_path": [
"requirements-dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/asottile/add-trailing-comma.git@c7da498ebb0549a0925b8f4c3502d8fd27f554b8#egg=add_trailing_comma
attrs==22.2.0
certifi==2021.5.30
cfgv==3.3.1
coverage==6.2
distlib==0.3.9
filelock==3.4.1
flake8==5.0.4
identify==2.4.4
importlib-metadata==4.2.0
importlib-resources==5.2.3
iniconfig==1.1.1
mccabe==0.7.0
nodeenv==1.6.0
packaging==21.3
platformdirs==2.4.0
pluggy==1.0.0
pre-commit==2.17.0
py==1.11.0
pycodestyle==2.9.1
pyflakes==2.5.0
pyparsing==3.1.4
pytest==7.0.1
PyYAML==6.0.1
tokenize-rt==4.2.1
toml==0.10.2
tomli==1.2.3
typing_extensions==4.1.1
virtualenv==20.16.2
zipp==3.6.0
| name: add-trailing-comma
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- cfgv==3.3.1
- coverage==6.2
- distlib==0.3.9
- filelock==3.4.1
- flake8==5.0.4
- identify==2.4.4
- importlib-metadata==4.2.0
- importlib-resources==5.2.3
- iniconfig==1.1.1
- mccabe==0.7.0
- nodeenv==1.6.0
- packaging==21.3
- platformdirs==2.4.0
- pluggy==1.0.0
- pre-commit==2.17.0
- py==1.11.0
- pycodestyle==2.9.1
- pyflakes==2.5.0
- pyparsing==3.1.4
- pytest==7.0.1
- pyyaml==6.0.1
- tokenize-rt==4.2.1
- toml==0.10.2
- tomli==1.2.3
- typing-extensions==4.1.1
- virtualenv==20.16.2
- zipp==3.6.0
prefix: /opt/conda/envs/add-trailing-comma
| [
"tests/add_trailing_comma_test.py::test_fixes_calls[(\\n"
] | [] | [
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x(1)]",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[tuple(\\n",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x(\\n",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x((\\n",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[(\\n",
"tests/add_trailing_comma_test.py::test_ignores_invalid_ast_node",
"tests/add_trailing_comma_test.py::test_py35_plus_rewrite",
"tests/add_trailing_comma_test.py::test_fixes_calls[x(\\n",
"tests/add_trailing_comma_test.py::test_fixes_calls[foo()(\\n",
"tests/add_trailing_comma_test.py::test_fixes_calls[x({}).y(\\n",
"tests/add_trailing_comma_test.py::test_noop_one_line_literals[(1,",
"tests/add_trailing_comma_test.py::test_noop_one_line_literals[[1,",
"tests/add_trailing_comma_test.py::test_noop_one_line_literals[{1,",
"tests/add_trailing_comma_test.py::test_noop_one_line_literals[{1:",
"tests/add_trailing_comma_test.py::test_fixes_literals[x",
"tests/add_trailing_comma_test.py::test_fixes_py35_plus_literals[x",
"tests/add_trailing_comma_test.py::test_noop_tuple_literal_without_braces",
"tests/add_trailing_comma_test.py::test_noop_function_defs[def",
"tests/add_trailing_comma_test.py::test_fixes_defs[def",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f(x,",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f(\\n",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f((\\n",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f([\\n",
"tests/add_trailing_comma_test.py::test_noop_unhugs[textwrap.dedent(\"\"\"\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[f(\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[f(a,\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[def",
"tests/add_trailing_comma_test.py::test_fix_unhugs[with",
"tests/add_trailing_comma_test.py::test_fix_unhugs[if",
"tests/add_trailing_comma_test.py::test_fix_unhugs[{'foo':",
"tests/add_trailing_comma_test.py::test_fix_unhugs[f(g(\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[{\"foo\":",
"tests/add_trailing_comma_test.py::test_fix_unhugs[x",
"tests/add_trailing_comma_test.py::test_fix_unhugs[x(\"foo\",",
"tests/add_trailing_comma_test.py::test_fix_unhugs[x(\"foo\"\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs_py3_only[def",
"tests/add_trailing_comma_test.py::test_noop_trailing_brace[[]]",
"tests/add_trailing_comma_test.py::test_noop_trailing_brace[x",
"tests/add_trailing_comma_test.py::test_noop_trailing_brace[y",
"tests/add_trailing_comma_test.py::test_noop_trailing_brace[foo.\\\\\\n",
"tests/add_trailing_comma_test.py::test_fix_trailing_brace[x",
"tests/add_trailing_comma_test.py::test_main_trivial",
"tests/add_trailing_comma_test.py::test_main_noop",
"tests/add_trailing_comma_test.py::test_main_changes_a_file",
"tests/add_trailing_comma_test.py::test_main_syntax_error",
"tests/add_trailing_comma_test.py::test_main_non_utf8_bytes",
"tests/add_trailing_comma_test.py::test_main_py35_plus_argument_star_args",
"tests/add_trailing_comma_test.py::test_main_py35_plus_argument_star_star_kwargs"
] | [] | MIT License | 1,478 | 756 | [
"add_trailing_comma.py"
] |
|
jupyter__nbgrader-845 | e2f288ce4a11d08db211e67b7c0d2f9ff0c5656a | 2017-07-18 16:08:12 | 5bc6f37c39c8b10b8f60440b2e6d9487e63ef3f1 | diff --git a/nbgrader/__init__.py b/nbgrader/__init__.py
index 7e68cab9..e7ecd231 100644
--- a/nbgrader/__init__.py
+++ b/nbgrader/__init__.py
@@ -3,17 +3,12 @@ A system for assigning and grading notebooks.
"""
import os
+import sys
from ._version import version_info, __version__
def _jupyter_nbextension_paths():
- return [
- dict(
- section="tree",
- src=os.path.join('nbextensions', 'assignment_list'),
- dest="assignment_list",
- require="assignment_list/main"
- ),
+ paths = [
dict(
section="notebook",
src=os.path.join('nbextensions', 'create_assignment'),
@@ -34,9 +29,26 @@ def _jupyter_nbextension_paths():
),
]
+ if sys.platform != 'win32':
+ paths.append(
+ dict(
+ section="tree",
+ src=os.path.join('nbextensions', 'assignment_list'),
+ dest="assignment_list",
+ require="assignment_list/main"
+ )
+ )
+
+ return paths
+
+
def _jupyter_server_extension_paths():
- return [
- dict(module="nbgrader.server_extensions.assignment_list"),
+ paths = [
dict(module="nbgrader.server_extensions.formgrader"),
dict(module="nbgrader.server_extensions.validate_assignment")
]
+
+ if sys.platform != 'win32':
+ paths.append(dict(module="nbgrader.server_extensions.assignment_list"))
+
+ return paths
| Disable Assignment List extension on Windows
See https://github.com/conda-forge/nbgrader-feedstock/issues/12
@lgpage | jupyter/nbgrader | diff --git a/nbgrader/tests/apps/test_nbgrader_extension.py b/nbgrader/tests/apps/test_nbgrader_extension.py
index f0c75f3d..c636413a 100644
--- a/nbgrader/tests/apps/test_nbgrader_extension.py
+++ b/nbgrader/tests/apps/test_nbgrader_extension.py
@@ -1,25 +1,82 @@
import os
-
import nbgrader
+import sys
+import contextlib
+
+
[email protected]
+def mock_platform(platform):
+ old_platform = sys.platform
+ sys.platform = platform
+ yield
+ sys.platform = old_platform
+
+
+def test_nbextension_linux():
+ from nbgrader import _jupyter_nbextension_paths
+ with mock_platform("linux"):
+ nbexts = _jupyter_nbextension_paths()
+ assert len(nbexts) == 4
+ assert nbexts[0]['section'] == 'notebook'
+ assert nbexts[1]['section'] == 'tree'
+ assert nbexts[2]['section'] == 'notebook'
+ assert nbexts[3]['section'] == 'tree'
+ paths = [ext['src'] for ext in nbexts]
+ for path in paths:
+ assert os.path.isdir(os.path.join(os.path.dirname(nbgrader.__file__), path))
+
+
+def test_nbextension_mac():
+ from nbgrader import _jupyter_nbextension_paths
+ with mock_platform("darwin"):
+ nbexts = _jupyter_nbextension_paths()
+ assert len(nbexts) == 4
+ assert nbexts[0]['section'] == 'notebook'
+ assert nbexts[1]['section'] == 'tree'
+ assert nbexts[2]['section'] == 'notebook'
+ assert nbexts[3]['section'] == 'tree'
+ paths = [ext['src'] for ext in nbexts]
+ for path in paths:
+ assert os.path.isdir(os.path.join(os.path.dirname(nbgrader.__file__), path))
-def test_nbextension():
+def test_nbextension_windows():
from nbgrader import _jupyter_nbextension_paths
- nbexts = _jupyter_nbextension_paths()
- assert len(nbexts) == 4
- assert nbexts[0]['section'] == 'tree'
- assert nbexts[1]['section'] == 'notebook'
- assert nbexts[2]['section'] == 'tree'
- assert nbexts[3]['section'] == 'notebook'
- paths = [ext['src'] for ext in nbexts]
- for path in paths:
- assert os.path.isdir(os.path.join(os.path.dirname(nbgrader.__file__), path))
-
-
-def test_serverextension():
+ with mock_platform("win32"):
+ nbexts = _jupyter_nbextension_paths()
+ assert len(nbexts) == 3
+ assert nbexts[0]['section'] == 'notebook'
+ assert nbexts[1]['section'] == 'tree'
+ assert nbexts[2]['section'] == 'notebook'
+ paths = [ext['src'] for ext in nbexts]
+ for path in paths:
+ assert os.path.isdir(os.path.join(os.path.dirname(nbgrader.__file__), path))
+
+
+def test_serverextension_linux():
+ from nbgrader import _jupyter_server_extension_paths
+ with mock_platform("linux"):
+ serverexts = _jupyter_server_extension_paths()
+ assert len(serverexts) == 3
+ assert serverexts[0]['module'] == 'nbgrader.server_extensions.formgrader'
+ assert serverexts[1]['module'] == 'nbgrader.server_extensions.validate_assignment'
+ assert serverexts[2]['module'] == 'nbgrader.server_extensions.assignment_list'
+
+
+def test_serverextension_mac():
+ from nbgrader import _jupyter_server_extension_paths
+ with mock_platform("darwin"):
+ serverexts = _jupyter_server_extension_paths()
+ assert len(serverexts) == 3
+ assert serverexts[0]['module'] == 'nbgrader.server_extensions.formgrader'
+ assert serverexts[1]['module'] == 'nbgrader.server_extensions.validate_assignment'
+ assert serverexts[2]['module'] == 'nbgrader.server_extensions.assignment_list'
+
+
+def test_serverextension_windows():
from nbgrader import _jupyter_server_extension_paths
- serverexts = _jupyter_server_extension_paths()
- assert len(serverexts) == 3
- assert serverexts[0]['module'] == 'nbgrader.server_extensions.assignment_list'
- assert serverexts[1]['module'] == 'nbgrader.server_extensions.formgrader'
- assert serverexts[2]['module'] == 'nbgrader.server_extensions.validate_assignment'
+ with mock_platform("win32"):
+ serverexts = _jupyter_server_extension_paths()
+ assert len(serverexts) == 2
+ assert serverexts[0]['module'] == 'nbgrader.server_extensions.formgrader'
+ assert serverexts[1]['module'] == 'nbgrader.server_extensions.validate_assignment'
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 3,
"test_score": 2
},
"num_modified_files": 1
} | 0.5 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pyenchant",
"sphinxcontrib-spelling",
"sphinx_rtd_theme",
"nbval",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.5",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
alembic==1.7.7
anyio==3.6.2
argon2-cffi==21.3.0
argon2-cffi-bindings==21.2.0
async-generator==1.10
attrs==22.2.0
Babel==2.11.0
backcall==0.2.0
bleach==4.1.0
certifi==2021.5.30
cffi==1.15.1
charset-normalizer==2.0.12
comm==0.1.4
contextvars==2.4
coverage==6.2
dataclasses==0.8
decorator==5.1.1
defusedxml==0.7.1
docutils==0.18.1
entrypoints==0.4
greenlet==2.0.2
idna==3.10
imagesize==1.4.1
immutables==0.19
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig==1.1.1
ipykernel==5.5.6
ipython==7.16.3
ipython-genutils==0.2.0
ipywidgets==7.8.5
jedi==0.17.2
Jinja2==3.0.3
json5==0.9.16
jsonschema==3.2.0
jupyter==1.1.1
jupyter-client==7.1.2
jupyter-console==6.4.3
jupyter-core==4.9.2
jupyter-server==1.13.1
jupyterlab==3.2.9
jupyterlab-pygments==0.1.2
jupyterlab-server==2.10.3
jupyterlab_widgets==1.1.11
Mako==1.1.6
MarkupSafe==2.0.1
mistune==0.8.4
nbclassic==0.3.5
nbclient==0.5.9
nbconvert==6.0.7
nbformat==5.1.3
-e git+https://github.com/jupyter/nbgrader.git@e2f288ce4a11d08db211e67b7c0d2f9ff0c5656a#egg=nbgrader
nbval==0.10.0
nest-asyncio==1.6.0
notebook==6.4.10
packaging==21.3
pandocfilters==1.5.1
parso==0.7.1
pexpect==4.9.0
pickleshare==0.7.5
pluggy==1.0.0
prometheus-client==0.17.1
prompt-toolkit==3.0.36
ptyprocess==0.7.0
py==1.11.0
pycparser==2.21
pyenchant==3.2.2
Pygments==2.14.0
pyparsing==3.1.4
pyrsistent==0.18.0
pytest==7.0.1
python-dateutil==2.9.0.post0
pytz==2025.2
pyzmq==25.1.2
requests==2.27.1
Send2Trash==1.8.3
six==1.17.0
sniffio==1.2.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinx-rtd-theme==2.0.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jquery==4.1
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
sphinxcontrib-spelling==7.7.0
SQLAlchemy==1.4.54
terminado==0.12.1
testpath==0.6.0
tomli==1.2.3
tornado==6.1
traitlets==4.3.3
typing_extensions==4.1.1
urllib3==1.26.20
wcwidth==0.2.13
webencodings==0.5.1
websocket-client==1.3.1
widgetsnbextension==3.6.10
zipp==3.6.0
| name: nbgrader
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- alembic==1.7.7
- anyio==3.6.2
- argon2-cffi==21.3.0
- argon2-cffi-bindings==21.2.0
- async-generator==1.10
- attrs==22.2.0
- babel==2.11.0
- backcall==0.2.0
- bleach==4.1.0
- cffi==1.15.1
- charset-normalizer==2.0.12
- comm==0.1.4
- contextvars==2.4
- coverage==6.2
- dataclasses==0.8
- decorator==5.1.1
- defusedxml==0.7.1
- docutils==0.18.1
- entrypoints==0.4
- greenlet==2.0.2
- idna==3.10
- imagesize==1.4.1
- immutables==0.19
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- iniconfig==1.1.1
- ipykernel==5.5.6
- ipython==7.16.3
- ipython-genutils==0.2.0
- ipywidgets==7.8.5
- jedi==0.17.2
- jinja2==3.0.3
- json5==0.9.16
- jsonschema==3.2.0
- jupyter==1.1.1
- jupyter-client==7.1.2
- jupyter-console==6.4.3
- jupyter-core==4.9.2
- jupyter-server==1.13.1
- jupyterlab==3.2.9
- jupyterlab-pygments==0.1.2
- jupyterlab-server==2.10.3
- jupyterlab-widgets==1.1.11
- mako==1.1.6
- markupsafe==2.0.1
- mistune==0.8.4
- nbclassic==0.3.5
- nbclient==0.5.9
- nbconvert==6.0.7
- nbformat==5.1.3
- nbval==0.10.0
- nest-asyncio==1.6.0
- notebook==6.4.10
- packaging==21.3
- pandocfilters==1.5.1
- parso==0.7.1
- pexpect==4.9.0
- pickleshare==0.7.5
- pluggy==1.0.0
- prometheus-client==0.17.1
- prompt-toolkit==3.0.36
- ptyprocess==0.7.0
- py==1.11.0
- pycparser==2.21
- pyenchant==3.2.2
- pygments==2.14.0
- pyparsing==3.1.4
- pyrsistent==0.18.0
- pytest==7.0.1
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyzmq==25.1.2
- requests==2.27.1
- send2trash==1.8.3
- six==1.17.0
- sniffio==1.2.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinx-rtd-theme==2.0.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jquery==4.1
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- sphinxcontrib-spelling==7.7.0
- sqlalchemy==1.4.54
- terminado==0.12.1
- testpath==0.6.0
- tomli==1.2.3
- tornado==6.1
- traitlets==4.3.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- wcwidth==0.2.13
- webencodings==0.5.1
- websocket-client==1.3.1
- widgetsnbextension==3.6.10
- zipp==3.6.0
prefix: /opt/conda/envs/nbgrader
| [
"nbgrader/tests/apps/test_nbgrader_extension.py::test_nbextension_linux",
"nbgrader/tests/apps/test_nbgrader_extension.py::test_nbextension_mac",
"nbgrader/tests/apps/test_nbgrader_extension.py::test_nbextension_windows",
"nbgrader/tests/apps/test_nbgrader_extension.py::test_serverextension_linux",
"nbgrader/tests/apps/test_nbgrader_extension.py::test_serverextension_mac",
"nbgrader/tests/apps/test_nbgrader_extension.py::test_serverextension_windows"
] | [] | [] | [] | BSD 3-Clause "New" or "Revised" License | 1,481 | 373 | [
"nbgrader/__init__.py"
] |
|
vertexproject__synapse-349 | 129e058c323f9ff0e1d130c0fa47bdfd423f6515 | 2017-07-18 20:33:45 | 6f5fc661a88b8cc3f4befb2c9c7ddcebf0b89ba0 | diff --git a/synapse/models/files.py b/synapse/models/files.py
index 63916defd..1dcf82c57 100644
--- a/synapse/models/files.py
+++ b/synapse/models/files.py
@@ -87,6 +87,8 @@ class FileMod(CoreModule):
Hashes that we consider "cardinal enough" to pivot.
'''
name = prop.rsplit(':', 1)[-1]
+ # Normalize the valu before we go any further
+ valu, _ = self.core.getPropNorm(prop, valu)
props[name] = valu
# FIXME could we update additional hashes here and
@@ -107,10 +109,12 @@ class FileMod(CoreModule):
return tufo
def seedFileMd5(self, prop, valu, **props):
+ valu, _ = self.core.getPropNorm('file:bytes:md5', valu)
props['md5'] = valu
return self.core.formTufoByProp('file:bytes', valu, **props)
def seedFileSha1(self, prop, valu, **props):
+ valu, _ = self.core.getPropNorm('file:bytes:sha1', valu)
props['sha1'] = valu
valu = guid(valu)
return self.core.formTufoByProp('file:bytes', valu, **props)
| Subsystem that parses ingest.json files calculates different file:bytes GUID (superhash) based on capitalization of hash value
Identified during testing for #334 .
Synapse calculates a GUID as a primary property for a file:bytes node based on the available hash value(s) for the bytes. If the bytes are present, all hashes are used; if we only have a hash and no bytes, the available hash is used.
The GUID should be calculated the same way regardless of whether the hash contains upper case or lower case alpha characters. This is true when adding nodes via Storm:
```
cli> ask [file:bytes:sha256=6ACC29BFC5F8F772FA7AAF4A705F91CB68DC88CB22F4EF5101281DC42109A104]
file:bytes = 8a249054e1c0e455657867a817aa9fcc
(1 results)
cli> ask [file:bytes:sha256=6acc29bfc5f8f772fa7aaf4a705f91cb68dc88cb22f4ef5101281dc42109a104]
file:bytes = 8a249054e1c0e455657867a817aa9fcc
(1 results)
```
However, this is *not* the case when adding data via an ingest.json file.
1. Create two test ingest.json files, each adds only a single node based on file:bytes:sha256.
2. In one file (test1.json), create the SHA256 with upper case alpha characters.
3. In the other file (test2.json) create the SHA256 with lower case alpha characters.
Result:
A. Upper case hash:
```
py3 -m synapse.tools.ingest --verbose ~/research/test1.json --sync <cortex>
add: file:bytes=8a249054e1c0e455657867a817aa9fcc
:mime = ??
:sha256 = 6acc29bfc5f8f772fa7aaf4a705f91cb68dc88cb22f4ef5101281dc42109a104
add: hash:sha256=6acc29bfc5f8f772fa7aaf4a705f91cb68dc88cb22f4ef5101281dc42109a104
ingest took: 0.0028884410858154297 sec
```
B. Lower case hash:
```
py3 -m synapse.tools.ingest --verbose ~/research/test2.json --sync <cortex>
add: file:bytes=ed73917b1dc4011627f7a101ace491c8
:mime = ??
:sha256 = 6acc29bfc5f8f772fa7aaf4a705f91cb68dc88cb22f4ef5101281dc42109a104
add: hash:sha256=6acc29bfc5f8f772fa7aaf4a705f91cb68dc88cb22f4ef5101281dc42109a104
ingest took: 0.0036017894744873047 sec
```
Note that it appears that the upper-case hash calculates the "correct" GUID (GUID value matches the one generated via Storm), but the lower-case hash does not.
| vertexproject/synapse | diff --git a/synapse/tests/test_model_files.py b/synapse/tests/test_model_files.py
index 0b917710e..f42e5edb1 100644
--- a/synapse/tests/test_model_files.py
+++ b/synapse/tests/test_model_files.py
@@ -1,11 +1,11 @@
from __future__ import absolute_import, unicode_literals
import synapse.axon as s_axon
-import synapse.compat as s_compat
-import synapse.cortex as s_cortex
import synapse.daemon as s_daemon
import synapse.telepath as s_telepath
+import synapse.lib.tufo as s_tufo
+
from synapse.tests.common import *
class FileModelTest(SynTest):
@@ -44,6 +44,22 @@ class FileModelTest(SynTest):
self.ne(t0[0], core.formTufoByProp('file:bytes:sha1', props.get('sha1'))[0])
self.ne(t0[0], core.formTufoByProp('file:bytes:md5', props.get('md5'))[0])
+ def test_model_file_seeds_capitalization(self):
+ fhash = '6ACC29BFC5F8F772FA7AAF4A705F91CB68DC88CB22F4EF5101281DC42109A104'
+ fhash_lower = fhash.lower()
+ stable_guid = 'ed73917b1dc4011627f7a101ace491c8'
+
+ with s_cortex.openurl('ram:///') as core:
+
+ n1 = core.formTufoByProp('file:bytes:sha256', fhash)
+ n2 = core.formTufoByProp('file:bytes:sha256', fhash_lower)
+ # Sha256 should be lowercase since the prop type is lowercased
+ n1def = s_tufo.ndef(n1)
+ n2def = s_tufo.ndef(n2)
+ self.eq(n1def[1], stable_guid)
+ self.eq(n2def[1], stable_guid)
+ self.eq(n1[0], n2[0])
+
def test_filepath(self):
with s_cortex.openurl('ram:///') as core:
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_issue_reference"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 3
},
"num_modified_files": 1
} | 0.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"nose",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
cffi==1.15.1
cryptography==40.0.2
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
lmdb==1.6.2
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
msgpack-python==0.5.6
nose==1.3.7
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pycparser==2.21
pyOpenSSL==23.2.0
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
-e git+https://github.com/vertexproject/synapse.git@129e058c323f9ff0e1d130c0fa47bdfd423f6515#egg=synapse
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
tornado==6.1
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
xxhash==3.2.0
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: synapse
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- cffi==1.15.1
- cryptography==40.0.2
- lmdb==1.6.2
- msgpack-python==0.5.6
- nose==1.3.7
- pycparser==2.21
- pyopenssl==23.2.0
- tornado==6.1
- xxhash==3.2.0
prefix: /opt/conda/envs/synapse
| [
"synapse/tests/test_model_files.py::FileModelTest::test_model_file_seeds_capitalization"
] | [] | [
"synapse/tests/test_model_files.py::FileModelTest::test_filebase",
"synapse/tests/test_model_files.py::FileModelTest::test_filepath",
"synapse/tests/test_model_files.py::FileModelTest::test_model_file_bytes",
"synapse/tests/test_model_files.py::FileModelTest::test_model_file_bytes_axon",
"synapse/tests/test_model_files.py::FileModelTest::test_model_file_seeds",
"synapse/tests/test_model_files.py::FileModelTest::test_model_files_imgof",
"synapse/tests/test_model_files.py::FileModelTest::test_model_files_txtref"
] | [] | Apache License 2.0 | 1,484 | 316 | [
"synapse/models/files.py"
] |
|
Azure__azure-cli-4047 | dd91433b36f95e9c849cbbcecccb0f610a8f0a3c | 2017-07-19 03:10:04 | eb12ac454cbe1ddb59c86cdf2045e1912660e750 | codecov-io: # [Codecov](https://codecov.io/gh/Azure/azure-cli/pull/4047?src=pr&el=h1) Report
> Merging [#4047](https://codecov.io/gh/Azure/azure-cli/pull/4047?src=pr&el=desc) into [master](https://codecov.io/gh/Azure/azure-cli/commit/dd91433b36f95e9c849cbbcecccb0f610a8f0a3c?src=pr&el=desc) will **not change** coverage.
> The diff coverage is `n/a`.
[](https://codecov.io/gh/Azure/azure-cli/pull/4047?src=pr&el=tree)
```diff
@@ Coverage Diff @@
## master #4047 +/- ##
=======================================
Coverage 70.61% 70.61%
=======================================
Files 428 428
Lines 27871 27871
Branches 4267 4267
=======================================
Hits 19680 19680
- Misses 6915 6919 +4
+ Partials 1276 1272 -4
```
| [Impacted Files](https://codecov.io/gh/Azure/azure-cli/pull/4047?src=pr&el=tree) | Coverage Δ | |
|---|---|---|
| [...zure-cli-vm/azure/cli/command\_modules/vm/custom.py](https://codecov.io/gh/Azure/azure-cli/pull/4047?src=pr&el=tree#diff-c3JjL2NvbW1hbmRfbW9kdWxlcy9henVyZS1jbGktdm0vYXp1cmUvY2xpL2NvbW1hbmRfbW9kdWxlcy92bS9jdXN0b20ucHk=) | `75.45% <ø> (ø)` | :arrow_up: |
| [src/azure-cli-core/azure/cli/core/util.py](https://codecov.io/gh/Azure/azure-cli/pull/4047?src=pr&el=tree#diff-c3JjL2F6dXJlLWNsaS1jb3JlL2F6dXJlL2NsaS9jb3JlL3V0aWwucHk=) | `70.66% <0%> (ø)` | :arrow_up: |
| [...dback/azure/cli/command\_modules/feedback/custom.py](https://codecov.io/gh/Azure/azure-cli/pull/4047?src=pr&el=tree#diff-c3JjL2NvbW1hbmRfbW9kdWxlcy9henVyZS1jbGktZmVlZGJhY2svYXp1cmUvY2xpL2NvbW1hbmRfbW9kdWxlcy9mZWVkYmFjay9jdXN0b20ucHk=) | `31.25% <0%> (ø)` | :arrow_up: |
| [...nent/azure/cli/command\_modules/component/custom.py](https://codecov.io/gh/Azure/azure-cli/pull/4047?src=pr&el=tree#diff-c3JjL2NvbW1hbmRfbW9kdWxlcy9henVyZS1jbGktY29tcG9uZW50L2F6dXJlL2NsaS9jb21tYW5kX21vZHVsZXMvY29tcG9uZW50L2N1c3RvbS5weQ==) | `16.23% <0%> (ø)` | :arrow_up: |
| [...li-cloud/azure/cli/command\_modules/cloud/custom.py](https://codecov.io/gh/Azure/azure-cli/pull/4047?src=pr&el=tree#diff-c3JjL2NvbW1hbmRfbW9kdWxlcy9henVyZS1jbGktY2xvdWQvYXp1cmUvY2xpL2NvbW1hbmRfbW9kdWxlcy9jbG91ZC9jdXN0b20ucHk=) | `14.45% <0%> (ø)` | :arrow_up: |
------
[Continue to review full report at Codecov](https://codecov.io/gh/Azure/azure-cli/pull/4047?src=pr&el=continue).
> **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta)
> `Δ = absolute <relative> (impact)`, `ø = not affected`, `? = missing data`
> Powered by [Codecov](https://codecov.io/gh/Azure/azure-cli/pull/4047?src=pr&el=footer). Last update [dd91433...18754a5](https://codecov.io/gh/Azure/azure-cli/pull/4047?src=pr&el=lastupdated). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments).
| diff --git a/src/command_modules/azure-cli-vm/azure/cli/command_modules/vm/custom.py b/src/command_modules/azure-cli-vm/azure/cli/command_modules/vm/custom.py
index a0e2fe027..4f08c5271 100644
--- a/src/command_modules/azure-cli-vm/azure/cli/command_modules/vm/custom.py
+++ b/src/command_modules/azure-cli-vm/azure/cli/command_modules/vm/custom.py
@@ -751,7 +751,7 @@ def _get_extension_instance_name(instance_view, publisher, extension_type_name,
full_type_name = '.'.join([publisher, extension_type_name])
if instance_view.extensions:
ext = next((x for x in instance_view.extensions
- if x.type.lower() == full_type_name.lower()), None)
+ if x.type and (x.type.lower() == full_type_name.lower())), None)
if ext:
extension_instance_name = ext.name
return extension_instance_name
| Misleading error message when resetting SSH key on a VM
### Description
The following command works on all my VMs except for one particular one show below.
NOTE:
`az` is an alias to `docker run --rm -v /Users/$(whoami):/root -it azuresdk/azure-cli-python az`
```sh
$ az vm user update -u moody -g myGroup -n myVM --ssh-key-value "$(cat ~/.ssh/id_rsa.pub)"=""
'NoneType' object has no attribute 'lower'
Traceback (most recent call last):
File "/usr/local/lib/python3.5/site-packages/azure/cli/main.py", line 36, in main
cmd_result = APPLICATION.execute(args)
File "/usr/local/lib/python3.5/site-packages/azure/cli/core/application.py", line 211, in execute
result = expanded_arg.func(params)
File "/usr/local/lib/python3.5/site-packages/azure/cli/core/commands/__init__.py", line 351, in __call__
return self.handler(*args, **kwargs)
File "/usr/local/lib/python3.5/site-packages/azure/cli/core/commands/__init__.py", line 550, in _execute_command
reraise(*sys.exc_info())
File "/usr/local/lib/python3.5/site-packages/six.py", line 686, in reraise
raise value
File "/usr/local/lib/python3.5/site-packages/azure/cli/core/commands/__init__.py", line 527, in _execute_command
result = op(client, **kwargs) if client else op(**kwargs)
File "/usr/local/lib/python3.5/site-packages/azure/cli/command_modules/vm/custom.py", line 619, in set_user
return _set_linux_user(vm, resource_group_name, username, password, ssh_key_value, no_wait)
File "/usr/local/lib/python3.5/site-packages/azure/cli/command_modules/vm/custom.py", line 676, in _set_linux_user
protected_settings)
File "/usr/local/lib/python3.5/site-packages/azure/cli/command_modules/vm/custom.py", line 727, in _update_linux_access_extension
_ACCESS_EXT_HANDLER_NAME)
File "/usr/local/lib/python3.5/site-packages/azure/cli/command_modules/vm/custom.py", line 751, in _get_extension_instance_name
if x.type.lower() == full_type_name.lower()), None)
File "/usr/local/lib/python3.5/site-packages/azure/cli/command_modules/vm/custom.py", line 751, in <genexpr>
if x.type.lower() == full_type_name.lower()), None)
AttributeError: 'NoneType' object has no attribute 'lower'
```
I tried checking the Azure console and found the following message on this particular VM when resetting SSH key
```
The VM agent is either unavailable, or not installed, which may prevent VMAccess from running.
```
Screenshot

_It would be a lot more useful to see that error in the CLI as well instead of a python dump._
---
### Environment summary
**Install Method:** How did you install the CLI? (e.g. pip, interactive script, apt-get, Docker, MSI, nightly)
Answer here: Docker
**CLI Version:** What version of the CLI and modules are installed? (Use `az --version`)
Answer here:
```
azure-cli (2.0.10+dev)
acr (2.0.8+dev)
acs (2.0.10+dev)
appservice (0.1.10+dev)
batch (3.0.3+dev)
billing (0.1.3+dev)
cdn (0.0.6+dev)
cloud (2.0.6+dev)
cognitiveservices (0.1.6+dev)
command-modules-nspkg (2.0.1+dev)
component (2.0.6+dev)
configure (2.0.10+dev)
consumption (0.1.3+dev)
core (2.0.11+dev)
cosmosdb (0.1.10+dev)
dla (0.0.10+dev)
dls (0.0.10+dev)
feedback (2.0.6+dev)
find (0.2.6+dev)
interactive (0.3.6+dev)
iot (0.1.9+dev)
keyvault (2.0.8+dev)
lab (0.0.8+dev)
monitor (0.0.8+dev)
network (2.0.10+dev)
nspkg (3.0.1+dev)
profile (2.0.8+dev)
rdbms (0.0.5+dev)
redis (0.2.7+dev)
resource (2.0.10+dev)
role (2.0.8+dev)
sf (1.0.5+dev)
sql (2.0.7+dev)
storage (2.0.10+dev)
taskhelp (0.1.5+dev)
vm (2.0.10+dev)
Python (Linux) 3.5.2 (default, Dec 27 2016, 21:33:11)
[GCC 5.3.0]
Python location '/usr/local/bin/python'
```
**OS Version:** What OS and version are you using?
Answer here: macOS Sierra 10.12.5 (16F2104)
**Shell Type:** What shell are you using? (e.g. bash, cmd.exe, Bash on Windows)
Answer here: zsh
| Azure/azure-cli | diff --git a/src/command_modules/azure-cli-vm/azure/cli/command_modules/vm/tests/test_custom_vm_commands.py b/src/command_modules/azure-cli-vm/azure/cli/command_modules/vm/tests/test_custom_vm_commands.py
index 3ddcc7996..58d51f6d9 100644
--- a/src/command_modules/azure-cli-vm/azure/cli/command_modules/vm/tests/test_custom_vm_commands.py
+++ b/src/command_modules/azure-cli-vm/azure/cli/command_modules/vm/tests/test_custom_vm_commands.py
@@ -356,6 +356,18 @@ class Test_Vm_Custom(unittest.TestCase):
# assert
self.assertEqual(result, 'extension1')
+ def test_get_extension_instance_name_when_type_none(self):
+ instance_view = mock.MagicMock()
+ extension = mock.MagicMock()
+ extension.type = None
+ instance_view.extensions = [extension]
+
+ # action
+ result = _get_extension_instance_name(instance_view, 'na', 'extension-name')
+
+ # assert
+ self.assertEqual(result, 'extension-name')
+
class FakedVM(object): # pylint: disable=too-few-public-methods
def __init__(self, nics=None, disks=None, os_disk=None):
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_media"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 2.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "python scripts/dev_setup.py",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc libssl-dev libffi-dev"
],
"python": "3.5",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | adal==0.4.3
applicationinsights==0.10.0
argcomplete==1.8.0
astroid==2.11.7
attrs==22.2.0
autopep8==2.0.4
azure-batch==3.0.0
-e git+https://github.com/Azure/azure-cli.git@dd91433b36f95e9c849cbbcecccb0f610a8f0a3c#egg=azure_cli&subdirectory=src/azure-cli
-e git+https://github.com/Azure/azure-cli.git@dd91433b36f95e9c849cbbcecccb0f610a8f0a3c#egg=azure_cli_acr&subdirectory=src/command_modules/azure-cli-acr
-e git+https://github.com/Azure/azure-cli.git@dd91433b36f95e9c849cbbcecccb0f610a8f0a3c#egg=azure_cli_acs&subdirectory=src/command_modules/azure-cli-acs
-e git+https://github.com/Azure/azure-cli.git@dd91433b36f95e9c849cbbcecccb0f610a8f0a3c#egg=azure_cli_appservice&subdirectory=src/command_modules/azure-cli-appservice
-e git+https://github.com/Azure/azure-cli.git@dd91433b36f95e9c849cbbcecccb0f610a8f0a3c#egg=azure_cli_batch&subdirectory=src/command_modules/azure-cli-batch
-e git+https://github.com/Azure/azure-cli.git@dd91433b36f95e9c849cbbcecccb0f610a8f0a3c#egg=azure_cli_billing&subdirectory=src/command_modules/azure-cli-billing
-e git+https://github.com/Azure/azure-cli.git@dd91433b36f95e9c849cbbcecccb0f610a8f0a3c#egg=azure_cli_cdn&subdirectory=src/command_modules/azure-cli-cdn
-e git+https://github.com/Azure/azure-cli.git@dd91433b36f95e9c849cbbcecccb0f610a8f0a3c#egg=azure_cli_cloud&subdirectory=src/command_modules/azure-cli-cloud
-e git+https://github.com/Azure/azure-cli.git@dd91433b36f95e9c849cbbcecccb0f610a8f0a3c#egg=azure_cli_cognitiveservices&subdirectory=src/command_modules/azure-cli-cognitiveservices
-e git+https://github.com/Azure/azure-cli.git@dd91433b36f95e9c849cbbcecccb0f610a8f0a3c#egg=azure_cli_component&subdirectory=src/command_modules/azure-cli-component
-e git+https://github.com/Azure/azure-cli.git@dd91433b36f95e9c849cbbcecccb0f610a8f0a3c#egg=azure_cli_configure&subdirectory=src/command_modules/azure-cli-configure
-e git+https://github.com/Azure/azure-cli.git@dd91433b36f95e9c849cbbcecccb0f610a8f0a3c#egg=azure_cli_consumption&subdirectory=src/command_modules/azure-cli-consumption
-e git+https://github.com/Azure/azure-cli.git@dd91433b36f95e9c849cbbcecccb0f610a8f0a3c#egg=azure_cli_core&subdirectory=src/azure-cli-core
-e git+https://github.com/Azure/azure-cli.git@dd91433b36f95e9c849cbbcecccb0f610a8f0a3c#egg=azure_cli_cosmosdb&subdirectory=src/command_modules/azure-cli-cosmosdb
-e git+https://github.com/Azure/azure-cli.git@dd91433b36f95e9c849cbbcecccb0f610a8f0a3c#egg=azure_cli_dla&subdirectory=src/command_modules/azure-cli-dla
-e git+https://github.com/Azure/azure-cli.git@dd91433b36f95e9c849cbbcecccb0f610a8f0a3c#egg=azure_cli_dls&subdirectory=src/command_modules/azure-cli-dls
-e git+https://github.com/Azure/azure-cli.git@dd91433b36f95e9c849cbbcecccb0f610a8f0a3c#egg=azure_cli_feedback&subdirectory=src/command_modules/azure-cli-feedback
-e git+https://github.com/Azure/azure-cli.git@dd91433b36f95e9c849cbbcecccb0f610a8f0a3c#egg=azure_cli_find&subdirectory=src/command_modules/azure-cli-find
-e git+https://github.com/Azure/azure-cli.git@dd91433b36f95e9c849cbbcecccb0f610a8f0a3c#egg=azure_cli_interactive&subdirectory=src/command_modules/azure-cli-interactive
-e git+https://github.com/Azure/azure-cli.git@dd91433b36f95e9c849cbbcecccb0f610a8f0a3c#egg=azure_cli_iot&subdirectory=src/command_modules/azure-cli-iot
-e git+https://github.com/Azure/azure-cli.git@dd91433b36f95e9c849cbbcecccb0f610a8f0a3c#egg=azure_cli_keyvault&subdirectory=src/command_modules/azure-cli-keyvault
-e git+https://github.com/Azure/azure-cli.git@dd91433b36f95e9c849cbbcecccb0f610a8f0a3c#egg=azure_cli_lab&subdirectory=src/command_modules/azure-cli-lab
-e git+https://github.com/Azure/azure-cli.git@dd91433b36f95e9c849cbbcecccb0f610a8f0a3c#egg=azure_cli_monitor&subdirectory=src/command_modules/azure-cli-monitor
-e git+https://github.com/Azure/azure-cli.git@dd91433b36f95e9c849cbbcecccb0f610a8f0a3c#egg=azure_cli_network&subdirectory=src/command_modules/azure-cli-network
-e git+https://github.com/Azure/azure-cli.git@dd91433b36f95e9c849cbbcecccb0f610a8f0a3c#egg=azure_cli_nspkg&subdirectory=src/azure-cli-nspkg
-e git+https://github.com/Azure/azure-cli.git@dd91433b36f95e9c849cbbcecccb0f610a8f0a3c#egg=azure_cli_profile&subdirectory=src/command_modules/azure-cli-profile
-e git+https://github.com/Azure/azure-cli.git@dd91433b36f95e9c849cbbcecccb0f610a8f0a3c#egg=azure_cli_rdbms&subdirectory=src/command_modules/azure-cli-rdbms
-e git+https://github.com/Azure/azure-cli.git@dd91433b36f95e9c849cbbcecccb0f610a8f0a3c#egg=azure_cli_redis&subdirectory=src/command_modules/azure-cli-redis
-e git+https://github.com/Azure/azure-cli.git@dd91433b36f95e9c849cbbcecccb0f610a8f0a3c#egg=azure_cli_resource&subdirectory=src/command_modules/azure-cli-resource
-e git+https://github.com/Azure/azure-cli.git@dd91433b36f95e9c849cbbcecccb0f610a8f0a3c#egg=azure_cli_role&subdirectory=src/command_modules/azure-cli-role
-e git+https://github.com/Azure/azure-cli.git@dd91433b36f95e9c849cbbcecccb0f610a8f0a3c#egg=azure_cli_sf&subdirectory=src/command_modules/azure-cli-sf
-e git+https://github.com/Azure/azure-cli.git@dd91433b36f95e9c849cbbcecccb0f610a8f0a3c#egg=azure_cli_sql&subdirectory=src/command_modules/azure-cli-sql
-e git+https://github.com/Azure/azure-cli.git@dd91433b36f95e9c849cbbcecccb0f610a8f0a3c#egg=azure_cli_storage&subdirectory=src/command_modules/azure-cli-storage
-e git+https://github.com/Azure/azure-cli.git@dd91433b36f95e9c849cbbcecccb0f610a8f0a3c#egg=azure_cli_taskhelp&subdirectory=src/command_modules/azure-cli-taskhelp
-e git+https://github.com/Azure/azure-cli.git@dd91433b36f95e9c849cbbcecccb0f610a8f0a3c#egg=azure_cli_testsdk&subdirectory=src/azure-cli-testsdk
-e git+https://github.com/Azure/azure-cli.git@dd91433b36f95e9c849cbbcecccb0f610a8f0a3c#egg=azure_cli_utility_automation&subdirectory=scripts
-e git+https://github.com/Azure/azure-cli.git@dd91433b36f95e9c849cbbcecccb0f610a8f0a3c#egg=azure_cli_vm&subdirectory=src/command_modules/azure-cli-vm
azure-common==1.1.28
azure-core==1.24.2
azure-datalake-store==0.0.12
azure-devtools==0.4.3
azure-graphrbac==0.30.0rc6
azure-keyvault==0.3.4
azure-mgmt-authorization==0.30.0rc6
azure-mgmt-batch==4.0.0
azure-mgmt-billing==0.1.0
azure-mgmt-cdn==0.30.2
azure-mgmt-cognitiveservices==1.0.0
azure-mgmt-compute==2.0.0
azure-mgmt-consumption==0.1.0
azure-mgmt-containerregistry==0.3.1
azure-mgmt-datalake-analytics==0.1.6
azure-mgmt-datalake-nspkg==3.0.1
azure-mgmt-datalake-store==0.1.6
azure-mgmt-devtestlabs==2.0.0
azure-mgmt-dns==1.0.1
azure-mgmt-documentdb==0.1.3
azure-mgmt-iothub==0.2.2
azure-mgmt-keyvault==0.40.0
azure-mgmt-monitor==0.2.1
azure-mgmt-network==1.2.0
azure-mgmt-nspkg==1.0.0
azure-mgmt-rdbms==0.1.0
azure-mgmt-redis==1.0.0
azure-mgmt-resource==1.1.0
azure-mgmt-sql==0.6.0
azure-mgmt-storage==1.1.0
azure-mgmt-trafficmanager==0.30.0
azure-mgmt-web==0.32.0
azure-monitor==0.3.0
azure-multiapi-storage==0.1.0
azure-nspkg==1.0.0
azure-servicefabric==5.6.130
bleach==4.1.0
certifi==2021.5.30
cffi==1.15.1
colorama==0.3.7
ConfigArgParse==1.7
coverage==6.2
cryptography==40.0.2
docutils==0.18.1
flake8==5.0.4
futures==3.1.1
humanfriendly==2.4
idna==3.10
importlib-metadata==4.2.0
iniconfig==1.1.1
isodate==0.7.0
isort==5.10.1
jeepney==0.7.1
jmespath==0.10.0
keyring==23.4.1
lazy-object-proxy==1.7.1
mccabe==0.7.0
mock==5.2.0
msrest==0.4.29
msrestazure==0.4.34
nose==1.3.7
oauthlib==3.2.2
packaging==21.3
paramiko==2.0.2
pbr==6.1.1
pluggy==1.0.0
prompt-toolkit==3.0.36
py==1.11.0
pyasn1==0.5.1
pycodestyle==2.10.0
pycparser==2.21
pydocumentdb==2.3.5
pyflakes==2.5.0
Pygments==2.14.0
PyJWT==2.4.0
pylint==1.7.1
pyOpenSSL==16.2.0
pyparsing==3.1.4
pytest==7.0.1
python-dateutil==2.9.0.post0
pytz==2025.2
PyYAML==3.11
readme-renderer==34.0
requests==2.9.1
requests-oauthlib==2.0.0
scp==0.15.0
SecretStorage==3.3.3
six==1.10.0
sshtunnel==0.4.0
tabulate==0.7.7
tomli==1.2.3
typed-ast==1.5.5
typing-extensions==4.1.1
urllib3==1.26.20
urllib3-secure-extra==0.1.0
vcrpy==1.10.3
vsts-cd-manager==1.0.2
wcwidth==0.2.13
webencodings==0.5.1
Whoosh==2.7.4
wrapt==1.16.0
xmltodict==0.14.2
zipp==3.6.0
| name: azure-cli
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- adal==0.4.3
- applicationinsights==0.10.0
- argcomplete==1.8.0
- astroid==2.11.7
- attrs==22.2.0
- autopep8==2.0.4
- azure-batch==3.0.0
- azure-common==1.1.28
- azure-core==1.24.2
- azure-datalake-store==0.0.12
- azure-devtools==0.4.3
- azure-graphrbac==0.30.0rc6
- azure-keyvault==0.3.4
- azure-mgmt-authorization==0.30.0rc6
- azure-mgmt-batch==4.0.0
- azure-mgmt-billing==0.1.0
- azure-mgmt-cdn==0.30.2
- azure-mgmt-cognitiveservices==1.0.0
- azure-mgmt-compute==2.0.0
- azure-mgmt-consumption==0.1.0
- azure-mgmt-containerregistry==0.3.1
- azure-mgmt-datalake-analytics==0.1.6
- azure-mgmt-datalake-nspkg==3.0.1
- azure-mgmt-datalake-store==0.1.6
- azure-mgmt-devtestlabs==2.0.0
- azure-mgmt-dns==1.0.1
- azure-mgmt-documentdb==0.1.3
- azure-mgmt-iothub==0.2.2
- azure-mgmt-keyvault==0.40.0
- azure-mgmt-monitor==0.2.1
- azure-mgmt-network==1.2.0
- azure-mgmt-nspkg==1.0.0
- azure-mgmt-rdbms==0.1.0
- azure-mgmt-redis==1.0.0
- azure-mgmt-resource==1.1.0
- azure-mgmt-sql==0.6.0
- azure-mgmt-storage==1.1.0
- azure-mgmt-trafficmanager==0.30.0
- azure-mgmt-web==0.32.0
- azure-monitor==0.3.0
- azure-multiapi-storage==0.1.0
- azure-nspkg==1.0.0
- azure-servicefabric==5.6.130
- bleach==4.1.0
- cffi==1.15.1
- colorama==0.3.7
- configargparse==1.7
- coverage==6.2
- cryptography==40.0.2
- docutils==0.18.1
- flake8==5.0.4
- futures==3.1.1
- humanfriendly==2.4
- idna==3.10
- importlib-metadata==4.2.0
- iniconfig==1.1.1
- isodate==0.7.0
- isort==5.10.1
- jeepney==0.7.1
- jmespath==0.10.0
- keyring==23.4.1
- lazy-object-proxy==1.7.1
- mccabe==0.7.0
- mock==5.2.0
- msrest==0.4.29
- msrestazure==0.4.34
- nose==1.3.7
- oauthlib==3.2.2
- packaging==21.3
- paramiko==2.0.2
- pbr==6.1.1
- pip==9.0.1
- pluggy==1.0.0
- prompt-toolkit==3.0.36
- py==1.11.0
- pyasn1==0.5.1
- pycodestyle==2.10.0
- pycparser==2.21
- pydocumentdb==2.3.5
- pyflakes==2.5.0
- pygments==2.14.0
- pyjwt==2.4.0
- pylint==1.7.1
- pyopenssl==16.2.0
- pyparsing==3.1.4
- pytest==7.0.1
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyyaml==3.11
- readme-renderer==34.0
- requests==2.9.1
- requests-oauthlib==2.0.0
- scp==0.15.0
- secretstorage==3.3.3
- setuptools==30.4.0
- six==1.10.0
- sshtunnel==0.4.0
- tabulate==0.7.7
- tomli==1.2.3
- typed-ast==1.5.5
- typing-extensions==4.1.1
- urllib3==1.26.20
- urllib3-secure-extra==0.1.0
- vcrpy==1.10.3
- vsts-cd-manager==1.0.2
- wcwidth==0.2.13
- webencodings==0.5.1
- whoosh==2.7.4
- wrapt==1.16.0
- xmltodict==0.14.2
- zipp==3.6.0
prefix: /opt/conda/envs/azure-cli
| [
"src/command_modules/azure-cli-vm/azure/cli/command_modules/vm/tests/test_custom_vm_commands.py::Test_Vm_Custom::test_get_extension_instance_name_when_type_none"
] | [] | [
"src/command_modules/azure-cli-vm/azure/cli/command_modules/vm/tests/test_custom_vm_commands.py::Test_Vm_Custom::test_attach_existing_datadisk_on_vm",
"src/command_modules/azure-cli-vm/azure/cli/command_modules/vm/tests/test_custom_vm_commands.py::Test_Vm_Custom::test_attach_new_datadisk_custom_on_vm",
"src/command_modules/azure-cli-vm/azure/cli/command_modules/vm/tests/test_custom_vm_commands.py::Test_Vm_Custom::test_attach_new_datadisk_default_on_vm",
"src/command_modules/azure-cli-vm/azure/cli/command_modules/vm/tests/test_custom_vm_commands.py::Test_Vm_Custom::test_deattach_disk_on_vm",
"src/command_modules/azure-cli-vm/azure/cli/command_modules/vm/tests/test_custom_vm_commands.py::Test_Vm_Custom::test_disable_boot_diagnostics_on_vm",
"src/command_modules/azure-cli-vm/azure/cli/command_modules/vm/tests/test_custom_vm_commands.py::Test_Vm_Custom::test_disable_encryption_error_cases_handling",
"src/command_modules/azure-cli-vm/azure/cli/command_modules/vm/tests/test_custom_vm_commands.py::Test_Vm_Custom::test_enable_boot_diagnostics_on_vm_never_enabled",
"src/command_modules/azure-cli-vm/azure/cli/command_modules/vm/tests/test_custom_vm_commands.py::Test_Vm_Custom::test_enable_boot_diagnostics_skip_when_enabled_already",
"src/command_modules/azure-cli-vm/azure/cli/command_modules/vm/tests/test_custom_vm_commands.py::Test_Vm_Custom::test_enable_encryption_error_cases_handling",
"src/command_modules/azure-cli-vm/azure/cli/command_modules/vm/tests/test_custom_vm_commands.py::Test_Vm_Custom::test_encryption_distro_check",
"src/command_modules/azure-cli-vm/azure/cli/command_modules/vm/tests/test_custom_vm_commands.py::Test_Vm_Custom::test_get_access_extension_upgrade_info",
"src/command_modules/azure-cli-vm/azure/cli/command_modules/vm/tests/test_custom_vm_commands.py::Test_Vm_Custom::test_get_extension_instance_name",
"src/command_modules/azure-cli-vm/azure/cli/command_modules/vm/tests/test_custom_vm_commands.py::Test_Vm_Custom::test_merge_secrets",
"src/command_modules/azure-cli-vm/azure/cli/command_modules/vm/tests/test_custom_vm_commands.py::Test_Vm_Custom::test_show_vmss_instance_view"
] | [] | MIT License | 1,487 | 222 | [
"src/command_modules/azure-cli-vm/azure/cli/command_modules/vm/custom.py"
] |
acorg__slurm-pipeline-26 | 7a77fc4b53d04ce71219b516c0c01200f7f3ea89 | 2017-07-19 11:30:54 | 7a77fc4b53d04ce71219b516c0c01200f7f3ea89 | diff --git a/bin/slurm-pipeline-status.py b/bin/slurm-pipeline-status.py
index c0392d4..bd65b37 100755
--- a/bin/slurm-pipeline-status.py
+++ b/bin/slurm-pipeline-status.py
@@ -25,23 +25,23 @@ parser.add_argument(
'in JSON format.'))
parser.add_argument(
- '--squeueArgs', nargs='*', default=None,
+ '--squeueArgs', nargs='*',
help=('A list of arguments to pass to squeue (including the squeue '
- "command itself). If not specified, the user's login name will "
- 'be appended to squeue -u.'))
+ 'command itself). If not specified, "squeue -u USERNAME" is '
+ "used, where USERNAME is the user's login name."))
parser.add_argument(
'--printUnfinished', default=False, action='store_true',
help=('If specified, print a list of job ids that have not yet finished. '
- 'This can easily be used to cancel a job, via e.g., '
- '%s --printUnfinished -s spec.json | xargs scancel' % sys.argv[0]))
+ 'This can be used to cancel a job, via e.g., '
+ '%s --printUnfinished -s status.json | xargs scancel' % sys.argv[0]))
parser.add_argument(
'--printFinal', default=False, action='store_true',
help=('If specified, print a list of job ids issued by the final steps '
'of a specification. This can be used with the --startAfter option '
'to slurm-pipeline.py to make it schedule a different specification '
- 'to run only after the given specification finishes.'))
+ 'to run after the given specification is completely finished.'))
args = parser.parse_args()
diff --git a/setup.py b/setup.py
index b25b5cb..79394e7 100644
--- a/setup.py
+++ b/setup.py
@@ -3,7 +3,7 @@
from setuptools import setup
setup(name='slurm-pipeline',
- version='1.1.5',
+ version='1.1.6',
packages=['slurm_pipeline'],
include_package_data=True,
url='https://github.com/acorg/slurm-pipeline',
diff --git a/slurm_pipeline/base.py b/slurm_pipeline/base.py
index 83e586b..d9d2452 100644
--- a/slurm_pipeline/base.py
+++ b/slurm_pipeline/base.py
@@ -79,7 +79,7 @@ class SlurmPipelineBase(object):
'The name %r of step %d was already used in '
'an earlier step' % (stepName, count))
- if 'collect' in step and not step.get('dependencies', None):
+ if 'collect' in step and not step.get('dependencies'):
raise SpecificationError(
"Step %d (%r) is a 'collect' step but does not have any "
"dependencies" %
diff --git a/slurm_pipeline/pipeline.py b/slurm_pipeline/pipeline.py
index 817c9ae..503d51e 100644
--- a/slurm_pipeline/pipeline.py
+++ b/slurm_pipeline/pipeline.py
@@ -40,23 +40,28 @@ class SlurmPipeline(SlurmPipelineBase):
@raise SpecificationError: if there is anything wrong with the
specification.
"""
- if 'scheduledAt' in specification:
- raise SpecificationError(
- "The specification has a top-level 'scheduledAt' key, "
- 'but was not passed as a status specification')
+ SlurmPipelineBase.checkSpecification(specification)
for count, step in enumerate(specification['steps'], start=1):
- if not path.exists(step['script']):
+ try:
+ cwd = step['cwd']
+ except KeyError:
+ script = step['script']
+ else:
+ script = step['script']
+ if not path.isabs(script):
+ if cwd:
+ script = path.join(cwd, script)
+
+ if not path.exists(script):
raise SpecificationError(
'The script %r in step %d does not exist' %
(step['script'], count))
- if not os.access(step['script'], os.X_OK):
+ if not os.access(script, os.X_OK):
raise SpecificationError(
'The script %r in step %d is not executable' %
- (step['script'], count))
-
- SlurmPipelineBase.checkSpecification(specification)
+ (script, count))
def schedule(self, force=False, firstStep=None, lastStep=None, sleep=0.0,
scriptArgs=None, skip=None, startAfter=None, nice=None):
| Use the cwd attribute when checking for the existence of step scripts
The checking to see if scripts exist does not use the cwd attribute. So it's not possible to have a specification that uses cwd and a relative path to a script. | acorg/slurm-pipeline | diff --git a/test/test_pipeline.py b/test/test_pipeline.py
index a665d01..fb2ccfc 100644
--- a/test/test_pipeline.py
+++ b/test/test_pipeline.py
@@ -1,4 +1,4 @@
-from os import X_OK
+from os import X_OK, path
from unittest import TestCase
from six import assertRaisesRegex
from json import dumps
@@ -22,7 +22,7 @@ class TestSlurmPipeline(TestCase):
@patch('os.access')
@patch('os.path.exists')
- def testAccessFails(self, existsMock, accessMock):
+ def testNonexecutableScript(self, existsMock, accessMock):
"""
If os.access fails, a SpecificationError must be raised.
"""
@@ -39,6 +39,25 @@ class TestSlurmPipeline(TestCase):
]
})
+ @patch('os.access')
+ @patch('os.path.exists')
+ def testNonexistentScript(self, existsMock, accessMock):
+ """
+ If a step has a 'script' key that mentions a non-existent file, a
+ SpecificationError must be raised.
+ """
+ existsMock.return_value = False
+ error = "^The script 'script' in step 1 does not exist$"
+ assertRaisesRegex(self, SpecificationError, error, SlurmPipeline,
+ {
+ 'steps': [
+ {
+ 'name': 'name',
+ 'script': 'script',
+ },
+ ]
+ })
+
@patch('os.access')
@patch('os.path.exists')
def testAccessAndExistsAreCalled(self, existsMock, accessMock):
@@ -58,20 +77,55 @@ class TestSlurmPipeline(TestCase):
existsMock.assert_called_once_with('script')
accessMock.assert_called_once_with('script', X_OK)
- def testNonexistentScript(self):
+ @patch('os.access')
+ @patch('os.path.exists')
+ @patch('os.path.isabs')
+ def testAccessAndExistsAreCalledWithCwd(self, isabsMock, existsMock,
+ accessMock):
"""
- If a step has a 'script' key that mentions a non-existent file, a
- SpecificationError must be raised.
+ os.access, os.path.exists, and os.path.isabs must all be called as
+ expected, including the cwd from the step, as the specification is
+ checked.
"""
- error = "^The script 'script' in step 1 does not exist$"
- assertRaisesRegex(self, SpecificationError, error, SlurmPipeline,
- {
- 'steps': [
- {
- 'script': 'script',
- },
- ]
- })
+ isabsMock.return_value = False
+ SlurmPipeline(
+ {
+ 'steps': [
+ {
+ 'cwd': 'dir',
+ 'name': 'name',
+ 'script': 'script',
+ },
+ ]
+ })
+ script = path.join('dir', 'script')
+ isabsMock.assert_called_once_with('script')
+ existsMock.assert_called_once_with(script)
+ accessMock.assert_called_once_with(script, X_OK)
+
+ @patch('os.access')
+ @patch('os.path.exists')
+ @patch('os.path.isabs')
+ def testAccessAndExistsAreCalledWithAbsolutePathScript(
+ self, isabsMock, existsMock, accessMock):
+ """
+ os.access, os.path.exists, and os.path.isabs must all be called as
+ expected when the script path is absolute, as the specification is
+ checked.
+ """
+ SlurmPipeline(
+ {
+ 'steps': [
+ {
+ 'cwd': 'dir',
+ 'name': 'name',
+ 'script': '/bin/script',
+ },
+ ]
+ })
+ isabsMock.assert_called_once_with('/bin/script')
+ existsMock.assert_called_once_with('/bin/script')
+ accessMock.assert_called_once_with('/bin/script', X_OK)
@patch('os.access')
@patch('os.path.exists')
@@ -200,18 +254,6 @@ class TestSlurmPipeline(TestCase):
specification = sp.schedule()
self.assertIsInstance(specification['scheduledAt'], float)
- def testAlreadyScheduled(self):
- """
- If a specification with a top-level 'scheduledAt' key is passed to
- SlurmPipeline, a SpecificationError must be raised.
- """
- error = ("^The specification has a top-level 'scheduledAt' key, but "
- 'was not passed as a status specification$')
- assertRaisesRegex(self, SpecificationError, error, SlurmPipeline, {
- 'scheduledAt': None,
- 'steps': [],
- })
-
@patch('subprocess.check_output')
@patch('os.access')
@patch('os.path.exists')
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 4
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"discover",
"mock",
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"requirements-3.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | discover==0.4.0
exceptiongroup==1.2.2
iniconfig==2.1.0
mock==5.2.0
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
six==1.10.0
-e git+https://github.com/acorg/slurm-pipeline.git@7a77fc4b53d04ce71219b516c0c01200f7f3ea89#egg=slurm_pipeline
tomli==2.2.1
| name: slurm-pipeline
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- discover==0.4.0
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- mock==5.2.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- six==1.10.0
- tomli==2.2.1
prefix: /opt/conda/envs/slurm-pipeline
| [
"test/test_pipeline.py::TestSlurmPipeline::testAccessAndExistsAreCalledWithAbsolutePathScript",
"test/test_pipeline.py::TestSlurmPipeline::testAccessAndExistsAreCalledWithCwd"
] | [] | [
"test/test_pipeline.py::TestSlurmPipeline::testAccessAndExistsAreCalled",
"test/test_pipeline.py::TestSlurmPipeline::testCollectStepWithEmptyDependencies",
"test/test_pipeline.py::TestSlurmPipeline::testCollectStepWithNoDependencies",
"test/test_pipeline.py::TestSlurmPipeline::testCwdWithRelativeScriptPath",
"test/test_pipeline.py::TestSlurmPipeline::testDefaultNice",
"test/test_pipeline.py::TestSlurmPipeline::testErrorStep",
"test/test_pipeline.py::TestSlurmPipeline::testErrorStepWithNoDependencies",
"test/test_pipeline.py::TestSlurmPipeline::testFirstStepAndLastStepDifferent",
"test/test_pipeline.py::TestSlurmPipeline::testFirstStepAndLastStepSame",
"test/test_pipeline.py::TestSlurmPipeline::testFirstStepAndNoLastStep",
"test/test_pipeline.py::TestSlurmPipeline::testFirstStepOnly",
"test/test_pipeline.py::TestSlurmPipeline::testForce",
"test/test_pipeline.py::TestSlurmPipeline::testJSON",
"test/test_pipeline.py::TestSlurmPipeline::testLastStepBeforeFirstStep",
"test/test_pipeline.py::TestSlurmPipeline::testLastStepOnly",
"test/test_pipeline.py::TestSlurmPipeline::testNiceTooBig",
"test/test_pipeline.py::TestSlurmPipeline::testNiceTooSmall",
"test/test_pipeline.py::TestSlurmPipeline::testNonexecutableScript",
"test/test_pipeline.py::TestSlurmPipeline::testNonexistentFirstStep",
"test/test_pipeline.py::TestSlurmPipeline::testNonexistentLastStep",
"test/test_pipeline.py::TestSlurmPipeline::testNonexistentScript",
"test/test_pipeline.py::TestSlurmPipeline::testRepeatedTaskJobId",
"test/test_pipeline.py::TestSlurmPipeline::testRepeatedTaskName",
"test/test_pipeline.py::TestSlurmPipeline::testScheduledTime",
"test/test_pipeline.py::TestSlurmPipeline::testScriptArgs",
"test/test_pipeline.py::TestSlurmPipeline::testSingleCollectorDependencyNoJobIds",
"test/test_pipeline.py::TestSlurmPipeline::testSingleCollectorDependencyTaskNamesAndJobIds",
"test/test_pipeline.py::TestSlurmPipeline::testSingleDependencySynchronousTaskNamesJobIdsAndCalls",
"test/test_pipeline.py::TestSlurmPipeline::testSingleDependencyTaskNamesJobIdsAndCalls",
"test/test_pipeline.py::TestSlurmPipeline::testSkipNone",
"test/test_pipeline.py::TestSlurmPipeline::testSkipNonexistentStep",
"test/test_pipeline.py::TestSlurmPipeline::testSkipNonexistentSteps",
"test/test_pipeline.py::TestSlurmPipeline::testSkipTwo",
"test/test_pipeline.py::TestSlurmPipeline::testSleep",
"test/test_pipeline.py::TestSlurmPipeline::testSleepNotCalledByDefault",
"test/test_pipeline.py::TestSlurmPipeline::testSleepNotCalledWhenZero",
"test/test_pipeline.py::TestSlurmPipeline::testSpecificNice",
"test/test_pipeline.py::TestSlurmPipeline::testStartAfter",
"test/test_pipeline.py::TestSlurmPipeline::testStepStdout",
"test/test_pipeline.py::TestSlurmPipeline::testStepsDict",
"test/test_pipeline.py::TestSlurmPipeline::testStringNice",
"test/test_pipeline.py::TestSlurmPipeline::testTaskScheduleTime",
"test/test_pipeline.py::TestSlurmPipeline::testTasksFollowingSchedule"
] | [] | MIT License | 1,489 | 1,100 | [
"bin/slurm-pipeline-status.py",
"setup.py",
"slurm_pipeline/base.py",
"slurm_pipeline/pipeline.py"
] |
|
vertexproject__synapse-351 | c6db6455a18db6afcd2fd3ef86f0f7c8a10e1b51 | 2017-07-19 13:15:51 | 6f5fc661a88b8cc3f4befb2c9c7ddcebf0b89ba0 | diff --git a/synapse/cores/common.py b/synapse/cores/common.py
index 1598c0aa1..070cd0c4e 100644
--- a/synapse/cores/common.py
+++ b/synapse/cores/common.py
@@ -2671,13 +2671,8 @@ class Cortex(EventBus, DataModel, Runtime, Configable, s_ingest.IngestApi):
return ret
def _tufosByInetCidr(self, prop, valu, limit=None):
-
- ipv4str, cidr = valu.split('/', 1)
- ipv4addr, _ = s_datamodel.getTypeParse('inet:ipv4', ipv4str)
- mask = (2 ** (32 - int(cidr)))
- ipv4addr &= ~mask
-
- return self.getTufosBy('range', prop, (ipv4addr, ipv4addr + mask), limit=limit)
+ lowerbound, upperbound = self.getTypeCast('inet:ipv4:cidr', valu)
+ return self.getTufosBy('range', prop, (lowerbound, upperbound), limit=limit)
def _onTufoAddSynType(self, mesg):
tufo = mesg[1].get('node')
diff --git a/synapse/models/inet.py b/synapse/models/inet.py
index 9630c6e95..b3ae8faa7 100644
--- a/synapse/models/inet.py
+++ b/synapse/models/inet.py
@@ -27,10 +27,18 @@ def ipv4int(valu):
raise BadTypeValu(valu=valu, type='inet:ipv4', mesg=str(e))
masks = [(0xffffffff - (2 ** (32 - i) - 1)) for i in range(33)]
+cidrmasks = [((0xffffffff - (2 ** (32 - i) - 1)), (2 ** (32 - i))) for i in range(33)]
def ipv4mask(ipv4, mask):
return ipv4 & masks[mask]
+def ipv4cidr(valu):
+ _ipv4str, cidr = valu.split('/', 1)
+ _ipv4addr = ipv4int(_ipv4str)
+ mask = cidrmasks[int(cidr)]
+ lowerbound = _ipv4addr & mask[0]
+ return lowerbound, lowerbound + mask[1]
+
class IPv4Type(DataType):
def norm(self, valu, oldval=None):
if s_compat.isstr(valu):
@@ -287,6 +295,7 @@ class InetMod(CoreModule):
def initCoreModule(self):
# add an inet:defang cast to swap [.] to .
self.core.addTypeCast('inet:defang', castInetDeFang)
+ self.core.addTypeCast('inet:ipv4:cidr', ipv4cidr)
self.onFormNode('inet:fqdn', self.onTufoFormFqdn)
self.onFormNode('inet:passwd', self.onTufoFormPasswd)
self.revCoreModl()
@@ -332,7 +341,7 @@ class InetMod(CoreModule):
'inet:cidr4',
{'ctor': 'synapse.models.inet.CidrType', 'doc': 'An IPv4 CIDR type', 'ex': '1.2.3.0/24'}),
- ('inet:urlfile', {'subof': 'comp', 'fields':'url=inet:url,file=file:bytes',
+ ('inet:urlfile', {'subof': 'comp', 'fields': 'url=inet:url,file=file:bytes',
'doc': 'A File at a Universal Resource Locator (URL)'}),
('inet:net4',
{'subof': 'sepr', 'sep': '-', 'fields': 'min,inet:ipv4|max,inet:ipv4', 'doc': 'An IPv4 address range',
| cortex - lift by inet:cidr gives unexpected results
Lifting nodes by inet:cidr does not appear to behave as expected.
Make a cortex with a a few completely populated pair of /24 blocks
```
core = synapse.cortex.openurl('sqlite:///./cidr.db')
for i in range(0, 256):
r = core.formTufoByProp('inet:ipv4', "192.168.1.{}".format(i))
r = core.formTufoByProp('inet:ipv4', "192.168.2.{}".format(i))
r = core.formTufoByProp('inet:ipv4', "192.168.200.{}".format(i))
```
Then, connect to the cortex using commander and do some lifts:
```
python -m synapse.cortex sqlite:///./cidr.db
# Ensure we have nodes
cli> ask inet:ipv4=192.168.1.0
inet:ipv4 = 192.168.1.0
(1 results)
cli> ask inet:ipv4=192.168.1.200
inet:ipv4 = 192.168.1.200
(1 results)
cli> ask inet:ipv4=192.168.2.255
inet:ipv4 = 192.168.2.255
(1 results)
cli> ask inet:ipv4=192.168.200.100
inet:ipv4 = 192.168.200.100
(1 results)
# Now lift things using the cidr helper - this should yield nodes from 192.168.1.0 -> 192.168.1.255
cli> ask inet:ipv4*inet:cidr=192.168.1.0/24
(0 results)
# However, asking for all nodes in 192.168.2.0/24 does work
cli> ask inet:ipv4*inet:cidr=192.168.2.0/24
inet:ipv4 = 192.168.2.0
inet:ipv4 = 192.168.2.1
inet:ipv4 = 192.168.2.2
...
inet:ipv4 = 192.168.2.253
inet:ipv4 = 192.168.2.254
inet:ipv4 = 192.168.2.255
(256 results)
# As does asking for the 192.168.200/24
cli> ask inet:ipv4*inet:cidr=192.168.200.0/24
inet:ipv4 = 192.168.200.0
inet:ipv4 = 192.168.200.1
inet:ipv4 = 192.168.200.2
...
inet:ipv4 = 192.168.200.253
inet:ipv4 = 192.168.200.254
inet:ipv4 = 192.168.200.255
(256 results)
```
Cross checking with a few online CIDR calculators shows that the 192.168.1.xxx IP nodes would be in the IP range of 192.168.1.0/24
First reported by @therealsilence
@invisig0th is this a bug or a design limitation? | vertexproject/synapse | diff --git a/synapse/tests/test_cortex.py b/synapse/tests/test_cortex.py
index de94ab278..4255a95f0 100644
--- a/synapse/tests/test_cortex.py
+++ b/synapse/tests/test_cortex.py
@@ -623,14 +623,74 @@ class CortexTest(SynTest):
ipint, _ = tlib.getTypeParse('inet:ipv4', ip)
ipc = core.formTufoByProp('inet:ipv4', ipint)
- self.eq(len(core.getTufosBy('inet:cidr', 'inet:ipv4', '10.2.1.4/32')), 1)
- self.eq(len(core.getTufosBy('inet:cidr', 'inet:ipv4', '10.2.1.4/31')), 2)
- self.eq(len(core.getTufosBy('inet:cidr', 'inet:ipv4', '10.2.1.1/30')), 4)
- self.eq(len(core.getTufosBy('inet:cidr', 'inet:ipv4', '10.2.1.2/30')), 4)
- self.eq(len(core.getTufosBy('inet:cidr', 'inet:ipv4', '10.2.1.1/29')), 8)
- self.eq(len(core.getTufosBy('inet:cidr', 'inet:ipv4', '10.2.1.1/28')), 16)
-
- self.eq(len(core.getTufosBy('inet:cidr', 'inet:ipv4', '192.168.0.0/16')), 2)
+ # Validate the content we get from cidr lookups is correctly bounded
+ nodes = core.getTufosBy('inet:cidr', 'inet:ipv4', '10.2.1.4/32')
+ self.eq(len(nodes), 1)
+ nodes.sort(key=lambda x: x[1].get('inet:ipv4'))
+ test_repr = core.getTypeRepr('inet:ipv4', nodes[0][1].get('inet:ipv4'))
+ self.eq(test_repr, '10.2.1.4')
+
+ nodes = core.getTufosBy('inet:cidr', 'inet:ipv4', '10.2.1.4/31')
+ self.eq(len(nodes), 2)
+ nodes.sort(key=lambda x: x[1].get('inet:ipv4'))
+ test_repr = core.getTypeRepr('inet:ipv4', nodes[0][1].get('inet:ipv4'))
+ self.eq(test_repr, '10.2.1.4')
+ test_repr = core.getTypeRepr('inet:ipv4', nodes[-1][1].get('inet:ipv4'))
+ self.eq(test_repr, '10.2.1.5')
+
+ # 10.2.1.1/30 is 10.2.1.0 -> 10.2.1.3 but we don't have 10.2.1.0 in the core
+ nodes = core.getTufosBy('inet:cidr', 'inet:ipv4', '10.2.1.1/30')
+ self.eq(len(nodes), 3)
+ nodes.sort(key=lambda x: x[1].get('inet:ipv4'))
+ test_repr = core.getTypeRepr('inet:ipv4', nodes[0][1].get('inet:ipv4'))
+ self.eq(test_repr, '10.2.1.1')
+ test_repr = core.getTypeRepr('inet:ipv4', nodes[-1][1].get('inet:ipv4'))
+ self.eq(test_repr, '10.2.1.3')
+
+ # 10.2.1.2/30 is 10.2.1.0 -> 10.2.1.3 but we don't have 10.2.1.0 in the core
+ nodes = core.getTufosBy('inet:cidr', 'inet:ipv4', '10.2.1.2/30')
+ self.eq(len(nodes), 3)
+ nodes.sort(key=lambda x: x[1].get('inet:ipv4'))
+ test_repr = core.getTypeRepr('inet:ipv4', nodes[0][1].get('inet:ipv4'))
+ self.eq(test_repr, '10.2.1.1')
+ test_repr = core.getTypeRepr('inet:ipv4', nodes[-1][1].get('inet:ipv4'))
+ self.eq(test_repr, '10.2.1.3')
+
+ # 10.2.1.1/29 is 10.2.1.0 -> 10.2.1.7 but we don't have 10.2.1.0 in the core
+ nodes = core.getTufosBy('inet:cidr', 'inet:ipv4', '10.2.1.1/29')
+ self.eq(len(nodes), 7)
+ nodes.sort(key=lambda x: x[1].get('inet:ipv4'))
+ test_repr = core.getTypeRepr('inet:ipv4', nodes[0][1].get('inet:ipv4'))
+ self.eq(test_repr, '10.2.1.1')
+ test_repr = core.getTypeRepr('inet:ipv4', nodes[-1][1].get('inet:ipv4'))
+ self.eq(test_repr, '10.2.1.7')
+
+ # 10.2.1.8/29 is 10.2.1.8 -> 10.2.1.15
+ nodes = core.getTufosBy('inet:cidr', 'inet:ipv4', '10.2.1.8/29')
+ self.eq(len(nodes), 8)
+ nodes.sort(key=lambda x: x[1].get('inet:ipv4'))
+ test_repr = core.getTypeRepr('inet:ipv4', nodes[0][1].get('inet:ipv4'))
+ self.eq(test_repr, '10.2.1.8')
+ test_repr = core.getTypeRepr('inet:ipv4', nodes[-1][1].get('inet:ipv4'))
+ self.eq(test_repr, '10.2.1.15')
+
+ # 10.2.1.1/28 is 10.2.1.0 -> 10.2.1.15 but we don't have 10.2.1.0 in the core
+ nodes = core.getTufosBy('inet:cidr', 'inet:ipv4', '10.2.1.1/28')
+ self.eq(len(nodes), 15)
+ nodes.sort(key=lambda x: x[1].get('inet:ipv4'))
+ test_repr = core.getTypeRepr('inet:ipv4', nodes[0][1].get('inet:ipv4'))
+ self.eq(test_repr, '10.2.1.1')
+ test_repr = core.getTypeRepr('inet:ipv4', nodes[-1][1].get('inet:ipv4'))
+ self.eq(test_repr, '10.2.1.15')
+
+ # 192.168.0.0/16 is 192.168.0.0 -> 192.168.255.255 but we only have two nodes in this range
+ nodes = core.getTufosBy('inet:cidr', 'inet:ipv4', '192.168.0.0/16')
+ self.eq(len(nodes), 2)
+ nodes.sort(key=lambda x: x[1].get('inet:ipv4'))
+ test_repr = core.getTypeRepr('inet:ipv4', nodes[0][1].get('inet:ipv4'))
+ self.eq(test_repr, '192.168.0.1')
+ test_repr = core.getTypeRepr('inet:ipv4', nodes[-1][1].get('inet:ipv4'))
+ self.eq(test_repr, '192.168.255.254')
def test_cortex_tufo_by_postgres(self):
@@ -2227,3 +2287,75 @@ class CortexTest(SynTest):
self.eq(node[1].get('foo:bar:duck'), 'mallard')
node2 = core.formTufoByProp('foo:bar', 'I am a robot', duck='mandarin')
self.eq(node2[1].get('foo:bar:duck'), 'mandarin')
+
+ def test_cortex_lift_by_cidr(self):
+
+ with s_cortex.openurl('ram:///') as core:
+ # Add a bunch of nodes
+ for n in range(0, 256):
+ r = core.formTufoByProp('inet:ipv4', '192.168.1.{}'.format(n))
+ r = core.formTufoByProp('inet:ipv4', '192.168.2.{}'.format(n))
+ r = core.formTufoByProp('inet:ipv4', '192.168.200.{}'.format(n))
+
+ # Confirm we have nodes
+ self.eq(len(core.eval('inet:ipv4="192.168.1.0"')), 1)
+ self.eq(len(core.eval('inet:ipv4="192.168.1.255"')), 1)
+ self.eq(len(core.eval('inet:ipv4="192.168.2.0"')), 1)
+ self.eq(len(core.eval('inet:ipv4="192.168.2.255"')), 1)
+ self.eq(len(core.eval('inet:ipv4="192.168.200.0"')), 1)
+
+ # Do cidr lifts
+ nodes = core.eval('inet:ipv4*inet:cidr=192.168.2.0/24')
+ nodes.sort(key=lambda x: x[1].get('inet:ipv4'))
+ self.eq(len(nodes), 256)
+ test_repr = core.getTypeRepr('inet:ipv4', nodes[10][1].get('inet:ipv4'))
+ self.eq(test_repr, '192.168.2.10')
+ test_repr = core.getTypeRepr('inet:ipv4', nodes[0][1].get('inet:ipv4'))
+ self.eq(test_repr, '192.168.2.0')
+ test_repr = core.getTypeRepr('inet:ipv4', nodes[-1][1].get('inet:ipv4'))
+ self.eq(test_repr, '192.168.2.255')
+
+ nodes = core.eval('inet:ipv4*inet:cidr=192.168.200.0/24')
+ self.eq(len(nodes), 256)
+ test_repr = core.getTypeRepr('inet:ipv4', nodes[10][1].get('inet:ipv4'))
+ self.true(test_repr.startswith('192.168.200.'))
+
+ nodes = core.eval('inet:ipv4*inet:cidr=192.168.1.0/24')
+ self.eq(len(nodes), 256)
+ nodes.sort(key=lambda x: x[1].get('inet:ipv4'))
+ test_repr = core.getTypeRepr('inet:ipv4', nodes[10][1].get('inet:ipv4'))
+ self.eq(test_repr, '192.168.1.10')
+
+ # Try a complicated /24
+ nodes = core.eval('inet:ipv4*inet:cidr=192.168.1.1/24')
+ self.eq(len(nodes), 256)
+ nodes.sort(key=lambda x: x[1].get('inet:ipv4'))
+
+ test_repr = core.getTypeRepr('inet:ipv4', nodes[0][1].get('inet:ipv4'))
+ self.eq(test_repr, '192.168.1.0')
+ test_repr = core.getTypeRepr('inet:ipv4', nodes[255][1].get('inet:ipv4'))
+ self.eq(test_repr, '192.168.1.255')
+
+ # Try a /23
+ nodes = core.eval('inet:ipv4*inet:cidr=192.168.0.0/23')
+ self.eq(len(nodes), 256)
+ test_repr = core.getTypeRepr('inet:ipv4', nodes[10][1].get('inet:ipv4'))
+ self.true(test_repr.startswith('192.168.1.'))
+
+ # Try a /25
+ nodes = core.eval('inet:ipv4*inet:cidr=192.168.1.0/25')
+ nodes.sort(key=lambda x: x[1].get('inet:ipv4'))
+ self.eq(len(nodes), 128)
+ test_repr = core.getTypeRepr('inet:ipv4', nodes[-1][1].get('inet:ipv4'))
+ self.true(test_repr.startswith('192.168.1.127'))
+
+ # Try a /25
+ nodes = core.eval('inet:ipv4*inet:cidr=192.168.1.128/25')
+ nodes.sort(key=lambda x: x[1].get('inet:ipv4'))
+ self.eq(len(nodes), 128)
+ test_repr = core.getTypeRepr('inet:ipv4', nodes[0][1].get('inet:ipv4'))
+ self.true(test_repr.startswith('192.168.1.128'))
+
+ # Try a /16
+ nodes = core.eval('inet:ipv4*inet:cidr=192.168.0.0/16')
+ self.eq(len(nodes), 256 * 3)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 1
},
"num_modified_files": 2
} | 0.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
cffi==1.15.1
coverage==6.2
cryptography==40.0.2
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
lmdb==1.6.2
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
msgpack-python==0.5.6
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pycparser==2.21
pyOpenSSL==23.2.0
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
pytest-cov==4.0.0
-e git+https://github.com/vertexproject/synapse.git@c6db6455a18db6afcd2fd3ef86f0f7c8a10e1b51#egg=synapse
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
tomli==1.2.3
tornado==6.1
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
xxhash==3.2.0
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: synapse
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- cffi==1.15.1
- coverage==6.2
- cryptography==40.0.2
- lmdb==1.6.2
- msgpack-python==0.5.6
- pycparser==2.21
- pyopenssl==23.2.0
- pytest-cov==4.0.0
- tomli==1.2.3
- tornado==6.1
- xxhash==3.2.0
prefix: /opt/conda/envs/synapse
| [
"synapse/tests/test_cortex.py::CortexTest::test_cortex_lift_by_cidr",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_tufo_by_default"
] | [] | [
"synapse/tests/test_cortex.py::CortexTest::test_cortex_addmodel",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_by_type",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_bytype",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_caching",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_caching_add_tufo",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_caching_atlimit",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_caching_del_tufo",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_caching_disable",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_caching_new",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_caching_oneref",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_caching_set",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_caching_tags",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_caching_under_limit",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_choptag",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_comp",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_dict",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_enforce",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_events",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_fire_set",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_getbytag",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_ingest",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_isnew",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_lmdb",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_local",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_minmax",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_minmax_epoch",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_modlrevs",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_modlvers",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_module",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_module_datamodel_migration",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_module_datamodel_migration_persistent",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_norm_fail",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_notguidform",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_ram",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_ramhost",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_ramtyperange",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_reqstor",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_rev0",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_rev0_lmdb",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_rev0_savefd",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_rev0_savefd_lmdb",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_rev0_savefd_sqlite",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_savefd",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_seed",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_seq",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_splice_propdel",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_splicefd",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_splicepump",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_splices",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_splices_errs",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_sqlite3",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_stats",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_tag_ival",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_tagform",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_tags",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_tlib_persistence",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_tufo_del",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_tufo_list",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_tufo_pop",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_tufo_setprop",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_tufo_setprops",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_tufo_tag",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_xact_deadlock"
] | [] | Apache License 2.0 | 1,490 | 910 | [
"synapse/cores/common.py",
"synapse/models/inet.py"
] |
|
numpy__numpydoc-101 | 057ef572dd5ea547af2832e1b68d934cf953eafe | 2017-07-19 19:19:01 | 8c1e85c746d1c95b9433b2ae97057b7f447c83d1 | diff --git a/numpydoc/docscrape.py b/numpydoc/docscrape.py
index 074a7f7..45883d0 100644
--- a/numpydoc/docscrape.py
+++ b/numpydoc/docscrape.py
@@ -136,7 +136,7 @@ class NumpyDocString(collections.Mapping):
def __setitem__(self, key, val):
if key not in self._parsed_data:
- warn("Unknown section %s" % key)
+ self._error_location("Unknown section %s" % key, error=False)
else:
self._parsed_data[key] = val
@@ -331,19 +331,8 @@ class NumpyDocString(collections.Mapping):
section = (s.capitalize() for s in section.split(' '))
section = ' '.join(section)
if self.get(section):
- if hasattr(self, '_obj'):
- # we know where the docs came from:
- try:
- filename = inspect.getsourcefile(self._obj)
- except TypeError:
- filename = None
- msg = ("The section %s appears twice in "
- "the docstring of %s in %s." %
- (section, self._obj, filename))
- raise ValueError(msg)
- else:
- msg = ("The section %s appears twice" % section)
- raise ValueError(msg)
+ self._error_location("The section %s appears twice"
+ % section)
if section in ('Parameters', 'Returns', 'Yields', 'Raises',
'Warns', 'Other Parameters', 'Attributes',
@@ -356,6 +345,20 @@ class NumpyDocString(collections.Mapping):
else:
self[section] = content
+ def _error_location(self, msg, error=True):
+ if hasattr(self, '_obj'):
+ # we know where the docs came from:
+ try:
+ filename = inspect.getsourcefile(self._obj)
+ except TypeError:
+ filename = None
+ msg = msg + (" in the docstring of %s in %s."
+ % (self._obj, filename))
+ if error:
+ raise ValueError(msg)
+ else:
+ warn(msg)
+
# string conversion routines
def _str_header(self, name, symbol='-'):
| Unknown Section error has no context.
Similar to #95 the "Unknown section: X" error has no context, making it really hard to find and fix :-/ | numpy/numpydoc | diff --git a/numpydoc/tests/test_docscrape.py b/numpydoc/tests/test_docscrape.py
index 297a0ac..2dc45e3 100644
--- a/numpydoc/tests/test_docscrape.py
+++ b/numpydoc/tests/test_docscrape.py
@@ -3,6 +3,7 @@ from __future__ import division, absolute_import, print_function
import sys
import textwrap
+import warnings
import jinja2
@@ -151,13 +152,16 @@ def test_signature():
assert doc['Signature'].startswith('numpy.multivariate_normal(')
assert doc['Signature'].endswith('spam=None)')
+
def test_summary():
assert doc['Summary'][0].startswith('Draw values')
assert doc['Summary'][-1].endswith('covariance.')
+
def test_extended_summary():
assert doc['Extended Summary'][0].startswith('The multivariate normal')
+
def test_parameters():
assert_equal(len(doc['Parameters']), 3)
assert_equal([n for n,_,_ in doc['Parameters']], ['mean','cov','shape'])
@@ -167,6 +171,7 @@ def test_parameters():
assert desc[0].startswith('Covariance matrix')
assert doc['Parameters'][0][-1][-2] == ' (1+2+3)/3'
+
def test_other_parameters():
assert_equal(len(doc['Other Parameters']), 1)
assert_equal([n for n,_,_ in doc['Other Parameters']], ['spam'])
@@ -174,6 +179,7 @@ def test_other_parameters():
assert_equal(arg_type, 'parrot')
assert desc[0].startswith('A parrot off its mortal coil')
+
def test_returns():
assert_equal(len(doc['Returns']), 2)
arg, arg_type, desc = doc['Returns'][0]
@@ -188,6 +194,7 @@ def test_returns():
assert desc[0].startswith('This is not a real')
assert desc[-1].endswith('anonymous return values.')
+
def test_yields():
section = doc_yields['Yields']
assert_equal(len(section), 3)
@@ -200,6 +207,7 @@ def test_yields():
assert desc[0].startswith('The number of')
assert desc[0].endswith(end)
+
def test_returnyield():
doc_text = """
Test having returns and yields.
@@ -289,26 +297,31 @@ def test_notes():
assert doc['Notes'][-1].endswith('definite.')
assert_equal(len(doc['Notes']), 17)
+
def test_references():
assert doc['References'][0].startswith('..')
assert doc['References'][-1].endswith('2001.')
+
def test_examples():
assert doc['Examples'][0].startswith('>>>')
assert doc['Examples'][-1].endswith('True]')
+
def test_index():
assert_equal(doc['index']['default'], 'random')
assert_equal(len(doc['index']), 2)
assert_equal(len(doc['index']['refguide']), 2)
-def non_blank_line_by_line_compare(a,b):
+
+def non_blank_line_by_line_compare(a, b):
a = textwrap.dedent(a)
b = textwrap.dedent(b)
a = [l.rstrip() for l in a.split('\n') if l.strip()]
b = [l.rstrip() for l in b.split('\n') if l.strip()]
assert_list_equal(a, b)
+
def test_str():
# doc_txt has the order of Notes and See Also sections flipped.
# This should be handled automatically, and so, one thing this test does
@@ -595,15 +608,18 @@ doc2 = NumpyDocString("""
If None, the index is into the flattened array, otherwise along
the specified axis""")
+
def test_parameters_without_extended_description():
assert_equal(len(doc2['Parameters']), 2)
+
doc3 = NumpyDocString("""
my_signature(*params, **kwds)
Return this and that.
""")
+
def test_escape_stars():
signature = str(doc3).split('\n')[0]
assert_equal(signature, 'my_signature(\*params, \*\*kwds)')
@@ -614,14 +630,17 @@ def test_escape_stars():
fdoc = FunctionDoc(func=my_func)
assert_equal(fdoc['Signature'], 'my_func(a, b, \*\*kwargs)')
+
doc4 = NumpyDocString(
"""a.conj()
Return an array with all complex-valued elements conjugated.""")
+
def test_empty_extended_summary():
assert_equal(doc4['Extended Summary'], [])
+
doc5 = NumpyDocString(
"""
a.something()
@@ -637,18 +656,21 @@ doc5 = NumpyDocString(
If needed
""")
+
def test_raises():
assert_equal(len(doc5['Raises']), 1)
name,_,desc = doc5['Raises'][0]
assert_equal(name,'LinAlgException')
assert_equal(desc,['If array is singular.'])
+
def test_warns():
assert_equal(len(doc5['Warns']), 1)
name,_,desc = doc5['Warns'][0]
assert_equal(name,'SomeWarning')
assert_equal(desc,['If needed'])
+
def test_see_also():
doc6 = NumpyDocString(
"""
@@ -726,12 +748,45 @@ def test_see_also_print():
assert(' some relationship' in s)
assert(':func:`func_d`' in s)
+
+def test_unknown_section():
+ doc_text = """
+Test having an unknown section
+
+Mope
+----
+This should be ignored and warned about
+"""
+
+ class BadSection(object):
+ """Class with bad section.
+
+ Nope
+ ----
+ This class has a nope section.
+ """
+ pass
+
+ with warnings.catch_warnings(record=True) as w:
+ NumpyDocString(doc_text)
+ assert len(w) == 1
+ assert "Unknown section Mope" == str(w[0].message)
+
+ with warnings.catch_warnings(record=True) as w:
+ SphinxClassDoc(BadSection)
+ assert len(w) == 1
+ assert_true('test_docscrape.test_unknown_section.<locals>.BadSection'
+ in str(w[0].message)
+ or 'test_docscrape.BadSection' in str(w[0].message))
+
+
doc7 = NumpyDocString("""
Doc starts on second line.
""")
+
def test_empty_first_line():
assert doc7['Summary'][0].startswith('Doc starts')
@@ -762,6 +817,7 @@ def test_unicode():
assert isinstance(doc['Summary'][0], str)
assert doc['Summary'][0] == 'öäöäöäöäöåååå'
+
def test_plot_examples():
cfg = dict(use_plots=True)
@@ -785,6 +841,7 @@ def test_plot_examples():
""", config=cfg)
assert str(doc).count('plot::') == 1, str(doc)
+
def test_class_members():
class Dummy(object):
@@ -866,6 +923,7 @@ def test_class_members():
else:
assert 'Spammity index' in str(doc), str(doc)
+
def test_duplicate_signature():
# Duplicate function signatures occur e.g. in ufuncs, when the
# automatic mechanism adds one, and a more detailed comes from the
@@ -911,6 +969,7 @@ class_doc_txt = """
For usage examples, see `ode`.
"""
+
def test_class_members_doc():
doc = ClassDoc(None, class_doc_txt)
non_blank_line_by_line_compare(str(doc),
@@ -949,6 +1008,7 @@ def test_class_members_doc():
""")
+
def test_class_members_doc_sphinx():
class Foo:
@property
@@ -997,6 +1057,7 @@ def test_class_members_doc_sphinx():
""")
+
def test_templated_sections():
doc = SphinxClassDoc(None, class_doc_txt,
config={'template': jinja2.Template('{{examples}}{{parameters}}')})
@@ -1020,8 +1081,6 @@ def test_templated_sections():
""")
-
-
if __name__ == "__main__":
import nose
nose.run()
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 1
} | 0.7 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
attrs==22.2.0
Babel==2.11.0
certifi==2021.5.30
charset-normalizer==2.0.12
docutils==0.18.1
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
iniconfig==1.1.1
Jinja2==3.0.3
MarkupSafe==2.0.1
nose==1.3.7
-e git+https://github.com/numpy/numpydoc.git@057ef572dd5ea547af2832e1b68d934cf953eafe#egg=numpydoc
packaging==21.3
pluggy==1.0.0
py==1.11.0
Pygments==2.14.0
pyparsing==3.1.4
pytest==7.0.1
pytz==2025.2
requests==2.27.1
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
zipp==3.6.0
| name: numpydoc
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- attrs==22.2.0
- babel==2.11.0
- charset-normalizer==2.0.12
- docutils==0.18.1
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- jinja2==3.0.3
- markupsafe==2.0.1
- nose==1.3.7
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pygments==2.14.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytz==2025.2
- requests==2.27.1
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- zipp==3.6.0
prefix: /opt/conda/envs/numpydoc
| [
"numpydoc/tests/test_docscrape.py::test_unknown_section"
] | [] | [
"numpydoc/tests/test_docscrape.py::test_signature",
"numpydoc/tests/test_docscrape.py::test_summary",
"numpydoc/tests/test_docscrape.py::test_extended_summary",
"numpydoc/tests/test_docscrape.py::test_parameters",
"numpydoc/tests/test_docscrape.py::test_other_parameters",
"numpydoc/tests/test_docscrape.py::test_returns",
"numpydoc/tests/test_docscrape.py::test_yields",
"numpydoc/tests/test_docscrape.py::test_returnyield",
"numpydoc/tests/test_docscrape.py::test_section_twice",
"numpydoc/tests/test_docscrape.py::test_notes",
"numpydoc/tests/test_docscrape.py::test_references",
"numpydoc/tests/test_docscrape.py::test_examples",
"numpydoc/tests/test_docscrape.py::test_index",
"numpydoc/tests/test_docscrape.py::test_str",
"numpydoc/tests/test_docscrape.py::test_yield_str",
"numpydoc/tests/test_docscrape.py::test_sphinx_str",
"numpydoc/tests/test_docscrape.py::test_sphinx_yields_str",
"numpydoc/tests/test_docscrape.py::test_parameters_without_extended_description",
"numpydoc/tests/test_docscrape.py::test_escape_stars",
"numpydoc/tests/test_docscrape.py::test_empty_extended_summary",
"numpydoc/tests/test_docscrape.py::test_raises",
"numpydoc/tests/test_docscrape.py::test_warns",
"numpydoc/tests/test_docscrape.py::test_see_also",
"numpydoc/tests/test_docscrape.py::test_see_also_parse_error",
"numpydoc/tests/test_docscrape.py::test_see_also_print",
"numpydoc/tests/test_docscrape.py::test_empty_first_line",
"numpydoc/tests/test_docscrape.py::test_no_summary",
"numpydoc/tests/test_docscrape.py::test_unicode",
"numpydoc/tests/test_docscrape.py::test_plot_examples",
"numpydoc/tests/test_docscrape.py::test_class_members",
"numpydoc/tests/test_docscrape.py::test_duplicate_signature",
"numpydoc/tests/test_docscrape.py::test_class_members_doc",
"numpydoc/tests/test_docscrape.py::test_class_members_doc_sphinx",
"numpydoc/tests/test_docscrape.py::test_templated_sections"
] | [] | BSD License | 1,491 | 532 | [
"numpydoc/docscrape.py"
] |
|
force-h2020__force-bdss-37 | ddccc7b7caf43ae6e2b20543239aff9e1d9df8e6 | 2017-07-20 16:44:13 | ddccc7b7caf43ae6e2b20543239aff9e1d9df8e6 | diff --git a/force_bdss/api.py b/force_bdss/api.py
index 1a5aeed..c1022dc 100644
--- a/force_bdss/api.py
+++ b/force_bdss/api.py
@@ -1,15 +1,21 @@
from .base_extension_plugin import BaseExtensionPlugin # noqa
from .id_generators import bundle_id # noqa
-from .data_sources.i_data_source_bundle import IDataSourceBundle # noqa
-from .mco.i_multi_criteria_optimizer_bundle import IMultiCriteriaOptimizerBundle # noqa
-from .kpi.i_kpi_calculator_bundle import IKPICalculatorBundle # noqa
+
from .data_sources.base_data_source_model import BaseDataSourceModel # noqa
from .data_sources.data_source_result import DataSourceResult # noqa
from .data_sources.data_source_parameters import DataSourceParameters # noqa
from .data_sources.base_data_source import BaseDataSource # noqa
+from .data_sources.base_data_source_bundle import BaseDataSourceBundle # noqa
+from .data_sources.i_data_source_bundle import IDataSourceBundle # noqa
+
from .kpi.base_kpi_calculator import BaseKPICalculator # noqa
from .kpi.kpi_calculator_result import KPICalculatorResult # noqa
from .kpi.base_kpi_calculator_model import BaseKPICalculatorModel # noqa
+from .kpi.base_kpi_calculator_bundle import BaseKPICalculatorBundle # noqa
+from .kpi.i_kpi_calculator_bundle import IKPICalculatorBundle # noqa
+
from .mco.base_mco_model import BaseMCOModel # noqa
from .mco.base_mco_communicator import BaseMCOCommunicator # noqa
from .mco.base_multi_criteria_optimizer import BaseMultiCriteriaOptimizer # noqa
+from .mco.base_multi_criteria_optimizer_bundle import BaseMultiCriteriaOptimizerBundle # noqa
+from .mco.i_multi_criteria_optimizer_bundle import IMultiCriteriaOptimizerBundle # noqa
diff --git a/force_bdss/core_plugins/csv_extractor/csv_extractor/csv_extractor_bundle.py b/force_bdss/core_plugins/csv_extractor/csv_extractor/csv_extractor_bundle.py
index a44b5e1..55f998c 100644
--- a/force_bdss/core_plugins/csv_extractor/csv_extractor/csv_extractor_bundle.py
+++ b/force_bdss/core_plugins/csv_extractor/csv_extractor/csv_extractor_bundle.py
@@ -1,15 +1,16 @@
-from traits.api import provides, HasStrictTraits, String
+from traits.api import String
-from force_bdss.api import bundle_id, IDataSourceBundle
+from force_bdss.api import bundle_id, BaseDataSourceBundle
from .csv_extractor_model import CSVExtractorModel
from .csv_extractor_data_source import CSVExtractorDataSource
-@provides(IDataSourceBundle)
-class CSVExtractorBundle(HasStrictTraits):
+class CSVExtractorBundle(BaseDataSourceBundle):
id = String(bundle_id("enthought", "csv_extractor"))
+ name = String("CSV Extractor")
+
def create_model(self, model_data=None):
if model_data is None:
model_data = {}
diff --git a/force_bdss/core_plugins/dummy_kpi/kpi_adder/kpi_adder_bundle.py b/force_bdss/core_plugins/dummy_kpi/kpi_adder/kpi_adder_bundle.py
index a06cf88..ab40c71 100644
--- a/force_bdss/core_plugins/dummy_kpi/kpi_adder/kpi_adder_bundle.py
+++ b/force_bdss/core_plugins/dummy_kpi/kpi_adder/kpi_adder_bundle.py
@@ -1,16 +1,16 @@
-from traits.api import provides, HasStrictTraits, String
+from traits.api import String
-from force_bdss.api import bundle_id
-from force_bdss.api import IKPICalculatorBundle
+from force_bdss.api import bundle_id, BaseKPICalculatorBundle
from .kpi_adder_model import KPIAdderModel
from .kpi_adder_calculator import KPIAdderCalculator
-@provides(IKPICalculatorBundle)
-class KPIAdderBundle(HasStrictTraits):
+class KPIAdderBundle(BaseKPICalculatorBundle):
id = String(bundle_id("enthought", "kpi_adder"))
+ name = String("KPI Adder")
+
def create_model(self, model_data=None):
if model_data is None:
model_data = {}
diff --git a/force_bdss/core_plugins/dummy_mco/dakota/dakota_bundle.py b/force_bdss/core_plugins/dummy_mco/dakota/dakota_bundle.py
index 07e4dd3..e4d9f44 100644
--- a/force_bdss/core_plugins/dummy_mco/dakota/dakota_bundle.py
+++ b/force_bdss/core_plugins/dummy_mco/dakota/dakota_bundle.py
@@ -1,15 +1,16 @@
-from traits.api import HasStrictTraits, provides, String
-from force_bdss.api import bundle_id, IMultiCriteriaOptimizerBundle
+from traits.api import String
+from force_bdss.api import bundle_id, BaseMultiCriteriaOptimizerBundle
from .dakota_communicator import DakotaCommunicator
from .dakota_model import DakotaModel
from .dakota_optimizer import DakotaOptimizer
-@provides(IMultiCriteriaOptimizerBundle)
-class DakotaBundle(HasStrictTraits):
+class DakotaBundle(BaseMultiCriteriaOptimizerBundle):
id = String(bundle_id("enthought", "dakota"))
+ name = "Dakota"
+
def create_model(self, model_data=None):
if model_data is None:
model_data = {}
diff --git a/force_bdss/data_sources/base_data_source_bundle.py b/force_bdss/data_sources/base_data_source_bundle.py
new file mode 100644
index 0000000..ab53763
--- /dev/null
+++ b/force_bdss/data_sources/base_data_source_bundle.py
@@ -0,0 +1,56 @@
+import abc
+from traits.api import ABCHasStrictTraits, provides, String
+
+from .i_data_source_bundle import IDataSourceBundle
+
+
+@provides(IDataSourceBundle)
+class BaseDataSourceBundle(ABCHasStrictTraits):
+ """Base class for DataSource bundles. Reimplement this class to
+ create your own DataSource.
+ """
+ # NOTE: changes to this class must be ported also to the IDataSourceBundle
+
+ #: Unique identifier that identifies the bundle uniquely in the
+ #: universe of bundles. Create one with the function bundle_id()
+ id = String()
+
+ #: A human readable name of the bundle. Spaces allowed
+ name = String()
+
+ @abc.abstractmethod
+ def create_data_source(self, application, model):
+ """Factory method.
+ Must return the bundle-specific BaseDataSource instance.
+
+ Parameters
+ ----------
+ application: Application
+ The envisage application.
+ model: BaseDataSourceModel
+ The model of the data source, instantiated with create_model()
+
+ Returns
+ -------
+ BaseDataSource
+ The specific instance of the generated DataSource
+ """
+
+ @abc.abstractmethod
+ def create_model(self, model_data=None):
+ """Factory method.
+ Creates the model object (or network of model objects) of the KPI
+ calculator. The model can provide a traits UI View according to
+ traitsui specifications, so that a UI can be provided automatically.
+
+ Parameters
+ ----------
+ model_data: dict or None
+ A dictionary containing the information to recreate the model.
+ If None, an empty (with defaults) model will be returned.
+
+ Returns
+ -------
+ BaseDataSourceModel
+ The model
+ """
diff --git a/force_bdss/data_sources/i_data_source_bundle.py b/force_bdss/data_sources/i_data_source_bundle.py
index 5270245..8ed2556 100644
--- a/force_bdss/data_sources/i_data_source_bundle.py
+++ b/force_bdss/data_sources/i_data_source_bundle.py
@@ -13,7 +13,6 @@ class IDataSourceBundle(Interface):
"""Factory method.
Must return the bundle-specific BaseDataSource instance.
"""
- pass
def create_model(self, model_data=None):
"""Factory method.
diff --git a/force_bdss/kpi/base_kpi_calculator_bundle.py b/force_bdss/kpi/base_kpi_calculator_bundle.py
new file mode 100644
index 0000000..5ae4063
--- /dev/null
+++ b/force_bdss/kpi/base_kpi_calculator_bundle.py
@@ -0,0 +1,58 @@
+import abc
+from traits.api import ABCHasStrictTraits, provides, String
+
+from .i_kpi_calculator_bundle import IKPICalculatorBundle
+
+
+@provides(IKPICalculatorBundle)
+class BaseKPICalculatorBundle(ABCHasStrictTraits):
+ """Base class for the Key Performance Indicator calculator bundles.
+ Inherit from this class to create a bundle, and reimplement the abstract
+ methods.
+ """
+ # NOTE: any changes in this interface must be ported to
+ # IKPICalculatorBundle
+
+ #: A unique ID generated with bundle_id() routine
+ id = String()
+
+ #: A UI friendly name for the bundle. Can contain spaces.
+ name = String()
+
+ @abc.abstractmethod
+ def create_kpi_calculator(self, application, model):
+ """Factory method.
+ Creates and returns an instance of a KPI Calculator, associated
+ to the given application and model.
+
+ Parameters
+ ----------
+ application: Application
+ The envisage application.
+ model: BaseKPICalculatorModel
+ The model of the calculator, instantiated with create_model()
+
+ Returns
+ -------
+ BaseKPICalculator
+ The specific instance of the generated KPICalculator
+ """
+
+ @abc.abstractmethod
+ def create_model(self, model_data=None):
+ """Factory method.
+ Creates the model object (or network of model objects) of the KPI
+ calculator. The model can provide a traits UI View according to
+ traitsui specifications, so that a UI can be provided automatically.
+
+ Parameters
+ ----------
+ model_data: dict or None
+ A dictionary containing the information to recreate the model.
+ If None, an empty (with defaults) model will be returned.
+
+ Returns
+ -------
+ BaseKPICalculatorModel
+ The model
+ """
diff --git a/force_bdss/kpi/i_kpi_calculator_bundle.py b/force_bdss/kpi/i_kpi_calculator_bundle.py
index d4e8992..938e7de 100644
--- a/force_bdss/kpi/i_kpi_calculator_bundle.py
+++ b/force_bdss/kpi/i_kpi_calculator_bundle.py
@@ -2,8 +2,12 @@ from traits.api import Interface, String
class IKPICalculatorBundle(Interface):
+ """Envisage required interface for the BaseKPICalculatorBundle.
+ You should not need to use this directly."""
id = String()
+ name = String()
+
def create_kpi_calculator(self, application, model):
pass
diff --git a/force_bdss/mco/base_multi_criteria_optimizer_bundle.py b/force_bdss/mco/base_multi_criteria_optimizer_bundle.py
new file mode 100644
index 0000000..228ec4c
--- /dev/null
+++ b/force_bdss/mco/base_multi_criteria_optimizer_bundle.py
@@ -0,0 +1,76 @@
+import abc
+
+from traits.api import ABCHasStrictTraits, String
+from traits.has_traits import provides
+
+from force_bdss.mco.i_multi_criteria_optimizer_bundle import (
+ IMultiCriteriaOptimizerBundle
+)
+
+
+@provides(IMultiCriteriaOptimizerBundle)
+class BaseMultiCriteriaOptimizerBundle(ABCHasStrictTraits):
+ """Base class for the MultiCriteria Optimizer bundle.
+ """
+ # NOTE: any changes to the interface of this class must be replicated
+ # in the IMultiCriteriaOptimizerBundle interface class.
+
+ #: A unique ID produced with the bundle_id() routine.
+ id = String()
+
+ #: A user friendly name of the bundle. Spaces allowed.
+ name = String()
+
+ @abc.abstractmethod
+ def create_optimizer(self, application, model):
+ """Factory method.
+ Creates the optimizer with the given application
+ and model and returns it to the caller.
+
+ Parameters
+ ----------
+ application: Application
+ The envisage application instance
+ model: BaseMCOModel
+ The model to associate to the optimizer, instantiated through
+ create_model()
+
+ Returns
+ -------
+ BaseMCOOptimizer
+ The optimizer
+ """
+
+ @abc.abstractmethod
+ def create_model(self, model_data=None):
+ """Factory method.
+ Creates the model object (or network of model objects) of the MCO.
+ The model can provide a traits UI View according to traitsui
+ specifications, so that a UI can be provided automatically.
+
+ Parameters
+ ----------
+ model_data: dict or None
+ A dictionary of data that can be interpreted appropriately to
+ recreate the model. If None, an empty (with defaults) model will
+ be created and returned.
+
+ Returns
+ -------
+ BaseMCOModel
+ The MCOModel
+ """
+
+ @abc.abstractmethod
+ def create_communicator(self, application, model):
+ """Factory method. Returns the communicator class that allows
+ exchange between the MCO and the evaluator code.
+
+ Parameters
+ ----------
+ application: Application
+ The envisage application instance
+ model: BaseMCOModel
+ The model to associate to the optimizer, instantiated through
+ create_model()
+ """
diff --git a/force_bdss/mco/i_multi_criteria_optimizer_bundle.py b/force_bdss/mco/i_multi_criteria_optimizer_bundle.py
index cb42229..82a857a 100644
--- a/force_bdss/mco/i_multi_criteria_optimizer_bundle.py
+++ b/force_bdss/mco/i_multi_criteria_optimizer_bundle.py
@@ -2,13 +2,18 @@ from traits.api import Interface, String
class IMultiCriteriaOptimizerBundle(Interface):
+ """Interface for the MultiCriteria Optimizer bundle.
+ You should not need it, as its main use is for envisage support.
+ """
id = String()
+ name = String()
+
def create_optimizer(self, application, model):
pass
def create_model(self, model_data=None):
pass
- def create_communicator(self, model_data):
+ def create_communicator(self, application, model):
pass
| Introduce base class for the bundle.
We should have a base class for the bundle, using `@provides`, so that plugin implementers don't have to worry about that.
| force-h2020/force-bdss | diff --git a/force_bdss/data_sources/tests/test_base_data_source_bundle.py b/force_bdss/data_sources/tests/test_base_data_source_bundle.py
new file mode 100644
index 0000000..3564a7e
--- /dev/null
+++ b/force_bdss/data_sources/tests/test_base_data_source_bundle.py
@@ -0,0 +1,23 @@
+import unittest
+
+from force_bdss.data_sources.base_data_source_bundle import \
+ BaseDataSourceBundle
+
+
+class DummyDataSourceBundle(BaseDataSourceBundle):
+ id = "foo"
+
+ name = "bar"
+
+ def create_data_source(self, application, model):
+ pass
+
+ def create_model(self, model_data=None):
+ pass
+
+
+class TestBaseDataSourceBundle(unittest.TestCase):
+ def test_initialization(self):
+ bundle = DummyDataSourceBundle()
+ self.assertEqual(bundle.id, 'foo')
+ self.assertEqual(bundle.name, 'bar')
diff --git a/force_bdss/kpi/tests/test_base_kpi_calculator_bundle.py b/force_bdss/kpi/tests/test_base_kpi_calculator_bundle.py
new file mode 100644
index 0000000..30dc60e
--- /dev/null
+++ b/force_bdss/kpi/tests/test_base_kpi_calculator_bundle.py
@@ -0,0 +1,23 @@
+import unittest
+
+from force_bdss.kpi.base_kpi_calculator_bundle import \
+ BaseKPICalculatorBundle
+
+
+class DummyKPICalculatorBundle(BaseKPICalculatorBundle):
+ id = "foo"
+
+ name = "bar"
+
+ def create_kpi_calculator(self, application, model):
+ pass
+
+ def create_model(self, model_data=None):
+ pass
+
+
+class TestBaseKPICalculatorBundle(unittest.TestCase):
+ def test_initialization(self):
+ bundle = DummyKPICalculatorBundle()
+ self.assertEqual(bundle.id, 'foo')
+ self.assertEqual(bundle.name, 'bar')
diff --git a/force_bdss/mco/tests/test_base_multi_criteria_optimizer_bundle.py b/force_bdss/mco/tests/test_base_multi_criteria_optimizer_bundle.py
new file mode 100644
index 0000000..de89488
--- /dev/null
+++ b/force_bdss/mco/tests/test_base_multi_criteria_optimizer_bundle.py
@@ -0,0 +1,27 @@
+import unittest
+
+from force_bdss.mco.base_multi_criteria_optimizer_bundle import (
+ BaseMultiCriteriaOptimizerBundle
+)
+
+
+class DummyMCOBundle(BaseMultiCriteriaOptimizerBundle):
+ id = "foo"
+
+ name = "bar"
+
+ def create_optimizer(self, application, model):
+ pass
+
+ def create_model(self, model_data=None):
+ pass
+
+ def create_communicator(self, model_data):
+ pass
+
+
+class TestBaseDataSourceBundle(unittest.TestCase):
+ def test_initialization(self):
+ bundle = DummyMCOBundle()
+ self.assertEqual(bundle.id, 'foo')
+ self.assertEqual(bundle.name, 'bar')
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 7
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | apptools==5.3.0
click==8.1.8
envisage==7.0.3
exceptiongroup==1.2.2
-e git+https://github.com/force-h2020/force-bdss.git@ddccc7b7caf43ae6e2b20543239aff9e1d9df8e6#egg=force_bdss
importlib_metadata==8.6.1
iniconfig==2.1.0
numpy==2.0.2
packaging==24.2
pbr==6.1.1
pluggy==1.5.0
pyface==8.0.0
pytest==8.3.5
six==1.17.0
stevedore==5.4.1
tomli==2.2.1
traits==7.0.2
traitsui==8.0.0
zipp==3.21.0
| name: force-bdss
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- apptools==5.3.0
- click==8.1.8
- envisage==7.0.3
- exceptiongroup==1.2.2
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- numpy==2.0.2
- packaging==24.2
- pbr==6.1.1
- pluggy==1.5.0
- pyface==8.0.0
- pytest==8.3.5
- six==1.17.0
- stevedore==5.4.1
- tomli==2.2.1
- traits==7.0.2
- traitsui==8.0.0
- zipp==3.21.0
prefix: /opt/conda/envs/force-bdss
| [
"force_bdss/data_sources/tests/test_base_data_source_bundle.py::TestBaseDataSourceBundle::test_initialization",
"force_bdss/kpi/tests/test_base_kpi_calculator_bundle.py::TestBaseKPICalculatorBundle::test_initialization",
"force_bdss/mco/tests/test_base_multi_criteria_optimizer_bundle.py::TestBaseDataSourceBundle::test_initialization"
] | [] | [] | [] | BSD 2-Clause "Simplified" License | 1,493 | 3,431 | [
"force_bdss/api.py",
"force_bdss/core_plugins/csv_extractor/csv_extractor/csv_extractor_bundle.py",
"force_bdss/core_plugins/dummy_kpi/kpi_adder/kpi_adder_bundle.py",
"force_bdss/core_plugins/dummy_mco/dakota/dakota_bundle.py",
"force_bdss/data_sources/i_data_source_bundle.py",
"force_bdss/kpi/i_kpi_calculator_bundle.py",
"force_bdss/mco/i_multi_criteria_optimizer_bundle.py"
] |
|
pydicom__pydicom-429 | 352049ca4e2bd53ef689484aed57933d067512a7 | 2017-07-20 19:35:06 | bef49851e7c3b70edd43cc40fc84fe905e78d5ba | vsoch: Even if it's the same speed, I would say this functionality is still useful for applications that may want to expose only a particular set of tags.
darcymason: LGTM for me... can we have a second? | diff --git a/pydicom/filereader.py b/pydicom/filereader.py
index b9874fc41..692cc43d9 100644
--- a/pydicom/filereader.py
+++ b/pydicom/filereader.py
@@ -14,7 +14,7 @@ import zlib
from io import BytesIO
from pydicom.misc import size_in_bytes
-from pydicom.tag import TupleTag
+from pydicom.tag import TupleTag, Tag, BaseTag
from pydicom.dataelem import RawDataElement
from pydicom.util.hexutil import bytes2hex
from pydicom.valuerep import extra_length_VRs
@@ -41,7 +41,7 @@ from pydicom.dataset import (
)
from pydicom.dicomdir import DicomDir
-from pydicom.datadict import dictionary_VR
+from pydicom.datadict import dictionary_VR, tag_for_keyword
from pydicom.dataelem import DataElement
from pydicom.tag import (
ItemTag,
@@ -159,7 +159,8 @@ def data_element_generator(fp,
is_little_endian,
stop_when=None,
defer_size=None,
- encoding=default_encoding):
+ encoding=default_encoding,
+ specific_tags=None):
"""Create a generator to efficiently return the raw data elements.
@@ -177,6 +178,8 @@ def data_element_generator(fp,
See ``read_file`` for parameter info.
encoding :
Encoding scheme
+ specific_tags : list or None
+ See ``read_file`` for parameter info.
Returns
-------
@@ -230,6 +233,18 @@ def data_element_generator(fp,
element_struct_unpack = element_struct.unpack
defer_size = size_in_bytes(defer_size)
+ tag_set = set()
+ has_specific_char_set = True
+ if specific_tags is not None:
+ for tag in specific_tags:
+ if isinstance(tag, (str, compat.text_type)):
+ tag = Tag(tag_for_keyword(tag))
+ if isinstance(tag, BaseTag):
+ tag_set.add(tag)
+ has_specific_char_set = Tag(0x08, 0x05) in tag_set
+ tag_set.add(Tag(0x08, 0x05))
+ has_tag_set = len(tag_set) > 0
+
while True:
# Read tag, VR, length, get ready to read value
bytes_read = fp_read(8)
@@ -282,6 +297,11 @@ def data_element_generator(fp,
if length != 0xFFFFFFFF:
# don't defer loading of Specific Character Set value as it is
# needed immediately to get the character encoding for other tags
+ if has_tag_set and tag not in tag_set:
+ # skip the tag if not in specific tags
+ fp.seek(fp_tell() + length)
+ continue
+
if defer_size is not None and length > defer_size and tag != (
0x08, 0x05):
# Flag as deferred by setting value to None, and skip bytes
@@ -309,6 +329,8 @@ def data_element_generator(fp,
# Store the encoding value in the generator
# for use with future elements (SQs)
encoding = convert_encodings(encoding)
+ if not has_specific_char_set:
+ continue
yield RawDataElement(tag, VR, length, value, value_tell,
is_implicit_VR, is_little_endian)
@@ -340,6 +362,8 @@ def data_element_generator(fp,
logger_debug(msg.format(fp_tell()))
seq = read_sequence(fp, is_implicit_VR,
is_little_endian, length, encoding)
+ if has_tag_set and tag not in tag_set:
+ continue
yield DataElement(tag, VR, seq, value_tell,
is_undefined_length=True)
else:
@@ -358,14 +382,19 @@ def data_element_generator(fp,
# Store the encoding value in the generator for use
# with future elements (SQs)
encoding = convert_encodings(encoding)
+ if not has_specific_char_set:
+ continue
+ # tags with undefined length are skipped after read
+ if has_tag_set and tag not in tag_set:
+ continue
yield RawDataElement(tag, VR, length, value, value_tell,
is_implicit_VR, is_little_endian)
def read_dataset(fp, is_implicit_VR, is_little_endian, bytelength=None,
stop_when=None, defer_size=None,
- parent_encoding=default_encoding):
+ parent_encoding=default_encoding, specific_tags=None):
"""Return a Dataset instance containing the next dataset in the file.
Parameters
@@ -387,6 +416,8 @@ def read_dataset(fp, is_implicit_VR, is_little_endian, bytelength=None,
parent_encoding :
optional encoding to use as a default in case
a Specific Character Set (0008,0005) isn't specified
+ specific_tags : list or None
+ See ``read_file`` for parameter info.
Returns
-------
@@ -400,7 +431,8 @@ def read_dataset(fp, is_implicit_VR, is_little_endian, bytelength=None,
raw_data_elements = dict()
fpStart = fp.tell()
de_gen = data_element_generator(fp, is_implicit_VR, is_little_endian,
- stop_when, defer_size, parent_encoding)
+ stop_when, defer_size, parent_encoding,
+ specific_tags)
try:
while (bytelength is None) or (fp.tell() - fpStart < bytelength):
raw_data_element = next(de_gen)
@@ -635,7 +667,8 @@ def _at_pixel_data(tag, VR, length):
return tag == (0x7fe0, 0x0010)
-def read_partial(fileobj, stop_when=None, defer_size=None, force=False):
+def read_partial(fileobj, stop_when=None, defer_size=None,
+ force=False, specific_tags=None):
"""Parse a DICOM file until a condition is met.
Parameters
@@ -648,6 +681,8 @@ def read_partial(fileobj, stop_when=None, defer_size=None, force=False):
See ``read_file`` for parameter info.
force : boolean
See ``read_file`` for parameter info.
+ specific_tags : list or None
+ See ``read_file`` for parameter info.
Notes
-----
@@ -741,7 +776,8 @@ def read_partial(fileobj, stop_when=None, defer_size=None, force=False):
# the transfer syntax (whether read from the file meta or guessed at)
try:
dataset = read_dataset(fileobj, is_implicit_VR, is_little_endian,
- stop_when=stop_when, defer_size=defer_size)
+ stop_when=stop_when, defer_size=defer_size,
+ specific_tags=specific_tags)
except EOFError:
pass # error already logged in read_dataset
@@ -757,7 +793,8 @@ def read_partial(fileobj, stop_when=None, defer_size=None, force=False):
is_implicit_VR, is_little_endian)
-def read_file(fp, defer_size=None, stop_before_pixels=False, force=False):
+def read_file(fp, defer_size=None, stop_before_pixels=False,
+ force=False, specific_tags=None):
"""Read and parse a DICOM dataset stored in the DICOM File Format.
Read a DICOM dataset stored in accordance with the DICOM File Format
@@ -785,6 +822,9 @@ def read_file(fp, defer_size=None, stop_before_pixels=False, force=False):
If False (default), raises an InvalidDicomError if the file is missing
the File Meta Information header. Set to True to force reading even if
no File Meta Information header is found.
+ specific_tags : list or None
+ If not None, only the tags in the list are returned. The list
+ elements can be tags or tag names.
Returns
-------
@@ -832,8 +872,9 @@ def read_file(fp, defer_size=None, stop_before_pixels=False, force=False):
logger.debug("\n" + "-" * 80)
logger.debug("Call to read_file()")
msg = ("filename:'%s', defer_size='%s', "
- "stop_before_pixels=%s, force=%s")
- logger.debug(msg % (fp.name, defer_size, stop_before_pixels, force))
+ "stop_before_pixels=%s, force=%s, specific_tags=%s")
+ logger.debug(msg % (fp.name, defer_size, stop_before_pixels,
+ force, specific_tags))
if caller_owns_file:
logger.debug("Caller passed file object")
else:
@@ -849,7 +890,7 @@ def read_file(fp, defer_size=None, stop_before_pixels=False, force=False):
stop_when = _at_pixel_data
try:
dataset = read_partial(fp, stop_when, defer_size=defer_size,
- force=force)
+ force=force, specific_tags=specific_tags)
finally:
if not caller_owns_file:
fp.close()
| Add ability to read only specific tags
_From [[email protected]](https://code.google.com/u/101862332387277300822/) on December 03, 2010 11:13:32_
On my DICOMs, (2D slices from a GE MRI scanner) I've found that telling PyDICOM to skip the pixel data is actually slower than reading it. This is both true when repeatedly re-reading one file with timeit, or in running time_test.py
1. Point time_test.py at a directory full of dicoms
2. Enable only test_full_read() and test_partial(). Don't set dataset=.
3. Run a bunch of times, compare times.
test_full_read()
601937 function calls (601137 primitive calls) in 1.750 CPU seconds
ncalls tottime percall cumtime percall filename:lineno(function)
102100 0.906 0.000 0.906 0.000 {method 'read' of 'file' objects}
34200/33600 0.211 0.000 0.656 0.000 filereader.py:150(data_element_generator)
35100 0.141 0.000 0.171 0.000 tag.py:8(Tag)
400/200 0.120 0.000 1.032 0.005 filereader.py:259(read_dataset)
33800 0.086 0.000 0.086 0.000 {built-in method **new** of type object at 0x100126080}
test_partial()
829791 function calls (828991 primitive calls) in 2.355 CPU seconds
ncalls tottime percall cumtime percall filename:lineno(function)
101900 1.048 0.000 1.048 0.000 {method 'read' of 'file' objects}
67800 0.275 0.000 0.327 0.000 tag.py:8(Tag)
34100/33500 0.228 0.000 0.958 0.000 filereader.py:150(data_element_generator)
1 0.162 0.162 2.348 2.348 time_test.py:69(test_partial)
67200 0.134 0.000 0.502 0.000 tag.py:56(**eq**)
Or, in a python shell:
1. r_all = "d = dicom.read_file('s11_epi.0001')"
2. r_nopix = "d = dicom.read_file('s11_epi.0001', stop_before_pixels=True)"
3: timeit.timeit(r_all, "import dicom", number=500) # 2.7346301078796387
4: timeit.timeit(r_nopix, "import dicom", number=500) # 3.6994130611419678
I wouldn't be surprised to not save much time in reading these, but to have it be significantly slower? That's odd.
I'm using the version easy_install grabbed for me on December 2, 2010.
I can't post any dicoms to Google, but I could send you a few.
Cheers,
-Nate
_Original issue: http://code.google.com/p/pydicom/issues/detail?id=94_
| pydicom/pydicom | diff --git a/pydicom/tests/test_charset.py b/pydicom/tests/test_charset.py
index a8b41de2c..e54f516c8 100644
--- a/pydicom/tests/test_charset.py
+++ b/pydicom/tests/test_charset.py
@@ -79,6 +79,17 @@ class charsetTests(unittest.TestCase):
ds = dicomio.read_file(multiPN_file)
ds.decode()
+ def testEncodingWithSpecificTags(self):
+ """Encoding is correctly applied even if Specific Character Set
+ is not in specific tags..."""
+ ds = dicomio.read_file(jp_file, specific_tags=['PatientName'])
+ ds.decode()
+ self.assertEqual(1, len(ds))
+ expected = ('Yamada^Tarou='
+ '\033$B;3ED\033(B^\033$BB@O:\033(B='
+ '\033$B$d$^$@\033(B^\033$B$?$m$&\033(B')
+ self.assertEqual(expected, ds.PatientName)
+
if __name__ == "__main__":
# This is called if run alone, but not if loaded through run_tests.py
diff --git a/pydicom/tests/test_filereader.py b/pydicom/tests/test_filereader.py
index c89d4b15b..5a81c80ae 100644
--- a/pydicom/tests/test_filereader.py
+++ b/pydicom/tests/test_filereader.py
@@ -310,6 +310,43 @@ class ReaderTests(unittest.TestCase):
missing = [Tag(0x7fe0, 0x10), Tag(0xfffc, 0xfffc)]
self.assertEqual(ctfull_tags, ctpartial_tags + missing, msg)
+ def testSpecificTags(self):
+ """Returns only tags specified by user."""
+ ctspecific = read_file(ct_name, specific_tags=[
+ Tag(0x0010, 0x0010), 'PatientID', 'ImageType', 'ViewName'])
+ ctspecific_tags = sorted(ctspecific.keys())
+ expected = [
+ # ViewName does not exist in the data set
+ Tag(0x0008, 0x0008), Tag(0x0010, 0x0010), Tag(0x0010, 0x0020)
+ ]
+ self.assertEqual(expected, ctspecific_tags)
+
+ def testSpecificTagsWithUnknownLengthSQ(self):
+ """Returns only tags specified by user."""
+ unknown_len_sq_tag = Tag(0x3f03, 0x1001)
+ tags = read_file(priv_SQ_name, specific_tags=[
+ unknown_len_sq_tag])
+ tags = sorted(tags.keys())
+ self.assertEqual([unknown_len_sq_tag], tags)
+
+ tags = read_file(priv_SQ_name, specific_tags=[
+ 'PatientName'])
+ tags = sorted(tags.keys())
+ self.assertEqual([], tags)
+
+ def testSpecificTagsWithUnknownLengthTag(self):
+ """Returns only tags specified by user."""
+ unknown_len_tag = Tag(0x7fe0, 0x0010) # Pixel Data
+ tags = read_file(emri_jpeg_2k_lossless, specific_tags=[
+ unknown_len_tag])
+ tags = sorted(tags.keys())
+ self.assertEqual([unknown_len_tag], tags)
+
+ tags = read_file(emri_jpeg_2k_lossless, specific_tags=[
+ 'SpecificCharacterSet'])
+ tags = sorted(tags.keys())
+ self.assertEqual([Tag(0x08, 0x05)], tags)
+
def testPrivateSQ(self):
"""Can read private undefined length SQ without error."""
# From issues 91, 97, 98. Bug introduced by fast reading, due to
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 1
} | 0.9 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"numpy>=1.16.0",
"pytest"
],
"pre_install": null,
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
importlib-metadata==4.8.3
iniconfig==1.1.1
numpy==1.19.5
packaging==21.3
pluggy==1.0.0
py==1.11.0
-e git+https://github.com/pydicom/pydicom.git@352049ca4e2bd53ef689484aed57933d067512a7#egg=pydicom
pyparsing==3.1.4
pytest==7.0.1
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: pydicom
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- numpy==1.19.5
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/pydicom
| [
"pydicom/tests/test_charset.py::charsetTests::testEncodingWithSpecificTags",
"pydicom/tests/test_filereader.py::ReaderTests::testSpecificTags",
"pydicom/tests/test_filereader.py::ReaderTests::testSpecificTagsWithUnknownLengthSQ",
"pydicom/tests/test_filereader.py::ReaderTests::testSpecificTagsWithUnknownLengthTag"
] | [
"pydicom/tests/test_filereader.py::DeferredReadTests::testFileExists",
"pydicom/tests/test_filereader.py::DeferredReadTests::testTimeCheck",
"pydicom/tests/test_filereader.py::DeferredReadTests::testValuesIdentical",
"pydicom/tests/test_filereader.py::DeferredReadTests::testZippedDeferred"
] | [
"pydicom/tests/test_charset.py::charsetTests::testEncodings",
"pydicom/tests/test_charset.py::charsetTests::testExplicitISO2022_IR6",
"pydicom/tests/test_charset.py::charsetTests::testLatin1",
"pydicom/tests/test_charset.py::charsetTests::testMultiPN",
"pydicom/tests/test_charset.py::charsetTests::testNestedCharacterSets",
"pydicom/tests/test_charset.py::charsetTests::testStandardFile",
"pydicom/tests/test_filereader.py::ReaderTests::testCT",
"pydicom/tests/test_filereader.py::ReaderTests::testCTPixelData",
"pydicom/tests/test_filereader.py::ReaderTests::testDeflate",
"pydicom/tests/test_filereader.py::ReaderTests::testDir",
"pydicom/tests/test_filereader.py::ReaderTests::testEmptyNumbersTag",
"pydicom/tests/test_filereader.py::ReaderTests::testExplicitVRBigEndianNoMeta",
"pydicom/tests/test_filereader.py::ReaderTests::testExplicitVRLittleEndianNoMeta",
"pydicom/tests/test_filereader.py::ReaderTests::testMR",
"pydicom/tests/test_filereader.py::ReaderTests::testNestedPrivateSQ",
"pydicom/tests/test_filereader.py::ReaderTests::testNoForce",
"pydicom/tests/test_filereader.py::ReaderTests::testNoMetaGroupLength",
"pydicom/tests/test_filereader.py::ReaderTests::testNoPixelsRead",
"pydicom/tests/test_filereader.py::ReaderTests::testNoTransferSyntaxInMeta",
"pydicom/tests/test_filereader.py::ReaderTests::testPlanarConfig",
"pydicom/tests/test_filereader.py::ReaderTests::testPrivateSQ",
"pydicom/tests/test_filereader.py::ReaderTests::testRTDose",
"pydicom/tests/test_filereader.py::ReaderTests::testRTPlan",
"pydicom/tests/test_filereader.py::ReaderTests::testRTstruct",
"pydicom/tests/test_filereader.py::ReaderTests::testUTF8FileName",
"pydicom/tests/test_filereader.py::ReaderTests::test_commandset_no_dataset",
"pydicom/tests/test_filereader.py::ReaderTests::test_correct_ambiguous_vr",
"pydicom/tests/test_filereader.py::ReaderTests::test_correct_ambiguous_vr_compressed",
"pydicom/tests/test_filereader.py::ReaderTests::test_group_length_wrong",
"pydicom/tests/test_filereader.py::ReaderTests::test_long_specific_char_set",
"pydicom/tests/test_filereader.py::ReaderTests::test_meta_no_dataset",
"pydicom/tests/test_filereader.py::ReaderTests::test_no_dataset",
"pydicom/tests/test_filereader.py::ReaderTests::test_no_preamble_command_group_dataset",
"pydicom/tests/test_filereader.py::ReaderTests::test_no_preamble_file_meta_dataset",
"pydicom/tests/test_filereader.py::ReaderTests::test_preamble_command_meta_no_dataset",
"pydicom/tests/test_filereader.py::ReaderTests::test_preamble_commandset_no_dataset",
"pydicom/tests/test_filereader.py::ReaderTests::test_preamble_meta_no_dataset",
"pydicom/tests/test_filereader.py::ReadDataElementTests::test_read_AE",
"pydicom/tests/test_filereader.py::ReadDataElementTests::test_read_OD_explicit_little",
"pydicom/tests/test_filereader.py::ReadDataElementTests::test_read_OD_implicit_little",
"pydicom/tests/test_filereader.py::ReadDataElementTests::test_read_OL_explicit_little",
"pydicom/tests/test_filereader.py::ReadDataElementTests::test_read_OL_implicit_little",
"pydicom/tests/test_filereader.py::ReadDataElementTests::test_read_UC_explicit_little",
"pydicom/tests/test_filereader.py::ReadDataElementTests::test_read_UC_implicit_little",
"pydicom/tests/test_filereader.py::ReadDataElementTests::test_read_UR_explicit_little",
"pydicom/tests/test_filereader.py::ReadDataElementTests::test_read_UR_implicit_little",
"pydicom/tests/test_filereader.py::JPEG_LS_Tests::testJPEG_LS_PixelArray",
"pydicom/tests/test_filereader.py::JPEG_LS_Tests::test_emri_JPEG_LS_PixelArray",
"pydicom/tests/test_filereader.py::BigEndian_Tests::test_big_endian_PixelArray",
"pydicom/tests/test_filereader.py::JPEG2000Tests::testJPEG2000",
"pydicom/tests/test_filereader.py::JPEG2000Tests::testJPEG2000PixelArray",
"pydicom/tests/test_filereader.py::JPEG2000Tests::test_emri_JPEG2000PixelArray",
"pydicom/tests/test_filereader.py::JPEGlossyTests::testJPEGBaselineColor3DPixelArray",
"pydicom/tests/test_filereader.py::JPEGlossyTests::testJPEGlossy",
"pydicom/tests/test_filereader.py::JPEGlossyTests::testJPEGlossyPixelArray",
"pydicom/tests/test_filereader.py::JPEGlosslessTests::testJPEGlossless",
"pydicom/tests/test_filereader.py::JPEGlosslessTests::testJPEGlosslessPixelArray",
"pydicom/tests/test_filereader.py::ReadTruncatedFileTests::testReadFileWithMissingPixelData",
"pydicom/tests/test_filereader.py::ReadTruncatedFileTests::testReadFileWithMissingPixelDataArray",
"pydicom/tests/test_filereader.py::FileLikeTests::testReadFileGivenFileLikeObject",
"pydicom/tests/test_filereader.py::FileLikeTests::testReadFileGivenFileObject"
] | [] | MIT License | 1,494 | 2,113 | [
"pydicom/filereader.py"
] |
Unidata__siphon-143 | e61c876c455ad6250594eddec57d08d454e9e611 | 2017-07-21 20:50:19 | e61c876c455ad6250594eddec57d08d454e9e611 | diff --git a/siphon/ncss.py b/siphon/ncss.py
index 7f1a971e..da1a11e8 100644
--- a/siphon/ncss.py
+++ b/siphon/ncss.py
@@ -88,7 +88,7 @@ class NCSS(HTTPEndPoint):
"""
# Make sure all variables are in the dataset
- return query.var and all(var in self.variables for var in query.var)
+ return bool(query.var) and all(var in self.variables for var in query.var)
def get_data(self, query):
"""Fetch parsed data from a THREDDS server using NCSS.
| NCSS validate_query with no variables
``` python
from siphon.ncss import NCSS
ncss = NCSS('http://thredds.ucar.edu/thredds/ncss/nws/metar/ncdecoded/Metar_Station_Data_fc.cdmr')
query = ncss.query()
ncss.validate_query(query)
```
returns `set()`
Looks like the nice short-circuit behavior results in the return of the empty variable query--I think a call to `bool()` is in order.
| Unidata/siphon | diff --git a/siphon/tests/test_ncss.py b/siphon/tests/test_ncss.py
index 4cee1576..1d0b7b70 100644
--- a/siphon/tests/test_ncss.py
+++ b/siphon/tests/test_ncss.py
@@ -92,6 +92,13 @@ class TestNCSS(object):
self.nq.variables('foo')
assert not self.ncss.validate_query(self.nq)
+ def test_empty_query(self):
+ """Test that an empty query is invalid."""
+ query = self.ncss.query()
+ res = self.ncss.validate_query(query)
+ assert not res
+ assert not isinstance(res, set)
+
def test_bad_query_no_vars(self):
"""Test that a query without variables is invalid."""
self.nq.var.clear()
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | 0.4 | {
"env_vars": null,
"env_yml_path": [
"environment.yml"
],
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "environment.yml",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster @ file:///home/conda/feedstock_root/build_artifacts/alabaster_1673645646525/work
argon2-cffi @ file:///home/conda/feedstock_root/build_artifacts/argon2-cffi_1633990451307/work
async_generator @ file:///home/conda/feedstock_root/build_artifacts/async_generator_1722652753231/work
attrs @ file:///home/conda/feedstock_root/build_artifacts/attrs_1671632566681/work
Babel @ file:///home/conda/feedstock_root/build_artifacts/babel_1667688356751/work
backcall @ file:///home/conda/feedstock_root/build_artifacts/backcall_1592338393461/work
backports.functools-lru-cache @ file:///home/conda/feedstock_root/build_artifacts/backports.functools_lru_cache_1702571698061/work
bleach @ file:///home/conda/feedstock_root/build_artifacts/bleach_1696630167146/work
brotlipy==0.7.0
Cartopy @ file:///home/conda/feedstock_root/build_artifacts/cartopy_1630680837223/work
certifi==2021.5.30
cffi @ file:///home/conda/feedstock_root/build_artifacts/cffi_1631636256886/work
cftime @ file:///home/conda/feedstock_root/build_artifacts/cftime_1632539733990/work
charset-normalizer @ file:///home/conda/feedstock_root/build_artifacts/charset-normalizer_1661170624537/work
colorama @ file:///home/conda/feedstock_root/build_artifacts/colorama_1655412516417/work
coverage @ file:///home/conda/feedstock_root/build_artifacts/coverage_1633450575846/work
cryptography @ file:///home/conda/feedstock_root/build_artifacts/cryptography_1634230300355/work
cycler @ file:///home/conda/feedstock_root/build_artifacts/cycler_1635519461629/work
decorator @ file:///home/conda/feedstock_root/build_artifacts/decorator_1641555617451/work
defusedxml @ file:///home/conda/feedstock_root/build_artifacts/defusedxml_1615232257335/work
doc8 @ file:///home/conda/feedstock_root/build_artifacts/doc8_1652824562281/work
docutils @ file:///home/conda/feedstock_root/build_artifacts/docutils_1618676244774/work
entrypoints @ file:///home/conda/feedstock_root/build_artifacts/entrypoints_1643888246732/work
flake8 @ file:///home/conda/feedstock_root/build_artifacts/flake8_1659645013175/work
flake8-builtins @ file:///home/conda/feedstock_root/build_artifacts/flake8-builtins_1589815207697/work
flake8-comprehensions @ file:///home/conda/feedstock_root/build_artifacts/flake8-comprehensions_1641851052064/work
flake8-copyright @ file:///home/conda/feedstock_root/build_artifacts/flake8-copyright_1676003148518/work
flake8-docstrings @ file:///home/conda/feedstock_root/build_artifacts/flake8-docstrings_1616176909510/work
flake8-import-order @ file:///home/conda/feedstock_root/build_artifacts/flake8-import-order_1669670271290/work
flake8-mutable==1.2.0
flake8-pep3101==1.3.0
flake8-polyfill==1.0.2
flake8-print @ file:///home/conda/feedstock_root/build_artifacts/flake8-print_1606721773021/work
flake8-quotes @ file:///home/conda/feedstock_root/build_artifacts/flake8-quotes_1707605925191/work
idna @ file:///home/conda/feedstock_root/build_artifacts/idna_1726459485162/work
imagesize @ file:///home/conda/feedstock_root/build_artifacts/imagesize_1656939531508/work
importlib-metadata @ file:///home/conda/feedstock_root/build_artifacts/importlib-metadata_1630267465156/work
importlib-resources==5.4.0
iniconfig @ file:///home/conda/feedstock_root/build_artifacts/iniconfig_1603384189793/work
ipykernel @ file:///home/conda/feedstock_root/build_artifacts/ipykernel_1620912934572/work/dist/ipykernel-5.5.5-py3-none-any.whl
ipyparallel==8.2.1
ipython @ file:///home/conda/feedstock_root/build_artifacts/ipython_1609697613279/work
ipython_genutils @ file:///home/conda/feedstock_root/build_artifacts/ipython_genutils_1716278396992/work
ipywidgets @ file:///home/conda/feedstock_root/build_artifacts/ipywidgets_1679421482533/work
jedi @ file:///home/conda/feedstock_root/build_artifacts/jedi_1605054537831/work
Jinja2 @ file:///home/conda/feedstock_root/build_artifacts/jinja2_1636510082894/work
jsonschema @ file:///home/conda/feedstock_root/build_artifacts/jsonschema_1634752161479/work
jupyter @ file:///home/conda/feedstock_root/build_artifacts/jupyter_1696255489086/work
jupyter-client @ file:///home/conda/feedstock_root/build_artifacts/jupyter_client_1642858610849/work
jupyter-console @ file:///home/conda/feedstock_root/build_artifacts/jupyter_console_1676328545892/work
jupyter-core @ file:///home/conda/feedstock_root/build_artifacts/jupyter_core_1631852698933/work
jupyterlab-pygments @ file:///home/conda/feedstock_root/build_artifacts/jupyterlab_pygments_1601375948261/work
jupyterlab-widgets @ file:///home/conda/feedstock_root/build_artifacts/jupyterlab_widgets_1655961217661/work
kiwisolver @ file:///home/conda/feedstock_root/build_artifacts/kiwisolver_1610099771815/work
MarkupSafe @ file:///home/conda/feedstock_root/build_artifacts/markupsafe_1621455668064/work
matplotlib @ file:///home/conda/feedstock_root/build_artifacts/matplotlib-suite_1611858699142/work
mccabe @ file:///home/conda/feedstock_root/build_artifacts/mccabe_1643049622439/work
mistune @ file:///home/conda/feedstock_root/build_artifacts/mistune_1673904152039/work
more-itertools @ file:///home/conda/feedstock_root/build_artifacts/more-itertools_1690211628840/work
multidict @ file:///home/conda/feedstock_root/build_artifacts/multidict_1633329770033/work
nbclient @ file:///home/conda/feedstock_root/build_artifacts/nbclient_1637327213451/work
nbconvert @ file:///home/conda/feedstock_root/build_artifacts/nbconvert_1605401832871/work
nbformat @ file:///home/conda/feedstock_root/build_artifacts/nbformat_1617383142101/work
nest_asyncio @ file:///home/conda/feedstock_root/build_artifacts/nest-asyncio_1705850609492/work
netCDF4 @ file:///home/conda/feedstock_root/build_artifacts/netcdf4_1633096406418/work
nose==1.3.7
notebook @ file:///home/conda/feedstock_root/build_artifacts/notebook_1616419146127/work
numpy @ file:///home/conda/feedstock_root/build_artifacts/numpy_1626681920064/work
olefile @ file:///home/conda/feedstock_root/build_artifacts/olefile_1602866521163/work
packaging @ file:///home/conda/feedstock_root/build_artifacts/packaging_1637239678211/work
pandas==1.1.5
pandocfilters @ file:///home/conda/feedstock_root/build_artifacts/pandocfilters_1631603243851/work
parso @ file:///home/conda/feedstock_root/build_artifacts/parso_1595548966091/work
pbr @ file:///home/conda/feedstock_root/build_artifacts/pbr_1724777609752/work
pep8-naming @ file:///home/conda/feedstock_root/build_artifacts/pep8-naming_1628397497711/work
pexpect @ file:///home/conda/feedstock_root/build_artifacts/pexpect_1667297516076/work
pickleshare @ file:///home/conda/feedstock_root/build_artifacts/pickleshare_1602536217715/work
Pillow @ file:///home/conda/feedstock_root/build_artifacts/pillow_1630696616009/work
pluggy @ file:///home/conda/feedstock_root/build_artifacts/pluggy_1631522669284/work
prometheus-client @ file:///home/conda/feedstock_root/build_artifacts/prometheus_client_1689032443210/work
prompt-toolkit @ file:///home/conda/feedstock_root/build_artifacts/prompt-toolkit_1670414775770/work
protobuf==3.18.0
psutil==7.0.0
ptyprocess @ file:///home/conda/feedstock_root/build_artifacts/ptyprocess_1609419310487/work/dist/ptyprocess-0.7.0-py2.py3-none-any.whl
py @ file:///home/conda/feedstock_root/build_artifacts/py_1636301881863/work
pycodestyle @ file:///home/conda/feedstock_root/build_artifacts/pycodestyle_1659638152915/work
pycparser @ file:///home/conda/feedstock_root/build_artifacts/pycparser_1636257122734/work
pydocstyle @ file:///home/conda/feedstock_root/build_artifacts/pydocstyle_1672787369895/work
pyflakes @ file:///home/conda/feedstock_root/build_artifacts/pyflakes_1659210156976/work
Pygments @ file:///home/conda/feedstock_root/build_artifacts/pygments_1672682006896/work
pyOpenSSL @ file:///home/conda/feedstock_root/build_artifacts/pyopenssl_1663846997386/work
pyparsing @ file:///home/conda/feedstock_root/build_artifacts/pyparsing_1724616129934/work
PyQt5==5.12.3
PyQt5_sip==4.19.18
PyQtChart==5.12
PyQtWebEngine==5.12.1
pyrsistent @ file:///home/conda/feedstock_root/build_artifacts/pyrsistent_1610146795286/work
pyshp @ file:///home/conda/feedstock_root/build_artifacts/pyshp_1659002966020/work
PySocks @ file:///home/conda/feedstock_root/build_artifacts/pysocks_1610291458349/work
pytest==6.2.5
pytest-cov @ file:///home/conda/feedstock_root/build_artifacts/pytest-cov_1664412836798/work
pytest-flake8 @ file:///home/conda/feedstock_root/build_artifacts/pytest-flake8_1646767752166/work
pytest-runner @ file:///home/conda/feedstock_root/build_artifacts/pytest-runner_1646127837850/work
python-dateutil @ file:///home/conda/feedstock_root/build_artifacts/python-dateutil_1626286286081/work
pytz @ file:///home/conda/feedstock_root/build_artifacts/pytz_1693930252784/work
PyYAML==5.4.1
pyzmq @ file:///home/conda/feedstock_root/build_artifacts/pyzmq_1631793305981/work
qtconsole @ file:///home/conda/feedstock_root/build_artifacts/qtconsole-base_1640876679830/work
QtPy @ file:///home/conda/feedstock_root/build_artifacts/qtpy_1643828301492/work
requests @ file:///home/conda/feedstock_root/build_artifacts/requests_1656534056640/work
restructuredtext-lint @ file:///home/conda/feedstock_root/build_artifacts/restructuredtext_lint_1645724685739/work
scipy @ file:///home/conda/feedstock_root/build_artifacts/scipy_1629411471490/work
Send2Trash @ file:///home/conda/feedstock_root/build_artifacts/send2trash_1682601222253/work
Shapely @ file:///home/conda/feedstock_root/build_artifacts/shapely_1628205367507/work
-e git+https://github.com/Unidata/siphon.git@e61c876c455ad6250594eddec57d08d454e9e611#egg=siphon
six @ file:///home/conda/feedstock_root/build_artifacts/six_1620240208055/work
snowballstemmer @ file:///home/conda/feedstock_root/build_artifacts/snowballstemmer_1637143057757/work
Sphinx @ file:///home/conda/feedstock_root/build_artifacts/sphinx_1658872348413/work
sphinx-gallery @ file:///home/conda/feedstock_root/build_artifacts/sphinx-gallery_1700542355088/work
sphinxcontrib-applehelp @ file:///home/conda/feedstock_root/build_artifacts/sphinxcontrib-applehelp_1674487779667/work
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp @ file:///home/conda/feedstock_root/build_artifacts/sphinxcontrib-htmlhelp_1675256494457/work
sphinxcontrib-jsmath @ file:///home/conda/feedstock_root/build_artifacts/sphinxcontrib-jsmath_1691604704163/work
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml @ file:///home/conda/feedstock_root/build_artifacts/sphinxcontrib-serializinghtml_1649380998999/work
stevedore @ file:///home/conda/feedstock_root/build_artifacts/stevedore_1629395095970/work
terminado @ file:///home/conda/feedstock_root/build_artifacts/terminado_1631128154882/work
testpath @ file:///home/conda/feedstock_root/build_artifacts/testpath_1645693042223/work
toml @ file:///home/conda/feedstock_root/build_artifacts/toml_1604308577558/work
tomli @ file:///home/conda/feedstock_root/build_artifacts/tomli_1635181214134/work
tornado @ file:///home/conda/feedstock_root/build_artifacts/tornado_1610094701020/work
tqdm==4.64.1
traitlets @ file:///home/conda/feedstock_root/build_artifacts/traitlets_1631041982274/work
typing_extensions @ file:///home/conda/feedstock_root/build_artifacts/typing_extensions_1644850595256/work
urllib3 @ file:///home/conda/feedstock_root/build_artifacts/urllib3_1678635778344/work
vcrpy @ file:///home/conda/feedstock_root/build_artifacts/vcrpy_1602284745577/work
wcwidth @ file:///home/conda/feedstock_root/build_artifacts/wcwidth_1699959196938/work
webencodings @ file:///home/conda/feedstock_root/build_artifacts/webencodings_1694681268211/work
widgetsnbextension @ file:///home/conda/feedstock_root/build_artifacts/widgetsnbextension_1655939017940/work
wrapt @ file:///home/conda/feedstock_root/build_artifacts/wrapt_1633440474617/work
xarray @ file:///home/conda/feedstock_root/build_artifacts/xarray_1621474818012/work
yarl @ file:///home/conda/feedstock_root/build_artifacts/yarl_1625232870338/work
zipp @ file:///home/conda/feedstock_root/build_artifacts/zipp_1633302054558/work
| name: siphon
channels:
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=conda_forge
- _openmp_mutex=4.5=2_gnu
- alabaster=0.7.13=pyhd8ed1ab_0
- alsa-lib=1.2.7.2=h166bdaf_0
- argon2-cffi=21.1.0=py36h8f6f2f9_0
- async_generator=1.10=pyhd8ed1ab_1
- attrs=22.2.0=pyh71513ae_0
- babel=2.11.0=pyhd8ed1ab_0
- backcall=0.2.0=pyh9f0ad1d_0
- backports=1.0=pyhd8ed1ab_4
- backports.functools_lru_cache=2.0.0=pyhd8ed1ab_0
- bleach=6.1.0=pyhd8ed1ab_0
- brotlipy=0.7.0=py36h8f6f2f9_1001
- bzip2=1.0.8=h4bc722e_7
- c-ares=1.34.4=hb9d3cd8_0
- ca-certificates=2025.1.31=hbcca054_0
- cartopy=0.19.0.post1=py36hbcbf2fa_1
- certifi=2021.5.30=py36h5fab9bb_0
- cffi=1.14.6=py36hd8eec40_1
- cftime=1.5.1=py36he33b4a0_0
- charset-normalizer=2.1.1=pyhd8ed1ab_0
- colorama=0.4.5=pyhd8ed1ab_0
- coverage=6.0=py36h8f6f2f9_1
- cryptography=35.0.0=py36hb60f036_0
- curl=7.87.0=h6312ad2_0
- cycler=0.11.0=pyhd8ed1ab_0
- dbus=1.13.6=h5008d03_3
- decorator=5.1.1=pyhd8ed1ab_0
- defusedxml=0.7.1=pyhd8ed1ab_0
- doc8=0.11.2=pyhd8ed1ab_0
- docutils=0.17.1=py36h5fab9bb_0
- entrypoints=0.4=pyhd8ed1ab_0
- expat=2.6.4=h5888daf_0
- flake8=5.0.4=pyhd8ed1ab_0
- flake8-builtins=1.5.3=pyh9f0ad1d_0
- flake8-comprehensions=3.8.0=pyhd8ed1ab_0
- flake8-copyright=0.2.4=pyhd8ed1ab_0
- flake8-docstrings=1.6.0=pyhd8ed1ab_0
- flake8-import-order=0.18.2=pyhd8ed1ab_0
- flake8-mutable=1.2.0=py_1
- flake8-pep3101=1.3.0=py_0
- flake8-polyfill=1.0.2=py_0
- flake8-print=4.0.0=pyhd8ed1ab_0
- flake8-quotes=3.4.0=pyhd8ed1ab_0
- font-ttf-dejavu-sans-mono=2.37=hab24e00_0
- font-ttf-inconsolata=3.000=h77eed37_0
- font-ttf-source-code-pro=2.038=h77eed37_0
- font-ttf-ubuntu=0.83=h77eed37_3
- fontconfig=2.14.2=h14ed4e7_0
- fonts-conda-ecosystem=1=0
- fonts-conda-forge=1=0
- freetype=2.12.1=h267a509_2
- geos=3.9.1=h9c3ff4c_2
- gettext=0.23.1=h5888daf_0
- gettext-tools=0.23.1=h5888daf_0
- glib=2.80.2=hf974151_0
- glib-tools=2.80.2=hb6ce0ca_0
- gst-plugins-base=1.20.3=h57caac4_2
- gstreamer=1.20.3=hd4edc92_2
- hdf4=4.2.15=h9772cbc_5
- hdf5=1.12.1=nompi_h2386368_104
- icu=69.1=h9c3ff4c_0
- idna=3.10=pyhd8ed1ab_0
- imagesize=1.4.1=pyhd8ed1ab_0
- importlib-metadata=4.8.1=py36h5fab9bb_0
- importlib_metadata=4.8.1=hd8ed1ab_1
- iniconfig=1.1.1=pyh9f0ad1d_0
- ipykernel=5.5.5=py36hcb3619a_0
- ipython=7.16.1=py36he448a4c_2
- ipython_genutils=0.2.0=pyhd8ed1ab_1
- ipywidgets=7.7.4=pyhd8ed1ab_0
- jedi=0.17.2=py36h5fab9bb_1
- jinja2=3.0.3=pyhd8ed1ab_0
- jpeg=9e=h0b41bf4_3
- jsonschema=4.1.2=pyhd8ed1ab_0
- jupyter=1.0.0=pyhd8ed1ab_10
- jupyter_client=7.1.2=pyhd8ed1ab_0
- jupyter_console=6.5.1=pyhd8ed1ab_0
- jupyter_core=4.8.1=py36h5fab9bb_0
- jupyterlab_pygments=0.1.2=pyh9f0ad1d_0
- jupyterlab_widgets=1.1.1=pyhd8ed1ab_0
- keyutils=1.6.1=h166bdaf_0
- kiwisolver=1.3.1=py36h605e78d_1
- krb5=1.20.1=hf9c8cef_0
- lcms2=2.12=hddcbb42_0
- ld_impl_linux-64=2.43=h712a8e2_4
- lerc=3.0=h9c3ff4c_0
- libasprintf=0.23.1=h8e693c7_0
- libasprintf-devel=0.23.1=h8e693c7_0
- libblas=3.9.0=20_linux64_openblas
- libcblas=3.9.0=20_linux64_openblas
- libclang=13.0.1=default_hb5137d0_10
- libcurl=7.87.0=h6312ad2_0
- libdeflate=1.10=h7f98852_0
- libedit=3.1.20250104=pl5321h7949ede_0
- libev=4.33=hd590300_2
- libevent=2.1.10=h9b69904_4
- libexpat=2.6.4=h5888daf_0
- libffi=3.4.6=h2dba641_0
- libgcc=14.2.0=h767d61c_2
- libgcc-ng=14.2.0=h69a702a_2
- libgettextpo=0.23.1=h5888daf_0
- libgettextpo-devel=0.23.1=h5888daf_0
- libgfortran=14.2.0=h69a702a_2
- libgfortran-ng=14.2.0=h69a702a_2
- libgfortran5=14.2.0=hf1ad2bd_2
- libglib=2.80.2=hf974151_0
- libgomp=14.2.0=h767d61c_2
- libiconv=1.18=h4ce23a2_1
- liblapack=3.9.0=20_linux64_openblas
- libllvm13=13.0.1=hf817b99_2
- liblzma=5.6.4=hb9d3cd8_0
- liblzma-devel=5.6.4=hb9d3cd8_0
- libnetcdf=4.8.1=nompi_h329d8a1_102
- libnghttp2=1.51.0=hdcd2b5c_0
- libnsl=2.0.1=hd590300_0
- libogg=1.3.5=h4ab18f5_0
- libopenblas=0.3.25=pthreads_h413a1c8_0
- libopus=1.3.1=h7f98852_1
- libpng=1.6.43=h2797004_0
- libpq=14.5=h2baec63_5
- libprotobuf=3.18.0=h780b84a_1
- libsodium=1.0.18=h36c2ea0_1
- libsqlite=3.46.0=hde9e2c9_0
- libssh2=1.10.0=haa6b8db_3
- libstdcxx=14.2.0=h8f9b012_2
- libstdcxx-ng=14.2.0=h4852527_2
- libtiff=4.3.0=h0fcbabc_4
- libuuid=2.38.1=h0b41bf4_0
- libvorbis=1.3.7=h9c3ff4c_0
- libwebp-base=1.5.0=h851e524_0
- libxcb=1.13=h7f98852_1004
- libxkbcommon=1.0.3=he3ba5ed_0
- libxml2=2.9.14=haae042b_4
- libzip=1.9.2=hc869a4a_1
- libzlib=1.2.13=h4ab18f5_6
- markupsafe=2.0.1=py36h8f6f2f9_0
- matplotlib=3.3.4=py36h5fab9bb_0
- matplotlib-base=3.3.4=py36hd391965_0
- mccabe=0.7.0=pyhd8ed1ab_0
- mistune=0.8.4=pyh1a96a4e_1006
- more-itertools=10.0.0=pyhd8ed1ab_0
- multidict=5.2.0=py36h8f6f2f9_0
- mysql-common=8.0.32=h14678bc_0
- mysql-libs=8.0.32=h54cf53e_0
- nbclient=0.5.9=pyhd8ed1ab_0
- nbconvert=6.0.7=py36h5fab9bb_3
- nbformat=5.1.3=pyhd8ed1ab_0
- ncurses=6.5=h2d0b736_3
- nest-asyncio=1.6.0=pyhd8ed1ab_0
- netcdf4=1.5.7=nompi_py36h775750b_103
- notebook=6.3.0=py36h5fab9bb_0
- nspr=4.36=h5888daf_0
- nss=3.100=hca3bf56_0
- numpy=1.19.5=py36hfc0c790_2
- olefile=0.46=pyh9f0ad1d_1
- openjpeg=2.5.0=h7d73246_0
- openssl=1.1.1w=hd590300_0
- packaging=21.3=pyhd8ed1ab_0
- pandas=1.1.5=py36h284efc9_0
- pandoc=2.19.2=h32600fe_2
- pandocfilters=1.5.0=pyhd8ed1ab_0
- parso=0.7.1=pyh9f0ad1d_0
- pbr=6.1.0=pyhd8ed1ab_0
- pcre2=10.43=hcad00b1_0
- pep8-naming=0.12.1=pyhd8ed1ab_0
- pexpect=4.8.0=pyh1a96a4e_2
- pickleshare=0.7.5=py_1003
- pillow=8.3.2=py36h676a545_0
- pip=21.3.1=pyhd8ed1ab_0
- pluggy=1.0.0=py36h5fab9bb_1
- proj=7.2.0=h277dcde_2
- prometheus_client=0.17.1=pyhd8ed1ab_0
- prompt-toolkit=3.0.36=pyha770c72_0
- prompt_toolkit=3.0.36=hd8ed1ab_0
- protobuf=3.18.0=py36hc4f0c31_0
- pthread-stubs=0.4=hb9d3cd8_1002
- ptyprocess=0.7.0=pyhd3deb0d_0
- py=1.11.0=pyh6c4a22f_0
- pycodestyle=2.9.1=pyhd8ed1ab_0
- pycparser=2.21=pyhd8ed1ab_0
- pydocstyle=6.2.0=pyhd8ed1ab_0
- pyflakes=2.5.0=pyhd8ed1ab_0
- pygments=2.14.0=pyhd8ed1ab_0
- pyopenssl=22.0.0=pyhd8ed1ab_1
- pyparsing=3.1.4=pyhd8ed1ab_0
- pyqt=5.12.3=py36h5fab9bb_7
- pyqt-impl=5.12.3=py36h7ec31b9_7
- pyqt5-sip=4.19.18=py36hc4f0c31_7
- pyqtchart=5.12=py36h7ec31b9_7
- pyqtwebengine=5.12.1=py36h7ec31b9_7
- pyrsistent=0.17.3=py36h8f6f2f9_2
- pyshp=2.3.1=pyhd8ed1ab_0
- pysocks=1.7.1=py36h5fab9bb_3
- pytest=6.2.5=py36h5fab9bb_0
- pytest-cov=4.0.0=pyhd8ed1ab_0
- pytest-flake8=1.1.0=pyhd8ed1ab_0
- pytest-runner=5.3.2=pyhd8ed1ab_0
- python=3.6.15=hb7a2778_0_cpython
- python-dateutil=2.8.2=pyhd8ed1ab_0
- python_abi=3.6=2_cp36m
- pytz=2023.3.post1=pyhd8ed1ab_0
- pyyaml=5.4.1=py36h8f6f2f9_1
- pyzmq=22.3.0=py36h7068817_0
- qt=5.12.9=h1304e3e_6
- qtconsole-base=5.2.2=pyhd8ed1ab_1
- qtpy=2.0.1=pyhd8ed1ab_0
- readline=8.2=h8c095d6_2
- requests=2.28.1=pyhd8ed1ab_0
- restructuredtext_lint=1.4.0=pyhd8ed1ab_0
- scipy=1.5.3=py36h81d768a_1
- send2trash=1.8.2=pyh41d4057_0
- setuptools=58.0.4=py36h5fab9bb_2
- shapely=1.7.1=py36hff28ebb_5
- six=1.16.0=pyh6c4a22f_0
- snowballstemmer=2.2.0=pyhd8ed1ab_0
- sphinx=5.1.1=pyh6c4a22f_0
- sphinx-gallery=0.15.0=pyhd8ed1ab_0
- sphinxcontrib-applehelp=1.0.4=pyhd8ed1ab_0
- sphinxcontrib-devhelp=1.0.2=py_0
- sphinxcontrib-htmlhelp=2.0.1=pyhd8ed1ab_0
- sphinxcontrib-jsmath=1.0.1=pyhd8ed1ab_0
- sphinxcontrib-qthelp=1.0.3=py_0
- sphinxcontrib-serializinghtml=1.1.5=pyhd8ed1ab_2
- sqlite=3.46.0=h6d4b2fc_0
- stevedore=3.4.0=py36h5fab9bb_0
- terminado=0.12.1=py36h5fab9bb_0
- testpath=0.6.0=pyhd8ed1ab_0
- tk=8.6.13=noxft_h4845f30_101
- toml=0.10.2=pyhd8ed1ab_0
- tomli=1.2.2=pyhd8ed1ab_0
- tornado=6.1=py36h8f6f2f9_1
- traitlets=4.3.3=pyhd8ed1ab_2
- typing-extensions=4.1.1=hd8ed1ab_0
- typing_extensions=4.1.1=pyha770c72_0
- urllib3=1.26.15=pyhd8ed1ab_0
- vcrpy=4.1.1=py_0
- wcwidth=0.2.10=pyhd8ed1ab_0
- webencodings=0.5.1=pyhd8ed1ab_2
- wheel=0.37.1=pyhd8ed1ab_0
- widgetsnbextension=3.6.1=pyha770c72_0
- wrapt=1.13.1=py36h8f6f2f9_0
- xarray=0.18.2=pyhd8ed1ab_0
- xorg-libxau=1.0.12=hb9d3cd8_0
- xorg-libxdmcp=1.1.5=hb9d3cd8_0
- xz=5.6.4=hbcc6ac9_0
- xz-gpl-tools=5.6.4=hbcc6ac9_0
- xz-tools=5.6.4=hb9d3cd8_0
- yaml=0.2.5=h7f98852_2
- yarl=1.6.3=py36h8f6f2f9_2
- zeromq=4.3.5=h59595ed_1
- zipp=3.6.0=pyhd8ed1ab_0
- zlib=1.2.13=h4ab18f5_6
- zstd=1.5.6=ha6fb4c9_0
- pip:
- importlib-resources==5.4.0
- ipyparallel==8.2.1
- nose==1.3.7
- psutil==7.0.0
- tqdm==4.64.1
prefix: /opt/conda/envs/siphon
| [
"siphon/tests/test_ncss.py::TestNCSS::test_empty_query"
] | [] | [
"siphon/tests/test_ncss.py::test_ncss_query_proj_box",
"siphon/tests/test_ncss.py::test_ncss_query_vertical_level",
"siphon/tests/test_ncss.py::test_ncss_query_add_latlon",
"siphon/tests/test_ncss.py::test_ncss_query_strides",
"siphon/tests/test_ncss.py::test_ncss_query_accept",
"siphon/tests/test_ncss.py::TestNCSS::test_good_query",
"siphon/tests/test_ncss.py::TestNCSS::test_bad_query",
"siphon/tests/test_ncss.py::TestNCSS::test_bad_query_no_vars",
"siphon/tests/test_ncss.py::TestNCSS::test_xml_point",
"siphon/tests/test_ncss.py::TestNCSS::test_csv_point",
"siphon/tests/test_ncss.py::TestNCSS::test_unit_handler_csv",
"siphon/tests/test_ncss.py::TestNCSS::test_unit_handler_xml",
"siphon/tests/test_ncss.py::TestNCSS::test_netcdf_point",
"siphon/tests/test_ncss.py::TestNCSS::test_netcdf4_point",
"siphon/tests/test_ncss.py::TestNCSS::test_vertical_level",
"siphon/tests/test_ncss.py::TestNCSS::test_raw_csv",
"siphon/tests/test_ncss.py::TestNCSS::test_unknown_mime"
] | [] | BSD 3-Clause "New" or "Revised" License | 1,496 | 160 | [
"siphon/ncss.py"
] |
|
networkx__networkx-2532 | f1601955df3e0e9c221cfb0460b761d1d00a2eca | 2017-07-21 22:58:11 | 3f4fd85765bf2d88188cfd4c84d0707152e6cd1e | diff --git a/networkx/algorithms/mis.py b/networkx/algorithms/mis.py
index 4b6aab9b6..ad48a5379 100644
--- a/networkx/algorithms/mis.py
+++ b/networkx/algorithms/mis.py
@@ -1,24 +1,26 @@
# -*- coding: utf-8 -*-
# $Id: maximalIndependentSet.py 576 2011-03-01 05:50:34Z lleeoo $
-"""
-Algorithm to find a maximal (not maximum) independent set.
-
-"""
# Leo Lopes <[email protected]>
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
+#
+# Authors: Leo Lopes <[email protected]>
+# Loïc Séguin-C. <[email protected]>
+"""
+Algorithm to find a maximal (not maximum) independent set.
-__author__ = "\n".join(["Leo Lopes <[email protected]>",
- "Loïc Séguin-C. <[email protected]>"])
+"""
+import random
+import networkx as nx
+from networkx.utils import not_implemented_for
__all__ = ['maximal_independent_set']
-import random
-import networkx as nx
+@not_implemented_for('directed')
def maximal_independent_set(G, nodes=None):
"""Return a random maximal independent set guaranteed to contain
a given set of nodes.
@@ -27,10 +29,10 @@ def maximal_independent_set(G, nodes=None):
of G induced by these nodes contains no edges. A maximal
independent set is an independent set such that it is not possible
to add a new node and still get an independent set.
-
+
Parameters
----------
- G : NetworkX graph
+ G : NetworkX graph
nodes : list or iterable
Nodes that must be part of the independent set. This set of nodes
@@ -38,7 +40,7 @@ def maximal_independent_set(G, nodes=None):
Returns
-------
- indep_nodes : list
+ indep_nodes : list
List of nodes that are part of a maximal independent set.
Raises
@@ -47,6 +49,9 @@ def maximal_independent_set(G, nodes=None):
If the nodes in the provided list are not part of the graph or
do not form an independent set, an exception is raised.
+ NetworkXNotImplemented
+ If `G` is directed.
+
Examples
--------
>>> G = nx.path_graph(5)
@@ -54,7 +59,7 @@ def maximal_independent_set(G, nodes=None):
[4, 0, 2]
>>> nx.maximal_independent_set(G, [1]) # doctest: +SKIP
[1, 3]
-
+
Notes
-----
This algorithm does not solve the maximum independent set problem.
@@ -67,7 +72,7 @@ def maximal_independent_set(G, nodes=None):
if not nodes.issubset(G):
raise nx.NetworkXUnfeasible(
"%s is not a subset of the nodes of G" % nodes)
- neighbors = set.union(*[set(G.neighbors(v)) for v in nodes])
+ neighbors = set.union(*[set(G.adj[v]) for v in nodes])
if set.intersection(neighbors, nodes):
raise nx.NetworkXUnfeasible(
"%s is not an independent set of G" % nodes)
@@ -76,6 +81,5 @@ def maximal_independent_set(G, nodes=None):
while available_nodes:
node = random.choice(list(available_nodes))
indep_nodes.append(node)
- available_nodes.difference_update(list(G.neighbors(node)) + [node])
+ available_nodes.difference_update(list(G.adj[node]) + [node])
return indep_nodes
-
| maximal_independent_set does not work for DiGraph
Currently [maximal_independent_set](https://github.com/networkx/networkx/blob/d7d906e1d16ef331da0bc1d149953e7532155acc/networkx/algorithms/mis.py#L70) returns the wrong results for a `DiGraph` because it uses the `G.neighbors` method which returns only the successor nodes in a `DiGraph`. I believe the [all_neighbors](https://github.com/networkx/networkx/blob/13b373bf6938c077d1e61adc60a48cb910a75755/networkx/classes/function.py#L540) function should be used instead to make `maximal_independent_set` work correctly for both graph types.
| networkx/networkx | diff --git a/networkx/algorithms/tests/test_mis.py b/networkx/algorithms/tests/test_mis.py
index 9136e2db7..ba72b20a4 100644
--- a/networkx/algorithms/tests/test_mis.py
+++ b/networkx/algorithms/tests/test_mis.py
@@ -1,10 +1,6 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# $Id: test_maximal_independent_set.py 577 2011-03-01 06:07:53Z lleeoo $
-"""
-Tests for maximal (not maximum) independent sets.
-
-"""
# Copyright (C) 2004-2016 by
# Leo Lopes <[email protected]>
# Aric Hagberg <[email protected]>
@@ -12,37 +8,42 @@ Tests for maximal (not maximum) independent sets.
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
+#
+# Author: Leo Lopes <[email protected]>
+"""
+Tests for maximal (not maximum) independent sets.
-__author__ = """Leo Lopes ([email protected])"""
+"""
from nose.tools import *
import networkx as nx
import random
+
class TestMaximalIndependantSet(object):
def setup(self):
self.florentine = nx.Graph()
- self.florentine.add_edge('Acciaiuoli','Medici')
- self.florentine.add_edge('Castellani','Peruzzi')
- self.florentine.add_edge('Castellani','Strozzi')
- self.florentine.add_edge('Castellani','Barbadori')
- self.florentine.add_edge('Medici','Barbadori')
- self.florentine.add_edge('Medici','Ridolfi')
- self.florentine.add_edge('Medici','Tornabuoni')
- self.florentine.add_edge('Medici','Albizzi')
- self.florentine.add_edge('Medici','Salviati')
- self.florentine.add_edge('Salviati','Pazzi')
- self.florentine.add_edge('Peruzzi','Strozzi')
- self.florentine.add_edge('Peruzzi','Bischeri')
- self.florentine.add_edge('Strozzi','Ridolfi')
- self.florentine.add_edge('Strozzi','Bischeri')
- self.florentine.add_edge('Ridolfi','Tornabuoni')
- self.florentine.add_edge('Tornabuoni','Guadagni')
- self.florentine.add_edge('Albizzi','Ginori')
- self.florentine.add_edge('Albizzi','Guadagni')
- self.florentine.add_edge('Bischeri','Guadagni')
- self.florentine.add_edge('Guadagni','Lamberteschi')
-
+ self.florentine.add_edge('Acciaiuoli', 'Medici')
+ self.florentine.add_edge('Castellani', 'Peruzzi')
+ self.florentine.add_edge('Castellani', 'Strozzi')
+ self.florentine.add_edge('Castellani', 'Barbadori')
+ self.florentine.add_edge('Medici', 'Barbadori')
+ self.florentine.add_edge('Medici', 'Ridolfi')
+ self.florentine.add_edge('Medici', 'Tornabuoni')
+ self.florentine.add_edge('Medici', 'Albizzi')
+ self.florentine.add_edge('Medici', 'Salviati')
+ self.florentine.add_edge('Salviati', 'Pazzi')
+ self.florentine.add_edge('Peruzzi', 'Strozzi')
+ self.florentine.add_edge('Peruzzi', 'Bischeri')
+ self.florentine.add_edge('Strozzi', 'Ridolfi')
+ self.florentine.add_edge('Strozzi', 'Bischeri')
+ self.florentine.add_edge('Ridolfi', 'Tornabuoni')
+ self.florentine.add_edge('Tornabuoni', 'Guadagni')
+ self.florentine.add_edge('Albizzi', 'Ginori')
+ self.florentine.add_edge('Albizzi', 'Guadagni')
+ self.florentine.add_edge('Bischeri', 'Guadagni')
+ self.florentine.add_edge('Guadagni', 'Lamberteschi')
+
def test_K5(self):
"""Maximal independent set: K5"""
G = nx.complete_graph(5)
@@ -63,19 +64,22 @@ class TestMaximalIndependantSet(object):
assert_raises(nx.NetworkXUnfeasible,
nx.maximal_independent_set, G, ["Salviati", "Pazzi"])
+ def test_digraph_exception(self):
+ G = nx.DiGraph([(1, 2), (3, 4)])
+ assert_raises(nx.NetworkXNotImplemented, nx.maximal_independent_set, G)
+
def test_florentine_family(self):
G = self.florentine
indep = nx.maximal_independent_set(G, ["Medici", "Bischeri"])
assert_equal(sorted(indep),
sorted(["Medici", "Bischeri", "Castellani", "Pazzi",
- "Ginori", "Lamberteschi"]))
+ "Ginori", "Lamberteschi"]))
def test_bipartite(self):
G = nx.complete_bipartite_graph(12, 34)
indep = nx.maximal_independent_set(G, [4, 5, 9, 10])
assert_equal(sorted(indep), list(range(12)))
-
def test_random_graphs(self):
"""Generate 50 random graphs of different types and sizes and
make sure that all sets are independent and maximal."""
@@ -86,4 +90,3 @@ class TestMaximalIndependantSet(object):
neighbors_of_MIS = set.union(*(set(G.neighbors(v)) for v in IS))
for v in set(G.nodes()).difference(IS):
assert_true(v in neighbors_of_MIS)
-
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 1
} | help | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y libgdal-dev graphviz"
],
"python": "3.6",
"reqs_path": [
"requirements/default.txt",
"requirements/test.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
decorator==5.1.1
importlib-metadata==4.8.3
iniconfig==1.1.1
-e git+https://github.com/networkx/networkx.git@f1601955df3e0e9c221cfb0460b761d1d00a2eca#egg=networkx
nose==1.3.7
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: networkx
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- decorator==5.1.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- nose==1.3.7
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/networkx
| [
"networkx/algorithms/tests/test_mis.py::TestMaximalIndependantSet::test_digraph_exception"
] | [] | [
"networkx/algorithms/tests/test_mis.py::TestMaximalIndependantSet::test_K5",
"networkx/algorithms/tests/test_mis.py::TestMaximalIndependantSet::test_K55",
"networkx/algorithms/tests/test_mis.py::TestMaximalIndependantSet::test_exception",
"networkx/algorithms/tests/test_mis.py::TestMaximalIndependantSet::test_florentine_family",
"networkx/algorithms/tests/test_mis.py::TestMaximalIndependantSet::test_bipartite",
"networkx/algorithms/tests/test_mis.py::TestMaximalIndependantSet::test_random_graphs"
] | [] | BSD 3-Clause | 1,497 | 932 | [
"networkx/algorithms/mis.py"
] |
|
pydicom__pydicom-432 | 352049ca4e2bd53ef689484aed57933d067512a7 | 2017-07-23 08:58:35 | bef49851e7c3b70edd43cc40fc84fe905e78d5ba | diff --git a/pydicom/filereader.py b/pydicom/filereader.py
index b9874fc41..692cc43d9 100644
--- a/pydicom/filereader.py
+++ b/pydicom/filereader.py
@@ -14,7 +14,7 @@ import zlib
from io import BytesIO
from pydicom.misc import size_in_bytes
-from pydicom.tag import TupleTag
+from pydicom.tag import TupleTag, Tag, BaseTag
from pydicom.dataelem import RawDataElement
from pydicom.util.hexutil import bytes2hex
from pydicom.valuerep import extra_length_VRs
@@ -41,7 +41,7 @@ from pydicom.dataset import (
)
from pydicom.dicomdir import DicomDir
-from pydicom.datadict import dictionary_VR
+from pydicom.datadict import dictionary_VR, tag_for_keyword
from pydicom.dataelem import DataElement
from pydicom.tag import (
ItemTag,
@@ -159,7 +159,8 @@ def data_element_generator(fp,
is_little_endian,
stop_when=None,
defer_size=None,
- encoding=default_encoding):
+ encoding=default_encoding,
+ specific_tags=None):
"""Create a generator to efficiently return the raw data elements.
@@ -177,6 +178,8 @@ def data_element_generator(fp,
See ``read_file`` for parameter info.
encoding :
Encoding scheme
+ specific_tags : list or None
+ See ``read_file`` for parameter info.
Returns
-------
@@ -230,6 +233,18 @@ def data_element_generator(fp,
element_struct_unpack = element_struct.unpack
defer_size = size_in_bytes(defer_size)
+ tag_set = set()
+ has_specific_char_set = True
+ if specific_tags is not None:
+ for tag in specific_tags:
+ if isinstance(tag, (str, compat.text_type)):
+ tag = Tag(tag_for_keyword(tag))
+ if isinstance(tag, BaseTag):
+ tag_set.add(tag)
+ has_specific_char_set = Tag(0x08, 0x05) in tag_set
+ tag_set.add(Tag(0x08, 0x05))
+ has_tag_set = len(tag_set) > 0
+
while True:
# Read tag, VR, length, get ready to read value
bytes_read = fp_read(8)
@@ -282,6 +297,11 @@ def data_element_generator(fp,
if length != 0xFFFFFFFF:
# don't defer loading of Specific Character Set value as it is
# needed immediately to get the character encoding for other tags
+ if has_tag_set and tag not in tag_set:
+ # skip the tag if not in specific tags
+ fp.seek(fp_tell() + length)
+ continue
+
if defer_size is not None and length > defer_size and tag != (
0x08, 0x05):
# Flag as deferred by setting value to None, and skip bytes
@@ -309,6 +329,8 @@ def data_element_generator(fp,
# Store the encoding value in the generator
# for use with future elements (SQs)
encoding = convert_encodings(encoding)
+ if not has_specific_char_set:
+ continue
yield RawDataElement(tag, VR, length, value, value_tell,
is_implicit_VR, is_little_endian)
@@ -340,6 +362,8 @@ def data_element_generator(fp,
logger_debug(msg.format(fp_tell()))
seq = read_sequence(fp, is_implicit_VR,
is_little_endian, length, encoding)
+ if has_tag_set and tag not in tag_set:
+ continue
yield DataElement(tag, VR, seq, value_tell,
is_undefined_length=True)
else:
@@ -358,14 +382,19 @@ def data_element_generator(fp,
# Store the encoding value in the generator for use
# with future elements (SQs)
encoding = convert_encodings(encoding)
+ if not has_specific_char_set:
+ continue
+ # tags with undefined length are skipped after read
+ if has_tag_set and tag not in tag_set:
+ continue
yield RawDataElement(tag, VR, length, value, value_tell,
is_implicit_VR, is_little_endian)
def read_dataset(fp, is_implicit_VR, is_little_endian, bytelength=None,
stop_when=None, defer_size=None,
- parent_encoding=default_encoding):
+ parent_encoding=default_encoding, specific_tags=None):
"""Return a Dataset instance containing the next dataset in the file.
Parameters
@@ -387,6 +416,8 @@ def read_dataset(fp, is_implicit_VR, is_little_endian, bytelength=None,
parent_encoding :
optional encoding to use as a default in case
a Specific Character Set (0008,0005) isn't specified
+ specific_tags : list or None
+ See ``read_file`` for parameter info.
Returns
-------
@@ -400,7 +431,8 @@ def read_dataset(fp, is_implicit_VR, is_little_endian, bytelength=None,
raw_data_elements = dict()
fpStart = fp.tell()
de_gen = data_element_generator(fp, is_implicit_VR, is_little_endian,
- stop_when, defer_size, parent_encoding)
+ stop_when, defer_size, parent_encoding,
+ specific_tags)
try:
while (bytelength is None) or (fp.tell() - fpStart < bytelength):
raw_data_element = next(de_gen)
@@ -635,7 +667,8 @@ def _at_pixel_data(tag, VR, length):
return tag == (0x7fe0, 0x0010)
-def read_partial(fileobj, stop_when=None, defer_size=None, force=False):
+def read_partial(fileobj, stop_when=None, defer_size=None,
+ force=False, specific_tags=None):
"""Parse a DICOM file until a condition is met.
Parameters
@@ -648,6 +681,8 @@ def read_partial(fileobj, stop_when=None, defer_size=None, force=False):
See ``read_file`` for parameter info.
force : boolean
See ``read_file`` for parameter info.
+ specific_tags : list or None
+ See ``read_file`` for parameter info.
Notes
-----
@@ -741,7 +776,8 @@ def read_partial(fileobj, stop_when=None, defer_size=None, force=False):
# the transfer syntax (whether read from the file meta or guessed at)
try:
dataset = read_dataset(fileobj, is_implicit_VR, is_little_endian,
- stop_when=stop_when, defer_size=defer_size)
+ stop_when=stop_when, defer_size=defer_size,
+ specific_tags=specific_tags)
except EOFError:
pass # error already logged in read_dataset
@@ -757,7 +793,8 @@ def read_partial(fileobj, stop_when=None, defer_size=None, force=False):
is_implicit_VR, is_little_endian)
-def read_file(fp, defer_size=None, stop_before_pixels=False, force=False):
+def read_file(fp, defer_size=None, stop_before_pixels=False,
+ force=False, specific_tags=None):
"""Read and parse a DICOM dataset stored in the DICOM File Format.
Read a DICOM dataset stored in accordance with the DICOM File Format
@@ -785,6 +822,9 @@ def read_file(fp, defer_size=None, stop_before_pixels=False, force=False):
If False (default), raises an InvalidDicomError if the file is missing
the File Meta Information header. Set to True to force reading even if
no File Meta Information header is found.
+ specific_tags : list or None
+ If not None, only the tags in the list are returned. The list
+ elements can be tags or tag names.
Returns
-------
@@ -832,8 +872,9 @@ def read_file(fp, defer_size=None, stop_before_pixels=False, force=False):
logger.debug("\n" + "-" * 80)
logger.debug("Call to read_file()")
msg = ("filename:'%s', defer_size='%s', "
- "stop_before_pixels=%s, force=%s")
- logger.debug(msg % (fp.name, defer_size, stop_before_pixels, force))
+ "stop_before_pixels=%s, force=%s, specific_tags=%s")
+ logger.debug(msg % (fp.name, defer_size, stop_before_pixels,
+ force, specific_tags))
if caller_owns_file:
logger.debug("Caller passed file object")
else:
@@ -849,7 +890,7 @@ def read_file(fp, defer_size=None, stop_before_pixels=False, force=False):
stop_when = _at_pixel_data
try:
dataset = read_partial(fp, stop_when, defer_size=defer_size,
- force=force)
+ force=force, specific_tags=specific_tags)
finally:
if not caller_owns_file:
fp.close()
diff --git a/pydicom/sequence.py b/pydicom/sequence.py
index aa7383b85..a484ca4ca 100644
--- a/pydicom/sequence.py
+++ b/pydicom/sequence.py
@@ -60,9 +60,8 @@ class Sequence(MultiValue):
def __repr__(self):
"""String representation of the Sequence."""
- formatstr = "<%(classname)s, length %(count)d, at %(id)X>"
+ formatstr = "<%(classname)s, length %(count)d>"
return formatstr % {
'classname': self.__class__.__name__,
- 'id': id(self),
'count': len(self)
}
| possibly different repr representation for tag, sequence
_From [[email protected]](https://code.google.com/u/105261369456480898587/) on October 16, 2011 10:23:19_
What steps will reproduce the problem?
details below
What is the expected output? What do you see instead?
`<Sequence, Length X>`
or `<Sequence, Length X, offset_hdr/data>`
but, i agree this should be thought through before making a change.
What version of the product are you using?
current dev
Please provide any additional information below.
when i print a tag, that is a sequence, it says:
`<Sequence, length 1, at 109AA8FC8>`
when i rerun it, it gives me a different hex number which leads me to believe this is a memory address. should it be a memory address, or should it give a byte offset into the dicom header/data or simply say `<Sequence, length x>`?
i'm generating the headers automatically and git is picking up this difference between header output each time.
-(0008, 1110) Referenced Study Sequence SQ: <Sequence, length 1, at 109AA8FC8>
-(0008, 1111) Referenced Performed Procedure Step SQ: <Sequence, length 1, at 109AB5AF8>
-(0008, 1140) Referenced Image Sequence SQ: <Sequence, length 2, at 109AB5BA8>
+(0008, 1110) Referenced Study Sequence SQ: <Sequence, length 1, at 109AB5FC8>
+(0008, 1111) Referenced Performed Procedure Step SQ: <Sequence, length 1, at 109AB5E68>
+(0008, 1140) Referenced Image Sequence SQ: <Sequence, length 2, at 109AB5EC0>
_Original issue: http://code.google.com/p/pydicom/issues/detail?id=106_
| pydicom/pydicom | diff --git a/pydicom/tests/test_charset.py b/pydicom/tests/test_charset.py
index a8b41de2c..e54f516c8 100644
--- a/pydicom/tests/test_charset.py
+++ b/pydicom/tests/test_charset.py
@@ -79,6 +79,17 @@ class charsetTests(unittest.TestCase):
ds = dicomio.read_file(multiPN_file)
ds.decode()
+ def testEncodingWithSpecificTags(self):
+ """Encoding is correctly applied even if Specific Character Set
+ is not in specific tags..."""
+ ds = dicomio.read_file(jp_file, specific_tags=['PatientName'])
+ ds.decode()
+ self.assertEqual(1, len(ds))
+ expected = ('Yamada^Tarou='
+ '\033$B;3ED\033(B^\033$BB@O:\033(B='
+ '\033$B$d$^$@\033(B^\033$B$?$m$&\033(B')
+ self.assertEqual(expected, ds.PatientName)
+
if __name__ == "__main__":
# This is called if run alone, but not if loaded through run_tests.py
diff --git a/pydicom/tests/test_filereader.py b/pydicom/tests/test_filereader.py
index c89d4b15b..5a81c80ae 100644
--- a/pydicom/tests/test_filereader.py
+++ b/pydicom/tests/test_filereader.py
@@ -310,6 +310,43 @@ class ReaderTests(unittest.TestCase):
missing = [Tag(0x7fe0, 0x10), Tag(0xfffc, 0xfffc)]
self.assertEqual(ctfull_tags, ctpartial_tags + missing, msg)
+ def testSpecificTags(self):
+ """Returns only tags specified by user."""
+ ctspecific = read_file(ct_name, specific_tags=[
+ Tag(0x0010, 0x0010), 'PatientID', 'ImageType', 'ViewName'])
+ ctspecific_tags = sorted(ctspecific.keys())
+ expected = [
+ # ViewName does not exist in the data set
+ Tag(0x0008, 0x0008), Tag(0x0010, 0x0010), Tag(0x0010, 0x0020)
+ ]
+ self.assertEqual(expected, ctspecific_tags)
+
+ def testSpecificTagsWithUnknownLengthSQ(self):
+ """Returns only tags specified by user."""
+ unknown_len_sq_tag = Tag(0x3f03, 0x1001)
+ tags = read_file(priv_SQ_name, specific_tags=[
+ unknown_len_sq_tag])
+ tags = sorted(tags.keys())
+ self.assertEqual([unknown_len_sq_tag], tags)
+
+ tags = read_file(priv_SQ_name, specific_tags=[
+ 'PatientName'])
+ tags = sorted(tags.keys())
+ self.assertEqual([], tags)
+
+ def testSpecificTagsWithUnknownLengthTag(self):
+ """Returns only tags specified by user."""
+ unknown_len_tag = Tag(0x7fe0, 0x0010) # Pixel Data
+ tags = read_file(emri_jpeg_2k_lossless, specific_tags=[
+ unknown_len_tag])
+ tags = sorted(tags.keys())
+ self.assertEqual([unknown_len_tag], tags)
+
+ tags = read_file(emri_jpeg_2k_lossless, specific_tags=[
+ 'SpecificCharacterSet'])
+ tags = sorted(tags.keys())
+ self.assertEqual([Tag(0x08, 0x05)], tags)
+
def testPrivateSQ(self):
"""Can read private undefined length SQ without error."""
# From issues 91, 97, 98. Bug introduced by fast reading, due to
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 2
} | 0.9 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"nose-timer"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
coverage==6.2
importlib-metadata==4.8.3
iniconfig==1.1.1
nose==1.3.7
nose-timer==1.0.1
packaging==21.3
pluggy==1.0.0
py==1.11.0
-e git+https://github.com/pydicom/pydicom.git@352049ca4e2bd53ef689484aed57933d067512a7#egg=pydicom
pyparsing==3.1.4
pytest==7.0.1
pytest-cov==4.0.0
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: pydicom
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- coverage==6.2
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- nose==1.3.7
- nose-timer==1.0.1
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-cov==4.0.0
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/pydicom
| [
"pydicom/tests/test_charset.py::charsetTests::testEncodingWithSpecificTags",
"pydicom/tests/test_filereader.py::ReaderTests::testSpecificTags",
"pydicom/tests/test_filereader.py::ReaderTests::testSpecificTagsWithUnknownLengthSQ",
"pydicom/tests/test_filereader.py::ReaderTests::testSpecificTagsWithUnknownLengthTag"
] | [
"pydicom/tests/test_filereader.py::DeferredReadTests::testFileExists",
"pydicom/tests/test_filereader.py::DeferredReadTests::testTimeCheck",
"pydicom/tests/test_filereader.py::DeferredReadTests::testValuesIdentical",
"pydicom/tests/test_filereader.py::DeferredReadTests::testZippedDeferred"
] | [
"pydicom/tests/test_charset.py::charsetTests::testEncodings",
"pydicom/tests/test_charset.py::charsetTests::testExplicitISO2022_IR6",
"pydicom/tests/test_charset.py::charsetTests::testLatin1",
"pydicom/tests/test_charset.py::charsetTests::testMultiPN",
"pydicom/tests/test_charset.py::charsetTests::testNestedCharacterSets",
"pydicom/tests/test_charset.py::charsetTests::testStandardFile",
"pydicom/tests/test_filereader.py::ReaderTests::testCT",
"pydicom/tests/test_filereader.py::ReaderTests::testDeflate",
"pydicom/tests/test_filereader.py::ReaderTests::testDir",
"pydicom/tests/test_filereader.py::ReaderTests::testEmptyNumbersTag",
"pydicom/tests/test_filereader.py::ReaderTests::testExplicitVRBigEndianNoMeta",
"pydicom/tests/test_filereader.py::ReaderTests::testExplicitVRLittleEndianNoMeta",
"pydicom/tests/test_filereader.py::ReaderTests::testMR",
"pydicom/tests/test_filereader.py::ReaderTests::testNestedPrivateSQ",
"pydicom/tests/test_filereader.py::ReaderTests::testNoForce",
"pydicom/tests/test_filereader.py::ReaderTests::testNoMetaGroupLength",
"pydicom/tests/test_filereader.py::ReaderTests::testNoPixelsRead",
"pydicom/tests/test_filereader.py::ReaderTests::testNoTransferSyntaxInMeta",
"pydicom/tests/test_filereader.py::ReaderTests::testPlanarConfig",
"pydicom/tests/test_filereader.py::ReaderTests::testPrivateSQ",
"pydicom/tests/test_filereader.py::ReaderTests::testRTDose",
"pydicom/tests/test_filereader.py::ReaderTests::testRTPlan",
"pydicom/tests/test_filereader.py::ReaderTests::testRTstruct",
"pydicom/tests/test_filereader.py::ReaderTests::testUTF8FileName",
"pydicom/tests/test_filereader.py::ReaderTests::test_commandset_no_dataset",
"pydicom/tests/test_filereader.py::ReaderTests::test_correct_ambiguous_vr",
"pydicom/tests/test_filereader.py::ReaderTests::test_correct_ambiguous_vr_compressed",
"pydicom/tests/test_filereader.py::ReaderTests::test_group_length_wrong",
"pydicom/tests/test_filereader.py::ReaderTests::test_long_specific_char_set",
"pydicom/tests/test_filereader.py::ReaderTests::test_meta_no_dataset",
"pydicom/tests/test_filereader.py::ReaderTests::test_no_dataset",
"pydicom/tests/test_filereader.py::ReaderTests::test_no_preamble_command_group_dataset",
"pydicom/tests/test_filereader.py::ReaderTests::test_no_preamble_file_meta_dataset",
"pydicom/tests/test_filereader.py::ReaderTests::test_preamble_command_meta_no_dataset",
"pydicom/tests/test_filereader.py::ReaderTests::test_preamble_commandset_no_dataset",
"pydicom/tests/test_filereader.py::ReaderTests::test_preamble_meta_no_dataset",
"pydicom/tests/test_filereader.py::ReadDataElementTests::test_read_AE",
"pydicom/tests/test_filereader.py::ReadDataElementTests::test_read_OD_explicit_little",
"pydicom/tests/test_filereader.py::ReadDataElementTests::test_read_OD_implicit_little",
"pydicom/tests/test_filereader.py::ReadDataElementTests::test_read_OL_explicit_little",
"pydicom/tests/test_filereader.py::ReadDataElementTests::test_read_OL_implicit_little",
"pydicom/tests/test_filereader.py::ReadDataElementTests::test_read_UC_explicit_little",
"pydicom/tests/test_filereader.py::ReadDataElementTests::test_read_UC_implicit_little",
"pydicom/tests/test_filereader.py::ReadDataElementTests::test_read_UR_explicit_little",
"pydicom/tests/test_filereader.py::ReadDataElementTests::test_read_UR_implicit_little",
"pydicom/tests/test_filereader.py::JPEG_LS_Tests::testJPEG_LS_PixelArray",
"pydicom/tests/test_filereader.py::JPEG_LS_Tests::test_emri_JPEG_LS_PixelArray",
"pydicom/tests/test_filereader.py::BigEndian_Tests::test_big_endian_PixelArray",
"pydicom/tests/test_filereader.py::JPEG2000Tests::testJPEG2000",
"pydicom/tests/test_filereader.py::JPEG2000Tests::testJPEG2000PixelArray",
"pydicom/tests/test_filereader.py::JPEG2000Tests::test_emri_JPEG2000PixelArray",
"pydicom/tests/test_filereader.py::JPEGlossyTests::testJPEGBaselineColor3DPixelArray",
"pydicom/tests/test_filereader.py::JPEGlossyTests::testJPEGlossy",
"pydicom/tests/test_filereader.py::JPEGlossyTests::testJPEGlossyPixelArray",
"pydicom/tests/test_filereader.py::JPEGlosslessTests::testJPEGlossless",
"pydicom/tests/test_filereader.py::JPEGlosslessTests::testJPEGlosslessPixelArray",
"pydicom/tests/test_filereader.py::ReadTruncatedFileTests::testReadFileWithMissingPixelData",
"pydicom/tests/test_filereader.py::FileLikeTests::testReadFileGivenFileLikeObject",
"pydicom/tests/test_filereader.py::FileLikeTests::testReadFileGivenFileObject"
] | [] | MIT License | 1,504 | 2,277 | [
"pydicom/filereader.py",
"pydicom/sequence.py"
] |
|
awslabs__aws-cfn-template-flip-22 | 68a80c5903ecae27703165db35f8693aed5fff85 | 2017-07-23 18:44:01 | 68a80c5903ecae27703165db35f8693aed5fff85 | diff --git a/cfn_flip/__init__.py b/cfn_flip/__init__.py
index 34e9257..d5b112a 100644
--- a/cfn_flip/__init__.py
+++ b/cfn_flip/__init__.py
@@ -10,19 +10,11 @@ or in the "license" file accompanying this file. This file is distributed on an
from .clean import clean
from .custom_json import DateTimeAwareJsonEncoder
-from .custom_yaml import custom_yaml
+from .custom_yaml import CustomDumper, CustomLoader
import collections
import json
+import yaml
-class MyDumper(custom_yaml.Dumper):
- """
- Indent block sequences from parent using more common style
- (" - entry" vs "- entry").
- Causes fewer problems with validation and tools.
- """
-
- def increase_indent(self,flow=False, indentless=False):
- return super(MyDumper,self).increase_indent(flow, False)
def to_json(template, clean_up=False):
"""
@@ -30,7 +22,7 @@ def to_json(template, clean_up=False):
undoing yaml short syntax where detected
"""
- data = custom_yaml.load(template)
+ data = yaml.load(template, Loader=CustomLoader)
if clean_up:
data = clean(data)
@@ -48,7 +40,7 @@ def to_yaml(template, clean_up=False):
if clean_up:
data = clean(data)
- return custom_yaml.dump(data, Dumper=MyDumper, default_flow_style=False)
+ return yaml.dump(data, Dumper=CustomDumper, default_flow_style=False)
def flip(template, clean_up=False):
"""
diff --git a/cfn_flip/custom_yaml.py b/cfn_flip/custom_yaml.py
index a2ff89c..b1ea022 100644
--- a/cfn_flip/custom_yaml.py
+++ b/cfn_flip/custom_yaml.py
@@ -8,16 +8,30 @@ Licensed under the Apache License, Version 2.0 (the "License"). You may not use
or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
"""
-import imp
import six
import collections
+import yaml
-custom_yaml = imp.load_module("custom_yaml", *imp.find_module("yaml"))
TAG_MAP = "tag:yaml.org,2002:map"
TAG_STRING = "tag:yaml.org,2002:str"
UNCONVERTED_SUFFIXES = ["Ref", "Condition"]
+class CustomDumper(yaml.Dumper):
+ """
+ Indent block sequences from parent using more common style
+ (" - entry" vs "- entry").
+ Causes fewer problems with validation and tools.
+ """
+
+ def increase_indent(self,flow=False, indentless=False):
+ return super(CustomDumper,self).increase_indent(flow, False)
+
+
+class CustomLoader(yaml.Loader):
+ pass
+
+
def multi_constructor(loader, tag_suffix, node):
"""
Deal with !Ref style function format
@@ -30,11 +44,11 @@ def multi_constructor(loader, tag_suffix, node):
if tag_suffix == "Fn::GetAtt":
constructor = construct_getatt
- elif isinstance(node, custom_yaml.ScalarNode):
+ elif isinstance(node, yaml.ScalarNode):
constructor = loader.construct_scalar
- elif isinstance(node, custom_yaml.SequenceNode):
+ elif isinstance(node, yaml.SequenceNode):
constructor = loader.construct_sequence
- elif isinstance(node, custom_yaml.MappingNode):
+ elif isinstance(node, yaml.MappingNode):
constructor = loader.construct_mapping
else:
raise "Bad tag: !{}".format(tag_suffix)
@@ -126,8 +140,8 @@ def representer(dumper, data):
return dumper.represent_scalar(tag, data)
# Customise our yaml
-custom_yaml.add_representer(six.text_type, lambda dumper, value: dumper.represent_scalar(TAG_STRING, value))
-custom_yaml.add_constructor(TAG_MAP, construct_mapping)
-custom_yaml.add_multi_constructor("!", multi_constructor)
-custom_yaml.add_representer(collections.OrderedDict, representer)
-custom_yaml.add_representer(dict, representer)
+CustomDumper.add_representer(six.text_type, lambda dumper, value: dumper.represent_scalar(TAG_STRING, value))
+CustomLoader.add_constructor(TAG_MAP, construct_mapping)
+CustomLoader.add_multi_constructor("!", multi_constructor)
+CustomDumper.add_representer(collections.OrderedDict, representer)
+CustomDumper.add_representer(dict, representer)
diff --git a/setup.py b/setup.py
index 52de37c..e6acc7c 100644
--- a/setup.py
+++ b/setup.py
@@ -24,6 +24,7 @@ setup(
"six",
],
zip_safe=False,
+ test_suite="tests",
entry_points={
"console_scripts": ["cfn-flip=cfn_flip.main:main"],
},
| yaml ordereddict fix breaks when yaml is a .egg
The fix for #14 doesn't work in all cases. When PyYAML is installed in a .egg file the load of yaml as custom_yaml fails. For troposphere, [here](https://travis-ci.org/cloudtools/troposphere/jobs/256102858) is an example of the tests failing. The issue is the imp module does not know how how to handle hooks to zipimport. I have yet to find a good alternate solution to this [code](https://github.com/awslabs/aws-cfn-template-flip/blob/master/cfn_flip/custom_yaml.py#L15). | awslabs/aws-cfn-template-flip | diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/tests/test_flip.py b/tests/test_flip.py
index 17d7236..fc50b67 100644
--- a/tests/test_flip.py
+++ b/tests/test_flip.py
@@ -9,9 +9,10 @@ or in the "license" file accompanying this file. This file is distributed on an
"""
import cfn_flip
+from cfn_flip.custom_yaml import CustomLoader
import json
import unittest
-from cfn_flip.custom_yaml import custom_yaml
+import yaml
class CfnFlipTestCase(unittest.TestCase):
@@ -33,10 +34,10 @@ class CfnFlipTestCase(unittest.TestCase):
self.clean_yaml = f.read()
self.parsed_json = json.loads(self.input_json)
- self.parsed_yaml = custom_yaml.load(self.input_yaml)
+ self.parsed_yaml = yaml.load(self.input_yaml, Loader=CustomLoader)
self.parsed_clean_json = json.loads(self.clean_json)
- self.parsed_clean_yaml = custom_yaml.load(self.clean_yaml)
+ self.parsed_clean_yaml = yaml.load(self.clean_yaml, Loader=CustomLoader)
self.bad_data = "<!DOCTYPE html>\n\n<html>\n\tThis isn't right!\n</html>"
@@ -76,7 +77,7 @@ class CfnFlipTestCase(unittest.TestCase):
with self.assertRaises(ValueError):
json.loads(actual)
- parsed_actual = custom_yaml.load(actual)
+ parsed_actual = yaml.load(actual, Loader=CustomLoader)
self.assertDictEqual(parsed_actual, self.parsed_yaml)
@@ -111,7 +112,7 @@ class CfnFlipTestCase(unittest.TestCase):
with self.assertRaises(ValueError):
json.loads(actual)
- parsed_actual = custom_yaml.load(actual)
+ parsed_actual = yaml.load(actual, Loader=CustomLoader)
self.assertDictEqual(parsed_actual, self.parsed_yaml)
@@ -139,7 +140,7 @@ class CfnFlipTestCase(unittest.TestCase):
with self.assertRaises(ValueError):
json.loads(actual)
- parsed_actual = custom_yaml.load(actual)
+ parsed_actual = yaml.load(actual, Loader=CustomLoader)
self.assertDictEqual(parsed_actual, self.parsed_clean_yaml)
diff --git a/tests/test_yaml_patching.py b/tests/test_yaml_patching.py
index 5079357..db23442 100644
--- a/tests/test_yaml_patching.py
+++ b/tests/test_yaml_patching.py
@@ -9,6 +9,7 @@ or in the "license" file accompanying this file. This file is distributed on an
"""
import cfn_flip
+import collections
import json
import unittest
import yaml
@@ -19,7 +20,7 @@ class YamlPatchTestCase(unittest.TestCase):
Check that we don't patch yaml for everybody
"""
- def test_yaml_ordered_dict(self):
+ def test_yaml_no_ordered_dict(self):
"""
cfn-flip patches yaml to use OrderedDict by default
Check that we don't do this for folks who import cfn_flip and yaml
@@ -29,3 +30,14 @@ class YamlPatchTestCase(unittest.TestCase):
data = yaml.load(yaml_string)
self.assertEqual(type(data), dict)
+
+ def test_yaml_no_ordered_dict(self):
+ """
+ cfn-flip patches yaml to use OrderedDict by default
+ Check that we do this for normal cfn_flip use cases
+ """
+
+ yaml_string = "key: value"
+ data = yaml.load(yaml_string, Loader=cfn_flip.CustomLoader)
+
+ self.assertEqual(type(data), collections.OrderedDict)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_issue_reference",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 3
} | 0.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-asyncio",
"pytest-mock",
"Faker",
"aresponses",
"boto3",
"pytest-timeout"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.7",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | aiohttp==3.8.6
aiosignal==1.3.1
aresponses==3.0.0
async-timeout==4.0.3
asynctest==0.13.0
attrs==24.2.0
boto3==1.33.13
botocore==1.33.13
certifi @ file:///croot/certifi_1671487769961/work/certifi
-e git+https://github.com/awslabs/aws-cfn-template-flip.git@68a80c5903ecae27703165db35f8693aed5fff85#egg=cfn_flip
charset-normalizer==3.4.1
exceptiongroup==1.2.2
Faker==18.13.0
frozenlist==1.3.3
idna==3.10
importlib-metadata==6.7.0
iniconfig==2.0.0
jmespath==1.0.1
multidict==6.0.5
packaging==24.0
pluggy==1.2.0
pytest==7.4.4
pytest-asyncio==0.21.2
pytest-mock==3.11.1
pytest-timeout==2.3.1
python-dateutil==2.9.0.post0
PyYAML==6.0.1
s3transfer==0.8.2
six==1.17.0
tomli==2.0.1
typing_extensions==4.7.1
urllib3==1.26.20
yarl==1.9.4
zipp==3.15.0
| name: aws-cfn-template-flip
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2022.12.7=py37h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=22.3.1=py37h06a4308_0
- python=3.7.16=h7a1cb2a_0
- readline=8.2=h5eee18b_0
- setuptools=65.6.3=py37h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.38.4=py37h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- aiohttp==3.8.6
- aiosignal==1.3.1
- aresponses==3.0.0
- async-timeout==4.0.3
- asynctest==0.13.0
- attrs==24.2.0
- boto3==1.33.13
- botocore==1.33.13
- charset-normalizer==3.4.1
- exceptiongroup==1.2.2
- faker==18.13.0
- frozenlist==1.3.3
- idna==3.10
- importlib-metadata==6.7.0
- iniconfig==2.0.0
- jmespath==1.0.1
- multidict==6.0.5
- packaging==24.0
- pluggy==1.2.0
- pytest==7.4.4
- pytest-asyncio==0.21.2
- pytest-mock==3.11.1
- pytest-timeout==2.3.1
- python-dateutil==2.9.0.post0
- pyyaml==6.0.1
- s3transfer==0.8.2
- six==1.17.0
- tomli==2.0.1
- typing-extensions==4.7.1
- urllib3==1.26.20
- yarl==1.9.4
- zipp==3.15.0
prefix: /opt/conda/envs/aws-cfn-template-flip
| [
"tests/test_flip.py::CfnFlipTestCase::test_flip_to_clean_json",
"tests/test_flip.py::CfnFlipTestCase::test_flip_to_clean_yaml",
"tests/test_flip.py::CfnFlipTestCase::test_flip_to_json",
"tests/test_flip.py::CfnFlipTestCase::test_flip_to_json_with_condition",
"tests/test_flip.py::CfnFlipTestCase::test_flip_to_json_with_datetimes",
"tests/test_flip.py::CfnFlipTestCase::test_flip_to_yaml",
"tests/test_flip.py::CfnFlipTestCase::test_flip_to_yaml_with_clean_getatt",
"tests/test_flip.py::CfnFlipTestCase::test_flip_with_bad_data",
"tests/test_flip.py::CfnFlipTestCase::test_getatt_from_yaml",
"tests/test_flip.py::CfnFlipTestCase::test_to_json_with_json",
"tests/test_flip.py::CfnFlipTestCase::test_to_json_with_yaml",
"tests/test_flip.py::CfnFlipTestCase::test_to_yaml_with_json",
"tests/test_flip.py::CfnFlipTestCase::test_to_yaml_with_yaml",
"tests/test_yaml_patching.py::YamlPatchTestCase::test_yaml_no_ordered_dict"
] | [] | [] | [] | Apache License 2.0 | 1,506 | 1,164 | [
"cfn_flip/__init__.py",
"cfn_flip/custom_yaml.py",
"setup.py"
] |
|
dask__dask-2543 | 6d58b523a53bee22a76ea9860ca1a131b2f9312d | 2017-07-24 21:35:29 | c560965c8fc0da7cbc0920d43b7011d2721307d3 | jakirkham: Looks much cleaner than the original code. Thanks for making this change. | diff --git a/dask/array/core.py b/dask/array/core.py
index c880fa0cd..3f3f222db 100644
--- a/dask/array/core.py
+++ b/dask/array/core.py
@@ -2842,12 +2842,10 @@ def where(condition, x=None, y=None):
x = broadcast_to(x, shape).astype(dtype)
y = broadcast_to(y, shape).astype(dtype)
- if isinstance(condition, (bool, np.bool8)):
- if condition:
- return x
- else:
- return y
+ if np.isscalar(condition):
+ return x if condition else y
else:
+ condition = asarray(condition).astype('bool')
return choose(condition, [y, x])
@@ -3955,6 +3953,11 @@ def repeat(a, repeats, axis=None):
if not isinstance(repeats, Integral):
raise NotImplementedError("Only integer valued repeats supported")
+ if -a.ndim <= axis < 0:
+ axis += a.ndim
+ elif not 0 <= axis <= a.ndim - 1:
+ raise ValueError("axis(=%d) out of bounds" % axis)
+
if repeats == 1:
return a
| da.where breaks on non-bool condition
Using dask 0.15.1, numpy 1.13.1, Python 2.7, the following code throws an exception with dask, while the equivalent numpy code works correctly:
```python
>>> import numpy as np
>>> import dask.array as da
>>> a = da.arange(4, dtype=np.uint8, chunks=(4,))
>>> da.where(a, 1, 0).compute()
Traceback (most recent call last):
File "dask-where.py", line 6, in <module>
da.where(a, 1, 0).compute()
File "/home/bmerry/work/sdp/env/local/lib/python2.7/site-packages/dask/base.py", line 98, in compute
(result,) = compute(self, traverse=False, **kwargs)
File "/home/bmerry/work/sdp/env/local/lib/python2.7/site-packages/dask/base.py", line 205, in compute
results = get(dsk, keys, **kwargs)
File "/home/bmerry/work/sdp/env/local/lib/python2.7/site-packages/dask/threaded.py", line 75, in get
pack_exception=pack_exception, **kwargs)
File "/home/bmerry/work/sdp/env/local/lib/python2.7/site-packages/dask/local.py", line 521, in get_async
raise_exception(exc, tb)
File "/home/bmerry/work/sdp/env/local/lib/python2.7/site-packages/dask/local.py", line 290, in execute_task
result = _execute_task(task, data)
File "/home/bmerry/work/sdp/env/local/lib/python2.7/site-packages/dask/local.py", line 271, in _execute_task
return func(*args2)
File "/home/bmerry/work/sdp/env/local/lib/python2.7/site-packages/dask/array/core.py", line 2796, in variadic_choose
return np.choose(a, choices)
File "/home/bmerry/work/sdp/env/local/lib/python2.7/site-packages/numpy/core/fromnumeric.py", line 354, in choose
return _wrapfunc(a, 'choose', choices, out=out, mode=mode)
File "/home/bmerry/work/sdp/env/local/lib/python2.7/site-packages/numpy/core/fromnumeric.py", line 57, in _wrapfunc
return getattr(obj, method)(*args, **kwds)
ValueError: invalid entry in choice array
```
From what I can see, `da.where` is being implemented using `np.choose` rather than `np.where`, and so `np.choose` is trying to use the raw value of the condition rather than its truthiness to index one of the two outputs. | dask/dask | diff --git a/dask/array/tests/test_array_core.py b/dask/array/tests/test_array_core.py
index 701d2674a..61934ac1b 100644
--- a/dask/array/tests/test_array_core.py
+++ b/dask/array/tests/test_array_core.py
@@ -686,11 +686,15 @@ def test_choose():
def test_where():
x = np.random.randint(10, size=(15, 16))
+ x[5, 5] = x[4, 4] = 0 # Ensure some false elements
d = from_array(x, chunks=(4, 5))
y = np.random.randint(10, size=15).astype(np.uint8)
e = from_array(y, chunks=(4,))
for c1, c2 in [(d > 5, x > 5),
+ (d, x),
+ (1, 1),
+ (0, 0),
(True, True),
(np.True_, np.True_),
(False, False),
@@ -698,7 +702,6 @@ def test_where():
for b1, b2 in [(0, 0), (-e[:, None], -y[:, None])]:
w1 = where(c1, d, b1)
w2 = np.where(c2, x, b2)
-
assert_eq(w1, w2)
@@ -708,7 +711,7 @@ def test_where_bool_optimization():
y = np.random.randint(10, size=(15, 16))
e = from_array(y, chunks=(4, 5))
- for c in [True, False, np.True_, np.False_]:
+ for c in [True, False, np.True_, np.False_, 1, 0]:
w1 = where(c, d, e)
w2 = np.where(c, x, y)
@@ -2764,7 +2767,7 @@ def test_repeat():
d = da.from_array(x, chunks=(4, 5, 3))
repeats = [1, 2, 5]
- axes = [0, 1, 2]
+ axes = [-3, -2, -1, 0, 1, 2]
for r in repeats:
for a in axes:
@@ -2781,6 +2784,10 @@ def test_repeat():
with pytest.raises(NotImplementedError):
da.repeat(d, 2)
+ for invalid_axis in [3, -4]:
+ with pytest.raises(ValueError):
+ da.repeat(d, 2, axis=invalid_axis)
+
x = np.arange(5)
d = da.arange(5, chunks=(2,))
| {
"commit_name": "merge_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | 0.15 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[complete]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-xdist",
"pytest-cov",
"pytest-mock",
"pytest-asyncio",
"numpy>=1.16.0",
"pandas>=1.0.0"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | aiobotocore==2.1.2
aiohttp==3.8.6
aioitertools==0.11.0
aiosignal==1.2.0
async-timeout==4.0.2
asynctest==0.13.0
attrs==22.2.0
botocore==1.23.24
certifi==2021.5.30
charset-normalizer==3.0.1
click==8.0.4
cloudpickle==2.2.1
coverage==6.2
-e git+https://github.com/dask/dask.git@6d58b523a53bee22a76ea9860ca1a131b2f9312d#egg=dask
distributed==1.19.3
execnet==1.9.0
frozenlist==1.2.0
fsspec==2022.1.0
HeapDict==1.0.1
idna==3.10
idna-ssl==1.1.0
importlib-metadata==4.8.3
iniconfig==1.1.1
jmespath==0.10.0
locket==1.0.0
msgpack-python==0.5.6
multidict==5.2.0
numpy==1.19.5
packaging==21.3
pandas==1.1.5
partd==1.2.0
pluggy==1.0.0
psutil==7.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
pytest-asyncio==0.16.0
pytest-cov==4.0.0
pytest-mock==3.6.1
pytest-xdist==3.0.2
python-dateutil==2.9.0.post0
pytz==2025.2
s3fs==2022.1.0
six==1.17.0
sortedcontainers==2.4.0
tblib==1.7.0
tomli==1.2.3
toolz==0.12.0
tornado==6.1
typing_extensions==4.1.1
urllib3==1.26.20
wrapt==1.16.0
yarl==1.7.2
zict==2.1.0
zipp==3.6.0
| name: dask
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- aiobotocore==2.1.2
- aiohttp==3.8.6
- aioitertools==0.11.0
- aiosignal==1.2.0
- async-timeout==4.0.2
- asynctest==0.13.0
- attrs==22.2.0
- botocore==1.23.24
- charset-normalizer==3.0.1
- click==8.0.4
- cloudpickle==2.2.1
- coverage==6.2
- distributed==1.19.3
- execnet==1.9.0
- frozenlist==1.2.0
- fsspec==2022.1.0
- heapdict==1.0.1
- idna==3.10
- idna-ssl==1.1.0
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- jmespath==0.10.0
- locket==1.0.0
- msgpack-python==0.5.6
- multidict==5.2.0
- numpy==1.19.5
- packaging==21.3
- pandas==1.1.5
- partd==1.2.0
- pluggy==1.0.0
- psutil==7.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-asyncio==0.16.0
- pytest-cov==4.0.0
- pytest-mock==3.6.1
- pytest-xdist==3.0.2
- python-dateutil==2.9.0.post0
- pytz==2025.2
- s3fs==2022.1.0
- six==1.17.0
- sortedcontainers==2.4.0
- tblib==1.7.0
- tomli==1.2.3
- toolz==0.12.0
- tornado==6.1
- typing-extensions==4.1.1
- urllib3==1.26.20
- wrapt==1.16.0
- yarl==1.7.2
- zict==2.1.0
- zipp==3.6.0
prefix: /opt/conda/envs/dask
| [
"dask/array/tests/test_array_core.py::test_where",
"dask/array/tests/test_array_core.py::test_where_bool_optimization",
"dask/array/tests/test_array_core.py::test_repeat"
] | [
"dask/array/tests/test_array_core.py::test_concatenate_unknown_axes",
"dask/array/tests/test_array_core.py::test_field_access",
"dask/array/tests/test_array_core.py::test_field_access_with_shape",
"dask/array/tests/test_array_core.py::test_matmul",
"dask/array/tests/test_array_core.py::test_to_dask_dataframe"
] | [
"dask/array/tests/test_array_core.py::test_getem",
"dask/array/tests/test_array_core.py::test_top",
"dask/array/tests/test_array_core.py::test_top_supports_broadcasting_rules",
"dask/array/tests/test_array_core.py::test_concatenate3_on_scalars",
"dask/array/tests/test_array_core.py::test_chunked_dot_product",
"dask/array/tests/test_array_core.py::test_chunked_transpose_plus_one",
"dask/array/tests/test_array_core.py::test_transpose",
"dask/array/tests/test_array_core.py::test_broadcast_dimensions_works_with_singleton_dimensions",
"dask/array/tests/test_array_core.py::test_broadcast_dimensions",
"dask/array/tests/test_array_core.py::test_Array",
"dask/array/tests/test_array_core.py::test_uneven_chunks",
"dask/array/tests/test_array_core.py::test_numblocks_suppoorts_singleton_block_dims",
"dask/array/tests/test_array_core.py::test_keys",
"dask/array/tests/test_array_core.py::test_Array_computation",
"dask/array/tests/test_array_core.py::test_stack",
"dask/array/tests/test_array_core.py::test_short_stack",
"dask/array/tests/test_array_core.py::test_stack_scalars",
"dask/array/tests/test_array_core.py::test_stack_promote_type",
"dask/array/tests/test_array_core.py::test_stack_rechunk",
"dask/array/tests/test_array_core.py::test_concatenate",
"dask/array/tests/test_array_core.py::test_concatenate_rechunk",
"dask/array/tests/test_array_core.py::test_concatenate_fixlen_strings",
"dask/array/tests/test_array_core.py::test_vstack",
"dask/array/tests/test_array_core.py::test_hstack",
"dask/array/tests/test_array_core.py::test_dstack",
"dask/array/tests/test_array_core.py::test_take",
"dask/array/tests/test_array_core.py::test_compress",
"dask/array/tests/test_array_core.py::test_binops",
"dask/array/tests/test_array_core.py::test_isnull",
"dask/array/tests/test_array_core.py::test_isclose",
"dask/array/tests/test_array_core.py::test_broadcast_shapes",
"dask/array/tests/test_array_core.py::test_elemwise_on_scalars",
"dask/array/tests/test_array_core.py::test_partial_by_order",
"dask/array/tests/test_array_core.py::test_elemwise_with_ndarrays",
"dask/array/tests/test_array_core.py::test_elemwise_differently_chunked",
"dask/array/tests/test_array_core.py::test_operators",
"dask/array/tests/test_array_core.py::test_operator_dtype_promotion",
"dask/array/tests/test_array_core.py::test_tensordot",
"dask/array/tests/test_array_core.py::test_tensordot_2[0]",
"dask/array/tests/test_array_core.py::test_tensordot_2[1]",
"dask/array/tests/test_array_core.py::test_tensordot_2[axes2]",
"dask/array/tests/test_array_core.py::test_tensordot_2[axes3]",
"dask/array/tests/test_array_core.py::test_tensordot_2[axes4]",
"dask/array/tests/test_array_core.py::test_tensordot_2[axes5]",
"dask/array/tests/test_array_core.py::test_tensordot_2[axes6]",
"dask/array/tests/test_array_core.py::test_dot_method",
"dask/array/tests/test_array_core.py::test_T",
"dask/array/tests/test_array_core.py::test_norm",
"dask/array/tests/test_array_core.py::test_choose",
"dask/array/tests/test_array_core.py::test_where_has_informative_error",
"dask/array/tests/test_array_core.py::test_coarsen",
"dask/array/tests/test_array_core.py::test_coarsen_with_excess",
"dask/array/tests/test_array_core.py::test_insert",
"dask/array/tests/test_array_core.py::test_multi_insert",
"dask/array/tests/test_array_core.py::test_broadcast_to",
"dask/array/tests/test_array_core.py::test_broadcast_to_array",
"dask/array/tests/test_array_core.py::test_broadcast_to_scalar",
"dask/array/tests/test_array_core.py::test_ravel",
"dask/array/tests/test_array_core.py::test_roll[None-3-chunks0]",
"dask/array/tests/test_array_core.py::test_roll[None-3-chunks1]",
"dask/array/tests/test_array_core.py::test_roll[None-7-chunks0]",
"dask/array/tests/test_array_core.py::test_roll[None-7-chunks1]",
"dask/array/tests/test_array_core.py::test_roll[None-9-chunks0]",
"dask/array/tests/test_array_core.py::test_roll[None-9-chunks1]",
"dask/array/tests/test_array_core.py::test_roll[None-shift3-chunks0]",
"dask/array/tests/test_array_core.py::test_roll[None-shift3-chunks1]",
"dask/array/tests/test_array_core.py::test_roll[None-shift4-chunks0]",
"dask/array/tests/test_array_core.py::test_roll[None-shift4-chunks1]",
"dask/array/tests/test_array_core.py::test_roll[0-3-chunks0]",
"dask/array/tests/test_array_core.py::test_roll[0-3-chunks1]",
"dask/array/tests/test_array_core.py::test_roll[0-7-chunks0]",
"dask/array/tests/test_array_core.py::test_roll[0-7-chunks1]",
"dask/array/tests/test_array_core.py::test_roll[0-9-chunks0]",
"dask/array/tests/test_array_core.py::test_roll[0-9-chunks1]",
"dask/array/tests/test_array_core.py::test_roll[0-shift3-chunks0]",
"dask/array/tests/test_array_core.py::test_roll[0-shift3-chunks1]",
"dask/array/tests/test_array_core.py::test_roll[0-shift4-chunks0]",
"dask/array/tests/test_array_core.py::test_roll[0-shift4-chunks1]",
"dask/array/tests/test_array_core.py::test_roll[1-3-chunks0]",
"dask/array/tests/test_array_core.py::test_roll[1-3-chunks1]",
"dask/array/tests/test_array_core.py::test_roll[1-7-chunks0]",
"dask/array/tests/test_array_core.py::test_roll[1-7-chunks1]",
"dask/array/tests/test_array_core.py::test_roll[1-9-chunks0]",
"dask/array/tests/test_array_core.py::test_roll[1-9-chunks1]",
"dask/array/tests/test_array_core.py::test_roll[1-shift3-chunks0]",
"dask/array/tests/test_array_core.py::test_roll[1-shift3-chunks1]",
"dask/array/tests/test_array_core.py::test_roll[1-shift4-chunks0]",
"dask/array/tests/test_array_core.py::test_roll[1-shift4-chunks1]",
"dask/array/tests/test_array_core.py::test_roll[-1-3-chunks0]",
"dask/array/tests/test_array_core.py::test_roll[-1-3-chunks1]",
"dask/array/tests/test_array_core.py::test_roll[-1-7-chunks0]",
"dask/array/tests/test_array_core.py::test_roll[-1-7-chunks1]",
"dask/array/tests/test_array_core.py::test_roll[-1-9-chunks0]",
"dask/array/tests/test_array_core.py::test_roll[-1-9-chunks1]",
"dask/array/tests/test_array_core.py::test_roll[-1-shift3-chunks0]",
"dask/array/tests/test_array_core.py::test_roll[-1-shift3-chunks1]",
"dask/array/tests/test_array_core.py::test_roll[-1-shift4-chunks0]",
"dask/array/tests/test_array_core.py::test_roll[-1-shift4-chunks1]",
"dask/array/tests/test_array_core.py::test_roll[axis4-3-chunks0]",
"dask/array/tests/test_array_core.py::test_roll[axis4-3-chunks1]",
"dask/array/tests/test_array_core.py::test_roll[axis4-7-chunks0]",
"dask/array/tests/test_array_core.py::test_roll[axis4-7-chunks1]",
"dask/array/tests/test_array_core.py::test_roll[axis4-9-chunks0]",
"dask/array/tests/test_array_core.py::test_roll[axis4-9-chunks1]",
"dask/array/tests/test_array_core.py::test_roll[axis4-shift3-chunks0]",
"dask/array/tests/test_array_core.py::test_roll[axis4-shift3-chunks1]",
"dask/array/tests/test_array_core.py::test_roll[axis4-shift4-chunks0]",
"dask/array/tests/test_array_core.py::test_roll[axis4-shift4-chunks1]",
"dask/array/tests/test_array_core.py::test_roll[axis5-3-chunks0]",
"dask/array/tests/test_array_core.py::test_roll[axis5-3-chunks1]",
"dask/array/tests/test_array_core.py::test_roll[axis5-7-chunks0]",
"dask/array/tests/test_array_core.py::test_roll[axis5-7-chunks1]",
"dask/array/tests/test_array_core.py::test_roll[axis5-9-chunks0]",
"dask/array/tests/test_array_core.py::test_roll[axis5-9-chunks1]",
"dask/array/tests/test_array_core.py::test_roll[axis5-shift3-chunks0]",
"dask/array/tests/test_array_core.py::test_roll[axis5-shift3-chunks1]",
"dask/array/tests/test_array_core.py::test_roll[axis5-shift4-chunks0]",
"dask/array/tests/test_array_core.py::test_roll[axis5-shift4-chunks1]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape0-new_shape0-chunks0]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape1-new_shape1-5]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape2-new_shape2-5]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape3-new_shape3-12]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape4-new_shape4-12]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape5-new_shape5-chunks5]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape6-new_shape6-4]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape7-new_shape7-4]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape8-new_shape8-4]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape9-new_shape9-2]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape10-new_shape10-2]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape11-new_shape11-2]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape12-new_shape12-2]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape13-new_shape13-2]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape14-new_shape14-2]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape15-new_shape15-2]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape16-new_shape16-chunks16]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape17-new_shape17-3]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape18-new_shape18-4]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape19-new_shape19-chunks19]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape20-new_shape20-1]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape21-new_shape21-1]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape22-new_shape22-24]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape23-new_shape23-6]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape24-new_shape24-6]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape25-new_shape25-6]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape26-new_shape26-chunks26]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape27-new_shape27-chunks27]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape28-new_shape28-chunks28]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape29-new_shape29-chunks29]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape30-new_shape30-chunks30]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape31-new_shape31-chunks31]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape32-new_shape32-chunks32]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape33-new_shape33-chunks33]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape34-new_shape34-chunks34]",
"dask/array/tests/test_array_core.py::test_reshape_exceptions",
"dask/array/tests/test_array_core.py::test_reshape_splat",
"dask/array/tests/test_array_core.py::test_reshape_fails_for_dask_only",
"dask/array/tests/test_array_core.py::test_reshape_unknown_dimensions",
"dask/array/tests/test_array_core.py::test_full",
"dask/array/tests/test_array_core.py::test_map_blocks",
"dask/array/tests/test_array_core.py::test_map_blocks2",
"dask/array/tests/test_array_core.py::test_map_blocks_with_constants",
"dask/array/tests/test_array_core.py::test_map_blocks_with_kwargs",
"dask/array/tests/test_array_core.py::test_map_blocks_with_chunks",
"dask/array/tests/test_array_core.py::test_map_blocks_dtype_inference",
"dask/array/tests/test_array_core.py::test_fromfunction",
"dask/array/tests/test_array_core.py::test_from_function_requires_block_args",
"dask/array/tests/test_array_core.py::test_repr",
"dask/array/tests/test_array_core.py::test_slicing_with_ellipsis",
"dask/array/tests/test_array_core.py::test_slicing_with_ndarray",
"dask/array/tests/test_array_core.py::test_dtype",
"dask/array/tests/test_array_core.py::test_blockdims_from_blockshape",
"dask/array/tests/test_array_core.py::test_coerce",
"dask/array/tests/test_array_core.py::test_store_delayed_target",
"dask/array/tests/test_array_core.py::test_store",
"dask/array/tests/test_array_core.py::test_store_regions",
"dask/array/tests/test_array_core.py::test_store_compute_false",
"dask/array/tests/test_array_core.py::test_store_locks",
"dask/array/tests/test_array_core.py::test_np_array_with_zero_dimensions",
"dask/array/tests/test_array_core.py::test_unique",
"dask/array/tests/test_array_core.py::test_dtype_complex",
"dask/array/tests/test_array_core.py::test_astype",
"dask/array/tests/test_array_core.py::test_arithmetic",
"dask/array/tests/test_array_core.py::test_elemwise_consistent_names",
"dask/array/tests/test_array_core.py::test_optimize",
"dask/array/tests/test_array_core.py::test_slicing_with_non_ndarrays",
"dask/array/tests/test_array_core.py::test_getter",
"dask/array/tests/test_array_core.py::test_squeeze",
"dask/array/tests/test_array_core.py::test_size",
"dask/array/tests/test_array_core.py::test_nbytes",
"dask/array/tests/test_array_core.py::test_itemsize",
"dask/array/tests/test_array_core.py::test_Array_normalizes_dtype",
"dask/array/tests/test_array_core.py::test_from_array_with_lock",
"dask/array/tests/test_array_core.py::test_from_array_no_asarray",
"dask/array/tests/test_array_core.py::test_from_array_getitem",
"dask/array/tests/test_array_core.py::test_asarray",
"dask/array/tests/test_array_core.py::test_asanyarray",
"dask/array/tests/test_array_core.py::test_from_func",
"dask/array/tests/test_array_core.py::test_topk",
"dask/array/tests/test_array_core.py::test_topk_k_bigger_than_chunk",
"dask/array/tests/test_array_core.py::test_bincount",
"dask/array/tests/test_array_core.py::test_bincount_with_weights",
"dask/array/tests/test_array_core.py::test_bincount_raises_informative_error_on_missing_minlength_kwarg",
"dask/array/tests/test_array_core.py::test_digitize",
"dask/array/tests/test_array_core.py::test_histogram",
"dask/array/tests/test_array_core.py::test_histogram_alternative_bins_range",
"dask/array/tests/test_array_core.py::test_histogram_return_type",
"dask/array/tests/test_array_core.py::test_histogram_extra_args_and_shapes",
"dask/array/tests/test_array_core.py::test_concatenate3_2",
"dask/array/tests/test_array_core.py::test_map_blocks3",
"dask/array/tests/test_array_core.py::test_from_array_with_missing_chunks",
"dask/array/tests/test_array_core.py::test_take_dask_from_numpy",
"dask/array/tests/test_array_core.py::test_normalize_chunks",
"dask/array/tests/test_array_core.py::test_raise_on_no_chunks",
"dask/array/tests/test_array_core.py::test_chunks_is_immutable",
"dask/array/tests/test_array_core.py::test_raise_on_bad_kwargs",
"dask/array/tests/test_array_core.py::test_long_slice",
"dask/array/tests/test_array_core.py::test_ellipsis_slicing",
"dask/array/tests/test_array_core.py::test_point_slicing",
"dask/array/tests/test_array_core.py::test_point_slicing_with_full_slice",
"dask/array/tests/test_array_core.py::test_slice_with_floats",
"dask/array/tests/test_array_core.py::test_vindex_errors",
"dask/array/tests/test_array_core.py::test_vindex_merge",
"dask/array/tests/test_array_core.py::test_empty_array",
"dask/array/tests/test_array_core.py::test_array",
"dask/array/tests/test_array_core.py::test_cov",
"dask/array/tests/test_array_core.py::test_corrcoef",
"dask/array/tests/test_array_core.py::test_memmap",
"dask/array/tests/test_array_core.py::test_to_npy_stack",
"dask/array/tests/test_array_core.py::test_view",
"dask/array/tests/test_array_core.py::test_view_fortran",
"dask/array/tests/test_array_core.py::test_map_blocks_with_changed_dimension",
"dask/array/tests/test_array_core.py::test_broadcast_chunks",
"dask/array/tests/test_array_core.py::test_chunks_error",
"dask/array/tests/test_array_core.py::test_array_compute_forward_kwargs",
"dask/array/tests/test_array_core.py::test_dont_fuse_outputs",
"dask/array/tests/test_array_core.py::test_dont_dealias_outputs",
"dask/array/tests/test_array_core.py::test_timedelta_op",
"dask/array/tests/test_array_core.py::test_to_delayed",
"dask/array/tests/test_array_core.py::test_to_delayed_optimizes",
"dask/array/tests/test_array_core.py::test_cumulative",
"dask/array/tests/test_array_core.py::test_eye",
"dask/array/tests/test_array_core.py::test_diag",
"dask/array/tests/test_array_core.py::test_tril_triu",
"dask/array/tests/test_array_core.py::test_tril_triu_errors",
"dask/array/tests/test_array_core.py::test_atop_names",
"dask/array/tests/test_array_core.py::test_atop_new_axes",
"dask/array/tests/test_array_core.py::test_atop_kwargs",
"dask/array/tests/test_array_core.py::test_atop_chunks",
"dask/array/tests/test_array_core.py::test_from_delayed",
"dask/array/tests/test_array_core.py::test_A_property",
"dask/array/tests/test_array_core.py::test_copy_mutate",
"dask/array/tests/test_array_core.py::test_npartitions",
"dask/array/tests/test_array_core.py::test_astype_gh1151",
"dask/array/tests/test_array_core.py::test_elemwise_name",
"dask/array/tests/test_array_core.py::test_map_blocks_name",
"dask/array/tests/test_array_core.py::test_from_array_names",
"dask/array/tests/test_array_core.py::test_array_picklable",
"dask/array/tests/test_array_core.py::test_swapaxes",
"dask/array/tests/test_array_core.py::test_from_array_raises_on_bad_chunks",
"dask/array/tests/test_array_core.py::test_concatenate_axes",
"dask/array/tests/test_array_core.py::test_atop_concatenate",
"dask/array/tests/test_array_core.py::test_common_blockdim",
"dask/array/tests/test_array_core.py::test_uneven_chunks_that_fit_neatly",
"dask/array/tests/test_array_core.py::test_elemwise_uneven_chunks",
"dask/array/tests/test_array_core.py::test_uneven_chunks_atop",
"dask/array/tests/test_array_core.py::test_warn_bad_rechunking",
"dask/array/tests/test_array_core.py::test_optimize_fuse_keys",
"dask/array/tests/test_array_core.py::test_round",
"dask/array/tests/test_array_core.py::test_tile[0-shape0-chunks0]",
"dask/array/tests/test_array_core.py::test_tile[0-shape1-chunks1]",
"dask/array/tests/test_array_core.py::test_tile[1-shape0-chunks0]",
"dask/array/tests/test_array_core.py::test_tile[1-shape1-chunks1]",
"dask/array/tests/test_array_core.py::test_tile[2-shape0-chunks0]",
"dask/array/tests/test_array_core.py::test_tile[2-shape1-chunks1]",
"dask/array/tests/test_array_core.py::test_tile[3-shape0-chunks0]",
"dask/array/tests/test_array_core.py::test_tile[3-shape1-chunks1]",
"dask/array/tests/test_array_core.py::test_tile[5-shape0-chunks0]",
"dask/array/tests/test_array_core.py::test_tile[5-shape1-chunks1]",
"dask/array/tests/test_array_core.py::test_tile_neg_reps[-1-shape0-chunks0]",
"dask/array/tests/test_array_core.py::test_tile_neg_reps[-1-shape1-chunks1]",
"dask/array/tests/test_array_core.py::test_tile_neg_reps[-5-shape0-chunks0]",
"dask/array/tests/test_array_core.py::test_tile_neg_reps[-5-shape1-chunks1]",
"dask/array/tests/test_array_core.py::test_tile_array_reps[reps0-shape0-chunks0]",
"dask/array/tests/test_array_core.py::test_tile_array_reps[reps0-shape1-chunks1]",
"dask/array/tests/test_array_core.py::test_tile_array_reps[reps1-shape0-chunks0]",
"dask/array/tests/test_array_core.py::test_tile_array_reps[reps1-shape1-chunks1]",
"dask/array/tests/test_array_core.py::test_concatenate_stack_dont_warn",
"dask/array/tests/test_array_core.py::test_map_blocks_delayed",
"dask/array/tests/test_array_core.py::test_no_chunks",
"dask/array/tests/test_array_core.py::test_no_chunks_2d",
"dask/array/tests/test_array_core.py::test_no_chunks_yes_chunks",
"dask/array/tests/test_array_core.py::test_raise_informative_errors_no_chunks",
"dask/array/tests/test_array_core.py::test_no_chunks_slicing_2d",
"dask/array/tests/test_array_core.py::test_index_array_with_array_1d",
"dask/array/tests/test_array_core.py::test_index_array_with_array_2d",
"dask/array/tests/test_array_core.py::test_setitem_1d",
"dask/array/tests/test_array_core.py::test_setitem_2d",
"dask/array/tests/test_array_core.py::test_setitem_errs",
"dask/array/tests/test_array_core.py::test_zero_slice_dtypes",
"dask/array/tests/test_array_core.py::test_zero_sized_array_rechunk",
"dask/array/tests/test_array_core.py::test_atop_zero_shape",
"dask/array/tests/test_array_core.py::test_atop_zero_shape_new_axes",
"dask/array/tests/test_array_core.py::test_broadcast_against_zero_shape",
"dask/array/tests/test_array_core.py::test_fast_from_array",
"dask/array/tests/test_array_core.py::test_random_from_array",
"dask/array/tests/test_array_core.py::test_concatenate_errs",
"dask/array/tests/test_array_core.py::test_stack_errs",
"dask/array/tests/test_array_core.py::test_transpose_negative_axes",
"dask/array/tests/test_array_core.py::test_atop_with_numpy_arrays",
"dask/array/tests/test_array_core.py::test_elemwise_with_lists[other0-100]",
"dask/array/tests/test_array_core.py::test_elemwise_with_lists[other0-6]",
"dask/array/tests/test_array_core.py::test_elemwise_with_lists[other1-100]",
"dask/array/tests/test_array_core.py::test_elemwise_with_lists[other1-6]",
"dask/array/tests/test_array_core.py::test_elemwise_with_lists[other2-100]",
"dask/array/tests/test_array_core.py::test_elemwise_with_lists[other2-6]",
"dask/array/tests/test_array_core.py::test_constructor_plugin",
"dask/array/tests/test_array_core.py::test_no_warnings_on_metadata"
] | [] | BSD 3-Clause "New" or "Revised" License | 1,512 | 295 | [
"dask/array/core.py"
] |
dask__dask-2544 | 6d58b523a53bee22a76ea9860ca1a131b2f9312d | 2017-07-24 21:46:17 | c560965c8fc0da7cbc0920d43b7011d2721307d3 | jakirkham: Thanks @jcrist. LGTM. | diff --git a/dask/array/core.py b/dask/array/core.py
index c880fa0cd..2371200c4 100644
--- a/dask/array/core.py
+++ b/dask/array/core.py
@@ -3955,6 +3955,11 @@ def repeat(a, repeats, axis=None):
if not isinstance(repeats, Integral):
raise NotImplementedError("Only integer valued repeats supported")
+ if -a.ndim <= axis < 0:
+ axis += a.ndim
+ elif not 0 <= axis <= a.ndim - 1:
+ raise ValueError("axis(=%d) out of bounds" % axis)
+
if repeats == 1:
return a
| Dask array repeat mishandles negative axis
When providing `repeat` with a valid negative value for `axis`, it appears to mishandle and raise an `IndexError` instead. An example of the problem is included in the details below along with the environment used to reproduce it.
Note: These all come from `conda-forge` where possible, but it seems `conda` stopped exporting that info. Have raised that problem in issue ( https://github.com/conda/conda/issues/5706 ).
Traceback:
<details>
```python
In [1]: import dask.array as da
In [2]: a = da.ones((4, 6), chunks=(2, 3))
In [3]: da.repeat(a, 2, axis=1)
Out[3]: dask.array<concatenate, shape=(4, 12), dtype=float64, chunksize=(2, 6)>
In [4]: da.repeat(a, 2, axis=-1)
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-4-1ea79a1dccaa> in <module>()
----> 1 da.repeat(a, 2, axis=-1)
/zopt/conda2/envs/daskenv/lib/python2.7/site-packages/dask/array/core.pyc in repeat(a, repeats, axis)
3971 for s in slices]
3972
-> 3973 slabs = [a[slc] for slc in slices]
3974
3975 out = []
/zopt/conda2/envs/daskenv/lib/python2.7/site-packages/dask/array/core.pyc in __getitem__(self, index)
1233 return self
1234
-> 1235 dsk, chunks = slice_array(out, self.name, self.chunks, index)
1236
1237 dsk2 = sharedict.merge(self.dask, (out, dsk))
/zopt/conda2/envs/daskenv/lib/python2.7/site-packages/dask/array/slicing.pyc in slice_array(out_name, in_name, blockdims, index)
158
159 # Pass down to next function
--> 160 dsk_out, bd_out = slice_with_newaxes(out_name, in_name, blockdims, index)
161
162 bd_out = tuple(map(tuple, bd_out))
/zopt/conda2/envs/daskenv/lib/python2.7/site-packages/dask/array/slicing.pyc in slice_with_newaxes(out_name, in_name, blockdims, index)
180
181 # Pass down and do work
--> 182 dsk, blockdims2 = slice_wrap_lists(out_name, in_name, blockdims, index2)
183
184 if where_none:
/zopt/conda2/envs/daskenv/lib/python2.7/site-packages/dask/array/slicing.pyc in slice_wrap_lists(out_name, in_name, blockdims, index)
220 for i in index)
221 if not len(blockdims) == len(index):
--> 222 raise IndexError("Too many indices for array")
223
224 for bd_size, i in zip(shape, index):
IndexError: Too many indices for array
```
</details>
<br>
Environment:
<br>
<br>
<details>
```yaml
name: daskenv
channels:
- conda-forge
- defaults
dependencies:
- alabaster=0.7.10=py27_0
- appnope=0.1.0=py27_0
- asn1crypto=0.22.0=py27_0
- babel=2.3.4=py27_0
- backports.lzma=0.0.8=py27_0
- backports.shutil_get_terminal_size=1.0.0=py27_1
- backports_abc=0.5=py27_0
- bkcharts=0.2=py27_0
- blas=1.1=openblas
- bokeh=0.12.6=py27_0
- boto3=1.4.4=py27_0
- botocore=1.5.84=py27_0
- ca-certificates=2017.4.17=0
- certifi=2017.4.17=py27_0
- cffi=1.10.0=py27_0
- chardet=3.0.2=py27_1
- click=6.7=py27_0
- cloudpickle=0.3.1=py27_0
- configparser=3.5.0=py27_0
- coverage=4.4.1=py27_0
- cryptography=1.9=py27_0
- cycler=0.10.0=py27_0
- dask=0.15.1=py27_0
- decorator=4.1.1=py27_0
- distributed=1.18.0=py27_0
- docutils=0.13.1=py27_0
- enum34=1.1.6=py27_1
- flake8=3.3.0=py27_0
- freetype=2.7=1
- functools32=3.2.3.2=py27_1
- futures=3.0.5=py27_0
- graphviz=2.38.0=4
- h5py=2.7.0=np113py27_1
- hdf5=1.8.18=0
- heapdict=1.0.0=py27_0
- idna=2.5=py27_0
- imagesize=0.7.1=py27_0
- ipaddress=1.0.18=py27_0
- ipython=5.4.1=py27_0
- ipython_genutils=0.2.0=py27_0
- jinja2=2.9.5=py27_0
- jmespath=0.9.3=py27_0
- jpeg=9b=0
- libffi=3.2.1=3
- libgfortran=3.0.0=0
- libpng=1.6.28=0
- libtiff=4.0.6=7
- locket=0.2.0=py27_1
- markupsafe=1.0=py27_0
- matplotlib=2.0.2=np113py27_0
- mccabe=0.6.1=py27_0
- msgpack-python=0.4.8=py27_0
- ncurses=5.9=10
- numpy=1.13.1=py27_blas_openblas_200
- numpydoc=0.7.0=py27_0
- openblas=0.2.19=2
- openssl=1.0.2l=0
- pandas=0.20.3=py27_1
- partd=0.3.8=py27_0
- pathlib2=2.3.0=py27_0
- pexpect=4.2.1=py27_0
- pickleshare=0.7.3=py27_0
- pip=9.0.1=py27_0
- prompt_toolkit=1.0.14=py27_0
- psutil=5.2.1=py27_0
- ptyprocess=0.5.2=py27_0
- py=1.4.34=py27_0
- pycodestyle=2.3.1=py27_0
- pycparser=2.18=py27_0
- pyfftw=0.10.4=np113py27_1
- pyflakes=1.5.0=py27_0
- pygments=2.2.0=py27_0
- pyopenssl=16.2.0=py27_0
- pyparsing=2.2.0=py27_0
- pysocks=1.6.7=py27_0
- pytest=3.1.3=py27_0
- pytest-cov=2.5.1=py27_0
- python=2.7.13=1
- python-dateutil=2.6.1=py27_0
- python-graphviz=0.7.1=py27_0
- pytz=2017.2=py27_0
- pyyaml=3.12=py27_1
- readline=6.2=0
- requests=2.18.1=py27_0
- s3fs=0.1.2=py27_0
- s3transfer=0.1.10=py27_1
- scandir=1.5=py27_1
- scipy=0.19.1=np113py27_blas_openblas_200
- setuptools=33.1.1=py27_0
- simplegeneric=0.8.1=py27_0
- singledispatch=3.4.0.3=py27_0
- six=1.10.0=py27_1
- snowballstemmer=1.2.1=py27_0
- sortedcontainers=1.5.3=py27_0
- sphinx=1.6.3=py27_0
- sphinx_rtd_theme=0.2.4=py27_0
- sphinxcontrib-websupport=1.0.1=py27_0
- sqlite=3.13.0=1
- ssl_match_hostname=3.5.0.1=py27_1
- subprocess32=3.2.7=py27_0
- tblib=1.3.2=py27_0
- tk=8.5.19=1
- toolz=0.8.2=py27_0
- tornado=4.5.1=py27_0
- traitlets=4.3.2=py27_0
- typing=3.6.1=py27_0
- urllib3=1.21.1=py27_1
- wcwidth=0.1.7=py27_0
- wheel=0.29.0=py27_0
- xz=5.2.2=0
- yaml=0.1.6=0
- zict=0.1.2=py27_0
- zlib=1.2.11=0
- pip:
- backports-abc==0.5
- backports.shutil-get-terminal-size==1.0.0
- backports.ssl-match-hostname==3.5.0.1
- dask (/zopt/conda2/envs/daskenv/lib/python2.7/site-packages)==0.15.1
- ipython-genutils==0.2.0
- prompt-toolkit==1.0.14
- sphinx-rtd-theme==0.2.4
```
</details> | dask/dask | diff --git a/dask/array/tests/test_array_core.py b/dask/array/tests/test_array_core.py
index 701d2674a..9a48a0734 100644
--- a/dask/array/tests/test_array_core.py
+++ b/dask/array/tests/test_array_core.py
@@ -2764,7 +2764,7 @@ def test_repeat():
d = da.from_array(x, chunks=(4, 5, 3))
repeats = [1, 2, 5]
- axes = [0, 1, 2]
+ axes = [-3, -2, -1, 0, 1, 2]
for r in repeats:
for a in axes:
@@ -2781,6 +2781,10 @@ def test_repeat():
with pytest.raises(NotImplementedError):
da.repeat(d, 2)
+ for invalid_axis in [3, -4]:
+ with pytest.raises(ValueError):
+ da.repeat(d, 2, axis=invalid_axis)
+
x = np.arange(5)
d = da.arange(5, chunks=(2,))
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 1
} | 0.15 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[complete]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio",
"numpy>=1.16.0",
"pandas>=1.0.0"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | aiobotocore==2.1.2
aiohttp==3.8.6
aioitertools==0.11.0
aiosignal==1.2.0
async-timeout==4.0.2
asynctest==0.13.0
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
botocore==1.23.24
certifi==2021.5.30
charset-normalizer==3.0.1
click==8.0.4
cloudpickle==2.2.1
-e git+https://github.com/dask/dask.git@6d58b523a53bee22a76ea9860ca1a131b2f9312d#egg=dask
distributed==1.19.3
execnet==1.9.0
frozenlist==1.2.0
fsspec==2022.1.0
HeapDict==1.0.1
idna==3.10
idna-ssl==1.1.0
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
jmespath==0.10.0
locket==1.0.0
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
msgpack-python==0.5.6
multidict==5.2.0
numpy==1.19.5
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pandas==1.1.5
partd==1.2.0
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
psutil==7.0.0
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
pytest-asyncio==0.16.0
pytest-mock==3.6.1
pytest-xdist==3.0.2
python-dateutil==2.9.0.post0
pytz==2025.2
s3fs==2022.1.0
six==1.17.0
sortedcontainers==2.4.0
tblib==1.7.0
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
toolz==0.12.0
tornado==6.1
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
urllib3==1.26.20
wrapt==1.16.0
yarl==1.7.2
zict==2.1.0
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: dask
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- aiobotocore==2.1.2
- aiohttp==3.8.6
- aioitertools==0.11.0
- aiosignal==1.2.0
- async-timeout==4.0.2
- asynctest==0.13.0
- botocore==1.23.24
- charset-normalizer==3.0.1
- click==8.0.4
- cloudpickle==2.2.1
- distributed==1.19.3
- execnet==1.9.0
- frozenlist==1.2.0
- fsspec==2022.1.0
- heapdict==1.0.1
- idna==3.10
- idna-ssl==1.1.0
- jmespath==0.10.0
- locket==1.0.0
- msgpack-python==0.5.6
- multidict==5.2.0
- numpy==1.19.5
- pandas==1.1.5
- partd==1.2.0
- psutil==7.0.0
- pytest-asyncio==0.16.0
- pytest-mock==3.6.1
- pytest-xdist==3.0.2
- python-dateutil==2.9.0.post0
- pytz==2025.2
- s3fs==2022.1.0
- six==1.17.0
- sortedcontainers==2.4.0
- tblib==1.7.0
- toolz==0.12.0
- tornado==6.1
- urllib3==1.26.20
- wrapt==1.16.0
- yarl==1.7.2
- zict==2.1.0
prefix: /opt/conda/envs/dask
| [
"dask/array/tests/test_array_core.py::test_repeat"
] | [
"dask/array/tests/test_array_core.py::test_concatenate_unknown_axes",
"dask/array/tests/test_array_core.py::test_field_access",
"dask/array/tests/test_array_core.py::test_field_access_with_shape",
"dask/array/tests/test_array_core.py::test_matmul",
"dask/array/tests/test_array_core.py::test_to_dask_dataframe"
] | [
"dask/array/tests/test_array_core.py::test_getem",
"dask/array/tests/test_array_core.py::test_top",
"dask/array/tests/test_array_core.py::test_top_supports_broadcasting_rules",
"dask/array/tests/test_array_core.py::test_concatenate3_on_scalars",
"dask/array/tests/test_array_core.py::test_chunked_dot_product",
"dask/array/tests/test_array_core.py::test_chunked_transpose_plus_one",
"dask/array/tests/test_array_core.py::test_transpose",
"dask/array/tests/test_array_core.py::test_broadcast_dimensions_works_with_singleton_dimensions",
"dask/array/tests/test_array_core.py::test_broadcast_dimensions",
"dask/array/tests/test_array_core.py::test_Array",
"dask/array/tests/test_array_core.py::test_uneven_chunks",
"dask/array/tests/test_array_core.py::test_numblocks_suppoorts_singleton_block_dims",
"dask/array/tests/test_array_core.py::test_keys",
"dask/array/tests/test_array_core.py::test_Array_computation",
"dask/array/tests/test_array_core.py::test_stack",
"dask/array/tests/test_array_core.py::test_short_stack",
"dask/array/tests/test_array_core.py::test_stack_scalars",
"dask/array/tests/test_array_core.py::test_stack_promote_type",
"dask/array/tests/test_array_core.py::test_stack_rechunk",
"dask/array/tests/test_array_core.py::test_concatenate",
"dask/array/tests/test_array_core.py::test_concatenate_rechunk",
"dask/array/tests/test_array_core.py::test_concatenate_fixlen_strings",
"dask/array/tests/test_array_core.py::test_vstack",
"dask/array/tests/test_array_core.py::test_hstack",
"dask/array/tests/test_array_core.py::test_dstack",
"dask/array/tests/test_array_core.py::test_take",
"dask/array/tests/test_array_core.py::test_compress",
"dask/array/tests/test_array_core.py::test_binops",
"dask/array/tests/test_array_core.py::test_isnull",
"dask/array/tests/test_array_core.py::test_isclose",
"dask/array/tests/test_array_core.py::test_broadcast_shapes",
"dask/array/tests/test_array_core.py::test_elemwise_on_scalars",
"dask/array/tests/test_array_core.py::test_partial_by_order",
"dask/array/tests/test_array_core.py::test_elemwise_with_ndarrays",
"dask/array/tests/test_array_core.py::test_elemwise_differently_chunked",
"dask/array/tests/test_array_core.py::test_operators",
"dask/array/tests/test_array_core.py::test_operator_dtype_promotion",
"dask/array/tests/test_array_core.py::test_tensordot",
"dask/array/tests/test_array_core.py::test_tensordot_2[0]",
"dask/array/tests/test_array_core.py::test_tensordot_2[1]",
"dask/array/tests/test_array_core.py::test_tensordot_2[axes2]",
"dask/array/tests/test_array_core.py::test_tensordot_2[axes3]",
"dask/array/tests/test_array_core.py::test_tensordot_2[axes4]",
"dask/array/tests/test_array_core.py::test_tensordot_2[axes5]",
"dask/array/tests/test_array_core.py::test_tensordot_2[axes6]",
"dask/array/tests/test_array_core.py::test_dot_method",
"dask/array/tests/test_array_core.py::test_T",
"dask/array/tests/test_array_core.py::test_norm",
"dask/array/tests/test_array_core.py::test_choose",
"dask/array/tests/test_array_core.py::test_where",
"dask/array/tests/test_array_core.py::test_where_bool_optimization",
"dask/array/tests/test_array_core.py::test_where_has_informative_error",
"dask/array/tests/test_array_core.py::test_coarsen",
"dask/array/tests/test_array_core.py::test_coarsen_with_excess",
"dask/array/tests/test_array_core.py::test_insert",
"dask/array/tests/test_array_core.py::test_multi_insert",
"dask/array/tests/test_array_core.py::test_broadcast_to",
"dask/array/tests/test_array_core.py::test_broadcast_to_array",
"dask/array/tests/test_array_core.py::test_broadcast_to_scalar",
"dask/array/tests/test_array_core.py::test_ravel",
"dask/array/tests/test_array_core.py::test_roll[None-3-chunks0]",
"dask/array/tests/test_array_core.py::test_roll[None-3-chunks1]",
"dask/array/tests/test_array_core.py::test_roll[None-7-chunks0]",
"dask/array/tests/test_array_core.py::test_roll[None-7-chunks1]",
"dask/array/tests/test_array_core.py::test_roll[None-9-chunks0]",
"dask/array/tests/test_array_core.py::test_roll[None-9-chunks1]",
"dask/array/tests/test_array_core.py::test_roll[None-shift3-chunks0]",
"dask/array/tests/test_array_core.py::test_roll[None-shift3-chunks1]",
"dask/array/tests/test_array_core.py::test_roll[None-shift4-chunks0]",
"dask/array/tests/test_array_core.py::test_roll[None-shift4-chunks1]",
"dask/array/tests/test_array_core.py::test_roll[0-3-chunks0]",
"dask/array/tests/test_array_core.py::test_roll[0-3-chunks1]",
"dask/array/tests/test_array_core.py::test_roll[0-7-chunks0]",
"dask/array/tests/test_array_core.py::test_roll[0-7-chunks1]",
"dask/array/tests/test_array_core.py::test_roll[0-9-chunks0]",
"dask/array/tests/test_array_core.py::test_roll[0-9-chunks1]",
"dask/array/tests/test_array_core.py::test_roll[0-shift3-chunks0]",
"dask/array/tests/test_array_core.py::test_roll[0-shift3-chunks1]",
"dask/array/tests/test_array_core.py::test_roll[0-shift4-chunks0]",
"dask/array/tests/test_array_core.py::test_roll[0-shift4-chunks1]",
"dask/array/tests/test_array_core.py::test_roll[1-3-chunks0]",
"dask/array/tests/test_array_core.py::test_roll[1-3-chunks1]",
"dask/array/tests/test_array_core.py::test_roll[1-7-chunks0]",
"dask/array/tests/test_array_core.py::test_roll[1-7-chunks1]",
"dask/array/tests/test_array_core.py::test_roll[1-9-chunks0]",
"dask/array/tests/test_array_core.py::test_roll[1-9-chunks1]",
"dask/array/tests/test_array_core.py::test_roll[1-shift3-chunks0]",
"dask/array/tests/test_array_core.py::test_roll[1-shift3-chunks1]",
"dask/array/tests/test_array_core.py::test_roll[1-shift4-chunks0]",
"dask/array/tests/test_array_core.py::test_roll[1-shift4-chunks1]",
"dask/array/tests/test_array_core.py::test_roll[-1-3-chunks0]",
"dask/array/tests/test_array_core.py::test_roll[-1-3-chunks1]",
"dask/array/tests/test_array_core.py::test_roll[-1-7-chunks0]",
"dask/array/tests/test_array_core.py::test_roll[-1-7-chunks1]",
"dask/array/tests/test_array_core.py::test_roll[-1-9-chunks0]",
"dask/array/tests/test_array_core.py::test_roll[-1-9-chunks1]",
"dask/array/tests/test_array_core.py::test_roll[-1-shift3-chunks0]",
"dask/array/tests/test_array_core.py::test_roll[-1-shift3-chunks1]",
"dask/array/tests/test_array_core.py::test_roll[-1-shift4-chunks0]",
"dask/array/tests/test_array_core.py::test_roll[-1-shift4-chunks1]",
"dask/array/tests/test_array_core.py::test_roll[axis4-3-chunks0]",
"dask/array/tests/test_array_core.py::test_roll[axis4-3-chunks1]",
"dask/array/tests/test_array_core.py::test_roll[axis4-7-chunks0]",
"dask/array/tests/test_array_core.py::test_roll[axis4-7-chunks1]",
"dask/array/tests/test_array_core.py::test_roll[axis4-9-chunks0]",
"dask/array/tests/test_array_core.py::test_roll[axis4-9-chunks1]",
"dask/array/tests/test_array_core.py::test_roll[axis4-shift3-chunks0]",
"dask/array/tests/test_array_core.py::test_roll[axis4-shift3-chunks1]",
"dask/array/tests/test_array_core.py::test_roll[axis4-shift4-chunks0]",
"dask/array/tests/test_array_core.py::test_roll[axis4-shift4-chunks1]",
"dask/array/tests/test_array_core.py::test_roll[axis5-3-chunks0]",
"dask/array/tests/test_array_core.py::test_roll[axis5-3-chunks1]",
"dask/array/tests/test_array_core.py::test_roll[axis5-7-chunks0]",
"dask/array/tests/test_array_core.py::test_roll[axis5-7-chunks1]",
"dask/array/tests/test_array_core.py::test_roll[axis5-9-chunks0]",
"dask/array/tests/test_array_core.py::test_roll[axis5-9-chunks1]",
"dask/array/tests/test_array_core.py::test_roll[axis5-shift3-chunks0]",
"dask/array/tests/test_array_core.py::test_roll[axis5-shift3-chunks1]",
"dask/array/tests/test_array_core.py::test_roll[axis5-shift4-chunks0]",
"dask/array/tests/test_array_core.py::test_roll[axis5-shift4-chunks1]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape0-new_shape0-chunks0]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape1-new_shape1-5]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape2-new_shape2-5]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape3-new_shape3-12]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape4-new_shape4-12]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape5-new_shape5-chunks5]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape6-new_shape6-4]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape7-new_shape7-4]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape8-new_shape8-4]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape9-new_shape9-2]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape10-new_shape10-2]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape11-new_shape11-2]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape12-new_shape12-2]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape13-new_shape13-2]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape14-new_shape14-2]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape15-new_shape15-2]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape16-new_shape16-chunks16]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape17-new_shape17-3]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape18-new_shape18-4]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape19-new_shape19-chunks19]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape20-new_shape20-1]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape21-new_shape21-1]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape22-new_shape22-24]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape23-new_shape23-6]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape24-new_shape24-6]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape25-new_shape25-6]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape26-new_shape26-chunks26]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape27-new_shape27-chunks27]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape28-new_shape28-chunks28]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape29-new_shape29-chunks29]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape30-new_shape30-chunks30]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape31-new_shape31-chunks31]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape32-new_shape32-chunks32]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape33-new_shape33-chunks33]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape34-new_shape34-chunks34]",
"dask/array/tests/test_array_core.py::test_reshape_exceptions",
"dask/array/tests/test_array_core.py::test_reshape_splat",
"dask/array/tests/test_array_core.py::test_reshape_fails_for_dask_only",
"dask/array/tests/test_array_core.py::test_reshape_unknown_dimensions",
"dask/array/tests/test_array_core.py::test_full",
"dask/array/tests/test_array_core.py::test_map_blocks",
"dask/array/tests/test_array_core.py::test_map_blocks2",
"dask/array/tests/test_array_core.py::test_map_blocks_with_constants",
"dask/array/tests/test_array_core.py::test_map_blocks_with_kwargs",
"dask/array/tests/test_array_core.py::test_map_blocks_with_chunks",
"dask/array/tests/test_array_core.py::test_map_blocks_dtype_inference",
"dask/array/tests/test_array_core.py::test_fromfunction",
"dask/array/tests/test_array_core.py::test_from_function_requires_block_args",
"dask/array/tests/test_array_core.py::test_repr",
"dask/array/tests/test_array_core.py::test_slicing_with_ellipsis",
"dask/array/tests/test_array_core.py::test_slicing_with_ndarray",
"dask/array/tests/test_array_core.py::test_dtype",
"dask/array/tests/test_array_core.py::test_blockdims_from_blockshape",
"dask/array/tests/test_array_core.py::test_coerce",
"dask/array/tests/test_array_core.py::test_store_delayed_target",
"dask/array/tests/test_array_core.py::test_store",
"dask/array/tests/test_array_core.py::test_store_regions",
"dask/array/tests/test_array_core.py::test_store_compute_false",
"dask/array/tests/test_array_core.py::test_store_locks",
"dask/array/tests/test_array_core.py::test_np_array_with_zero_dimensions",
"dask/array/tests/test_array_core.py::test_unique",
"dask/array/tests/test_array_core.py::test_dtype_complex",
"dask/array/tests/test_array_core.py::test_astype",
"dask/array/tests/test_array_core.py::test_arithmetic",
"dask/array/tests/test_array_core.py::test_elemwise_consistent_names",
"dask/array/tests/test_array_core.py::test_optimize",
"dask/array/tests/test_array_core.py::test_slicing_with_non_ndarrays",
"dask/array/tests/test_array_core.py::test_getter",
"dask/array/tests/test_array_core.py::test_squeeze",
"dask/array/tests/test_array_core.py::test_size",
"dask/array/tests/test_array_core.py::test_nbytes",
"dask/array/tests/test_array_core.py::test_itemsize",
"dask/array/tests/test_array_core.py::test_Array_normalizes_dtype",
"dask/array/tests/test_array_core.py::test_from_array_with_lock",
"dask/array/tests/test_array_core.py::test_from_array_no_asarray",
"dask/array/tests/test_array_core.py::test_from_array_getitem",
"dask/array/tests/test_array_core.py::test_asarray",
"dask/array/tests/test_array_core.py::test_asanyarray",
"dask/array/tests/test_array_core.py::test_from_func",
"dask/array/tests/test_array_core.py::test_topk",
"dask/array/tests/test_array_core.py::test_topk_k_bigger_than_chunk",
"dask/array/tests/test_array_core.py::test_bincount",
"dask/array/tests/test_array_core.py::test_bincount_with_weights",
"dask/array/tests/test_array_core.py::test_bincount_raises_informative_error_on_missing_minlength_kwarg",
"dask/array/tests/test_array_core.py::test_digitize",
"dask/array/tests/test_array_core.py::test_histogram",
"dask/array/tests/test_array_core.py::test_histogram_alternative_bins_range",
"dask/array/tests/test_array_core.py::test_histogram_return_type",
"dask/array/tests/test_array_core.py::test_histogram_extra_args_and_shapes",
"dask/array/tests/test_array_core.py::test_concatenate3_2",
"dask/array/tests/test_array_core.py::test_map_blocks3",
"dask/array/tests/test_array_core.py::test_from_array_with_missing_chunks",
"dask/array/tests/test_array_core.py::test_take_dask_from_numpy",
"dask/array/tests/test_array_core.py::test_normalize_chunks",
"dask/array/tests/test_array_core.py::test_raise_on_no_chunks",
"dask/array/tests/test_array_core.py::test_chunks_is_immutable",
"dask/array/tests/test_array_core.py::test_raise_on_bad_kwargs",
"dask/array/tests/test_array_core.py::test_long_slice",
"dask/array/tests/test_array_core.py::test_ellipsis_slicing",
"dask/array/tests/test_array_core.py::test_point_slicing",
"dask/array/tests/test_array_core.py::test_point_slicing_with_full_slice",
"dask/array/tests/test_array_core.py::test_slice_with_floats",
"dask/array/tests/test_array_core.py::test_vindex_errors",
"dask/array/tests/test_array_core.py::test_vindex_merge",
"dask/array/tests/test_array_core.py::test_empty_array",
"dask/array/tests/test_array_core.py::test_array",
"dask/array/tests/test_array_core.py::test_cov",
"dask/array/tests/test_array_core.py::test_corrcoef",
"dask/array/tests/test_array_core.py::test_memmap",
"dask/array/tests/test_array_core.py::test_to_npy_stack",
"dask/array/tests/test_array_core.py::test_view",
"dask/array/tests/test_array_core.py::test_view_fortran",
"dask/array/tests/test_array_core.py::test_map_blocks_with_changed_dimension",
"dask/array/tests/test_array_core.py::test_broadcast_chunks",
"dask/array/tests/test_array_core.py::test_chunks_error",
"dask/array/tests/test_array_core.py::test_array_compute_forward_kwargs",
"dask/array/tests/test_array_core.py::test_dont_fuse_outputs",
"dask/array/tests/test_array_core.py::test_dont_dealias_outputs",
"dask/array/tests/test_array_core.py::test_timedelta_op",
"dask/array/tests/test_array_core.py::test_to_delayed",
"dask/array/tests/test_array_core.py::test_to_delayed_optimizes",
"dask/array/tests/test_array_core.py::test_cumulative",
"dask/array/tests/test_array_core.py::test_eye",
"dask/array/tests/test_array_core.py::test_diag",
"dask/array/tests/test_array_core.py::test_tril_triu",
"dask/array/tests/test_array_core.py::test_tril_triu_errors",
"dask/array/tests/test_array_core.py::test_atop_names",
"dask/array/tests/test_array_core.py::test_atop_new_axes",
"dask/array/tests/test_array_core.py::test_atop_kwargs",
"dask/array/tests/test_array_core.py::test_atop_chunks",
"dask/array/tests/test_array_core.py::test_from_delayed",
"dask/array/tests/test_array_core.py::test_A_property",
"dask/array/tests/test_array_core.py::test_copy_mutate",
"dask/array/tests/test_array_core.py::test_npartitions",
"dask/array/tests/test_array_core.py::test_astype_gh1151",
"dask/array/tests/test_array_core.py::test_elemwise_name",
"dask/array/tests/test_array_core.py::test_map_blocks_name",
"dask/array/tests/test_array_core.py::test_from_array_names",
"dask/array/tests/test_array_core.py::test_array_picklable",
"dask/array/tests/test_array_core.py::test_swapaxes",
"dask/array/tests/test_array_core.py::test_from_array_raises_on_bad_chunks",
"dask/array/tests/test_array_core.py::test_concatenate_axes",
"dask/array/tests/test_array_core.py::test_atop_concatenate",
"dask/array/tests/test_array_core.py::test_common_blockdim",
"dask/array/tests/test_array_core.py::test_uneven_chunks_that_fit_neatly",
"dask/array/tests/test_array_core.py::test_elemwise_uneven_chunks",
"dask/array/tests/test_array_core.py::test_uneven_chunks_atop",
"dask/array/tests/test_array_core.py::test_warn_bad_rechunking",
"dask/array/tests/test_array_core.py::test_optimize_fuse_keys",
"dask/array/tests/test_array_core.py::test_round",
"dask/array/tests/test_array_core.py::test_tile[0-shape0-chunks0]",
"dask/array/tests/test_array_core.py::test_tile[0-shape1-chunks1]",
"dask/array/tests/test_array_core.py::test_tile[1-shape0-chunks0]",
"dask/array/tests/test_array_core.py::test_tile[1-shape1-chunks1]",
"dask/array/tests/test_array_core.py::test_tile[2-shape0-chunks0]",
"dask/array/tests/test_array_core.py::test_tile[2-shape1-chunks1]",
"dask/array/tests/test_array_core.py::test_tile[3-shape0-chunks0]",
"dask/array/tests/test_array_core.py::test_tile[3-shape1-chunks1]",
"dask/array/tests/test_array_core.py::test_tile[5-shape0-chunks0]",
"dask/array/tests/test_array_core.py::test_tile[5-shape1-chunks1]",
"dask/array/tests/test_array_core.py::test_tile_neg_reps[-1-shape0-chunks0]",
"dask/array/tests/test_array_core.py::test_tile_neg_reps[-1-shape1-chunks1]",
"dask/array/tests/test_array_core.py::test_tile_neg_reps[-5-shape0-chunks0]",
"dask/array/tests/test_array_core.py::test_tile_neg_reps[-5-shape1-chunks1]",
"dask/array/tests/test_array_core.py::test_tile_array_reps[reps0-shape0-chunks0]",
"dask/array/tests/test_array_core.py::test_tile_array_reps[reps0-shape1-chunks1]",
"dask/array/tests/test_array_core.py::test_tile_array_reps[reps1-shape0-chunks0]",
"dask/array/tests/test_array_core.py::test_tile_array_reps[reps1-shape1-chunks1]",
"dask/array/tests/test_array_core.py::test_concatenate_stack_dont_warn",
"dask/array/tests/test_array_core.py::test_map_blocks_delayed",
"dask/array/tests/test_array_core.py::test_no_chunks",
"dask/array/tests/test_array_core.py::test_no_chunks_2d",
"dask/array/tests/test_array_core.py::test_no_chunks_yes_chunks",
"dask/array/tests/test_array_core.py::test_raise_informative_errors_no_chunks",
"dask/array/tests/test_array_core.py::test_no_chunks_slicing_2d",
"dask/array/tests/test_array_core.py::test_index_array_with_array_1d",
"dask/array/tests/test_array_core.py::test_index_array_with_array_2d",
"dask/array/tests/test_array_core.py::test_setitem_1d",
"dask/array/tests/test_array_core.py::test_setitem_2d",
"dask/array/tests/test_array_core.py::test_setitem_errs",
"dask/array/tests/test_array_core.py::test_zero_slice_dtypes",
"dask/array/tests/test_array_core.py::test_zero_sized_array_rechunk",
"dask/array/tests/test_array_core.py::test_atop_zero_shape",
"dask/array/tests/test_array_core.py::test_atop_zero_shape_new_axes",
"dask/array/tests/test_array_core.py::test_broadcast_against_zero_shape",
"dask/array/tests/test_array_core.py::test_fast_from_array",
"dask/array/tests/test_array_core.py::test_random_from_array",
"dask/array/tests/test_array_core.py::test_concatenate_errs",
"dask/array/tests/test_array_core.py::test_stack_errs",
"dask/array/tests/test_array_core.py::test_transpose_negative_axes",
"dask/array/tests/test_array_core.py::test_atop_with_numpy_arrays",
"dask/array/tests/test_array_core.py::test_elemwise_with_lists[other0-100]",
"dask/array/tests/test_array_core.py::test_elemwise_with_lists[other0-6]",
"dask/array/tests/test_array_core.py::test_elemwise_with_lists[other1-100]",
"dask/array/tests/test_array_core.py::test_elemwise_with_lists[other1-6]",
"dask/array/tests/test_array_core.py::test_elemwise_with_lists[other2-100]",
"dask/array/tests/test_array_core.py::test_elemwise_with_lists[other2-6]",
"dask/array/tests/test_array_core.py::test_constructor_plugin",
"dask/array/tests/test_array_core.py::test_no_warnings_on_metadata"
] | [] | BSD 3-Clause "New" or "Revised" License | 1,513 | 167 | [
"dask/array/core.py"
] |
peterbe__premailer-185 | 597b57740c661df8b3f4f5bdec7c495afe955275 | 2017-07-25 15:57:29 | 597b57740c661df8b3f4f5bdec7c495afe955275 | coveralls:
[](https://coveralls.io/builds/12542863)
Coverage remained the same at 100.0% when pulling **b4509c51881fb2ace20f6af569f2e44aa0c900da on no-exclude_pseudoclasses-without-selector-value** into **597b57740c661df8b3f4f5bdec7c495afe955275 on master**.
| diff --git a/premailer/premailer.py b/premailer/premailer.py
index 7cfb186..0a8c40d 100644
--- a/premailer/premailer.py
+++ b/premailer/premailer.py
@@ -250,6 +250,9 @@ class Premailer(object):
continue
elif '*' in selector and not self.include_star_selectors:
continue
+ elif selector.startswith(':'):
+ continue
+
# Crudely calculate specificity
id_count = selector.count('#')
class_count = selector.count('.')
@@ -274,6 +277,7 @@ class Premailer(object):
len(rules) # this is the rule's index number
)
rules.append((specificity, selector, bulk))
+
return rules, leftover
def transform(self, pretty_print=True, **kwargs):
@@ -401,6 +405,7 @@ class Premailer(object):
else:
selector = new_selector
+ assert selector
sel = CSSSelector(selector)
items = sel(page)
if len(items):
| Crashing when using on Semantic with exclude_pseudoclasses=False
Tried running this script:
```
# Just a test script for premailer
import premailer
html = """
<html>
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/semantic-ui/2.2.9/semantic.min.css"/>
<style>
h1{ border: 1px solid blue}
h1:hover {border: 1px solid green}
</style>
<h1>Hey</h1>
</html>
"""
p = premailer.Premailer(html, exclude_pseudoclasses=False)
print(p.transform())
```
but it crashes and returns:
```
...
WARNING Property: Unknown Property name.
WARNING Property: Unknown Property name.
WARNING Property: Unknown Property name.
WARNING Property: Unknown Property name.
WARNING Property: Unknown Property name.
WARNING Property: Unknown Property name.
WARNING Property: Unknown Property name.
WARNING Property: Unknown Property name.
Traceback (most recent call last):
File "prem.py", line 18, in <module>
print(p.transform())
File "/home/jddantes/.conda/envs/pulse/lib/python3.6/site-packages/premailer/premailer.py", line 404, in transform
sel = CSSSelector(selector)
File "/home/jddantes/.conda/envs/pulse/lib/python3.6/site-packages/lxml/cssselect.py", line 94, in __init__
path = translator.css_to_xpath(css)
File "/home/jddantes/.conda/envs/pulse/lib/python3.6/site-packages/cssselect/xpath.py", line 192, in css_to_xpath
for selector in parse(css))
File "/home/jddantes/.conda/envs/pulse/lib/python3.6/site-packages/cssselect/parser.py", line 355, in parse
return list(parse_selector_group(stream))
File "/home/jddantes/.conda/envs/pulse/lib/python3.6/site-packages/cssselect/parser.py", line 370, in parse_selector_group
yield Selector(*parse_selector(stream))
File "/home/jddantes/.conda/envs/pulse/lib/python3.6/site-packages/cssselect/parser.py", line 378, in parse_selector
result, pseudo_element = parse_simple_selector(stream)
File "/home/jddantes/.conda/envs/pulse/lib/python3.6/site-packages/cssselect/parser.py", line 480, in parse_simple_selector
"Expected selector, got %s" % (stream.peek(),))
cssselect.parser.SelectorSyntaxError: Expected selector, got <EOF at 0>
```
premailer==3.0.0
lxml==3.8.0 | peterbe/premailer | diff --git a/premailer/tests/test_premailer.py b/premailer/tests/test_premailer.py
index dc7f2c1..fa5c2eb 100644
--- a/premailer/tests/test_premailer.py
+++ b/premailer/tests/test_premailer.py
@@ -2622,3 +2622,53 @@ sheet" type="text/css">
)
result_html = p.transform()
compare_html(expect_html, result_html)
+
+ def test_pseudo_selectors_without_selector(self):
+ """Happens when you have pseudo selectors without an actual selector.
+ Which means it's not possible to find it in the DOM.
+
+ For example:
+
+ <style>
+ :before{box-sizing:inherit}
+ </style>
+
+ Semantic-UI uses this in its normalizer.
+
+ Original issue: https://github.com/peterbe/premailer/issues/184
+ """
+
+ html = """
+ <html>
+ <style>
+ *,:after,:before{box-sizing:inherit}
+ h1{ border: 1px solid blue}
+ h1:hover {border: 1px solid green}
+
+ </style>
+ <h1>Hey</h1>
+ </html>
+ """
+
+ expect_html = """
+<html>
+ <head>
+ <style>
+ *,:after,:before{box-sizing:inherit}
+ h1{ border: 1px solid blue}
+ h1:hover {border: 1px solid green}
+
+ </style>
+ </head>
+ <body>
+ <h1 style="{border:1px solid blue} :hover{border:1px solid green}">Hey</h1>
+ </body>
+</html>
+ """
+ p = Premailer(
+ html,
+ exclude_pseudoclasses=False,
+ keep_style_tags=True,
+ )
+ result_html = p.transform()
+ compare_html(expect_html, result_html)
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 1
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"nose",
"mock",
"coverage",
"pytest"
],
"pre_install": [
"pip install tox coveralls"
],
"python": "3.5",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
charset-normalizer==2.0.12
coverage==6.2
coveralls==3.3.1
cssselect==1.1.0
cssutils==2.3.1
distlib==0.3.9
docopt==0.6.2
filelock==3.4.1
idna==3.10
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
lxml==5.3.1
mock==5.2.0
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
nose==1.3.7
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
platformdirs==2.4.0
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
-e git+https://github.com/peterbe/premailer.git@597b57740c661df8b3f4f5bdec7c495afe955275#egg=premailer
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
requests==2.27.1
six==1.17.0
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
tox==3.28.0
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
urllib3==1.26.20
virtualenv==20.17.1
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: premailer
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- charset-normalizer==2.0.12
- coverage==6.2
- coveralls==3.3.1
- cssselect==1.1.0
- cssutils==2.3.1
- distlib==0.3.9
- docopt==0.6.2
- filelock==3.4.1
- idna==3.10
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- lxml==5.3.1
- mock==5.2.0
- nose==1.3.7
- platformdirs==2.4.0
- requests==2.27.1
- six==1.17.0
- tox==3.28.0
- urllib3==1.26.20
- virtualenv==20.17.1
prefix: /opt/conda/envs/premailer
| [
"premailer/tests/test_premailer.py::Tests::test_pseudo_selectors_without_selector"
] | [] | [
"premailer/tests/test_premailer.py::Tests::test_3_digit_color_expand",
"premailer/tests/test_premailer.py::Tests::test_align_float_images",
"premailer/tests/test_premailer.py::Tests::test_apple_newsletter_example",
"premailer/tests/test_premailer.py::Tests::test_base_url_fixer",
"premailer/tests/test_premailer.py::Tests::test_base_url_with_path",
"premailer/tests/test_premailer.py::Tests::test_basic_html",
"premailer/tests/test_premailer.py::Tests::test_basic_html_shortcut_function",
"premailer/tests/test_premailer.py::Tests::test_basic_html_with_pseudo_selector",
"premailer/tests/test_premailer.py::Tests::test_basic_xml",
"premailer/tests/test_premailer.py::Tests::test_broken_xml",
"premailer/tests/test_premailer.py::Tests::test_capture_cssutils_logging",
"premailer/tests/test_premailer.py::Tests::test_child_selector",
"premailer/tests/test_premailer.py::Tests::test_command_line_fileinput_from_argument",
"premailer/tests/test_premailer.py::Tests::test_command_line_fileinput_from_stdin",
"premailer/tests/test_premailer.py::Tests::test_command_line_preserve_style_tags",
"premailer/tests/test_premailer.py::Tests::test_comments_in_media_queries",
"premailer/tests/test_premailer.py::Tests::test_css_disable_basic_html_attributes",
"premailer/tests/test_premailer.py::Tests::test_css_disable_leftover_css",
"premailer/tests/test_premailer.py::Tests::test_css_text",
"premailer/tests/test_premailer.py::Tests::test_css_text_with_only_body_present",
"premailer/tests/test_premailer.py::Tests::test_css_with_html_attributes",
"premailer/tests/test_premailer.py::Tests::test_css_with_pseudoclasses_excluded",
"premailer/tests/test_premailer.py::Tests::test_css_with_pseudoclasses_included",
"premailer/tests/test_premailer.py::Tests::test_disabled_validator",
"premailer/tests/test_premailer.py::Tests::test_doctype",
"premailer/tests/test_premailer.py::Tests::test_empty_style_tag",
"premailer/tests/test_premailer.py::Tests::test_external_links",
"premailer/tests/test_premailer.py::Tests::test_external_links_unfindable",
"premailer/tests/test_premailer.py::Tests::test_external_styles_and_links",
"premailer/tests/test_premailer.py::Tests::test_external_styles_on_http",
"premailer/tests/test_premailer.py::Tests::test_external_styles_on_https",
"premailer/tests/test_premailer.py::Tests::test_external_styles_with_base_url",
"premailer/tests/test_premailer.py::Tests::test_favour_rule_with_class_over_generic",
"premailer/tests/test_premailer.py::Tests::test_favour_rule_with_element_over_generic",
"premailer/tests/test_premailer.py::Tests::test_favour_rule_with_id_over_others",
"premailer/tests/test_premailer.py::Tests::test_favour_rule_with_important_over_others",
"premailer/tests/test_premailer.py::Tests::test_fontface_selectors_with_no_selectortext",
"premailer/tests/test_premailer.py::Tests::test_ignore_some_external_stylesheets",
"premailer/tests/test_premailer.py::Tests::test_ignore_some_incorrectly",
"premailer/tests/test_premailer.py::Tests::test_ignore_some_inline_stylesheets",
"premailer/tests/test_premailer.py::Tests::test_ignore_style_elements_with_media_attribute",
"premailer/tests/test_premailer.py::Tests::test_include_star_selector",
"premailer/tests/test_premailer.py::Tests::test_inline_important",
"premailer/tests/test_premailer.py::Tests::test_inline_wins_over_external",
"premailer/tests/test_premailer.py::Tests::test_keyframe_selectors",
"premailer/tests/test_premailer.py::Tests::test_last_child",
"premailer/tests/test_premailer.py::Tests::test_last_child_exclude_pseudo",
"premailer/tests/test_premailer.py::Tests::test_leftover_important",
"premailer/tests/test_premailer.py::Tests::test_links_without_protocol",
"premailer/tests/test_premailer.py::Tests::test_load_external_url",
"premailer/tests/test_premailer.py::Tests::test_mailto_url",
"premailer/tests/test_premailer.py::Tests::test_mediaquery",
"premailer/tests/test_premailer.py::Tests::test_merge_styles_basic",
"premailer/tests/test_premailer.py::Tests::test_merge_styles_non_trivial",
"premailer/tests/test_premailer.py::Tests::test_merge_styles_with_class",
"premailer/tests/test_premailer.py::Tests::test_merge_styles_with_unset",
"premailer/tests/test_premailer.py::Tests::test_mixed_pseudo_selectors",
"premailer/tests/test_premailer.py::Tests::test_multiple_style_elements",
"premailer/tests/test_premailer.py::Tests::test_multithreading",
"premailer/tests/test_premailer.py::Tests::test_parse_style_rules",
"premailer/tests/test_premailer.py::Tests::test_precedence_comparison",
"premailer/tests/test_premailer.py::Tests::test_prefer_inline_to_class",
"premailer/tests/test_premailer.py::Tests::test_remove_classes",
"premailer/tests/test_premailer.py::Tests::test_remove_unset_properties",
"premailer/tests/test_premailer.py::Tests::test_shortcut_function",
"premailer/tests/test_premailer.py::Tests::test_six_color",
"premailer/tests/test_premailer.py::Tests::test_strip_important",
"premailer/tests/test_premailer.py::Tests::test_style_attribute_specificity",
"premailer/tests/test_premailer.py::Tests::test_style_block_with_external_urls",
"premailer/tests/test_premailer.py::Tests::test_tel_url",
"premailer/tests/test_premailer.py::Tests::test_turnoff_cache_works_as_expected",
"premailer/tests/test_premailer.py::Tests::test_type_test",
"premailer/tests/test_premailer.py::Tests::test_uppercase_margin",
"premailer/tests/test_premailer.py::Tests::test_xml_cdata"
] | [] | BSD 3-Clause "New" or "Revised" License | 1,515 | 244 | [
"premailer/premailer.py"
] |
asottile__add-trailing-comma-34 | bd9cb3fdaecb61b016162d2071de2cf0ef631eaa | 2017-07-26 10:20:33 | 35ce9905dc9422e354c37fe5a941dc8198de4d56 | diff --git a/add_trailing_comma.py b/add_trailing_comma.py
index 006dfe8..846894c 100644
--- a/add_trailing_comma.py
+++ b/add_trailing_comma.py
@@ -341,11 +341,11 @@ def _fix_src(contents_text, py35_plus, py36_plus):
add_comma = not func.star_args or py36_plus
# functions can be treated as calls
fixes.append((add_comma, _find_call(func, i, tokens)))
- # Handle parenthesized things
- elif token.src == '(':
- fixes.append((False, _find_simple(i, tokens)))
elif key in visitor.literals:
fixes.append((True, _find_simple(i, tokens)))
+ # Handle parenthesized things, unhug of tuples, and comprehensions
+ elif token.src in START_BRACES:
+ fixes.append((False, _find_simple(i, tokens)))
for add_comma, fix_data in fixes:
if fix_data is not None:
| autopep8 and add-trailing-comma fight over indentation
Here's an (admittedly poorly-formatted) reproduction. FWIW I prefer add-trailing-comma's approach, but curious to hear your thoughts. This is obviously weird indentation but ideally they wouldn't fight. Do we need to make autopep8 smarter?
## what add-trailing-comma wants
```python
[a()
for b in c
if (
d
)
]
```
## what autopep8 wants
```python
[a()
for b in c
if (
d
)
]
``` | asottile/add-trailing-comma | diff --git a/tests/add_trailing_comma_test.py b/tests/add_trailing_comma_test.py
index a663f99..ff4a4a8 100644
--- a/tests/add_trailing_comma_test.py
+++ b/tests/add_trailing_comma_test.py
@@ -577,6 +577,23 @@ def test_noop_unhugs(src):
' ], None,\n'
')',
),
+ # Regression test for #32
+ (
+ '[a()\n'
+ ' for b in c\n'
+ ' if (\n'
+ ' d\n'
+ ' )\n'
+ ']',
+
+ '[\n'
+ ' a()\n'
+ ' for b in c\n'
+ ' if (\n'
+ ' d\n'
+ ' )\n'
+ ']',
+ ),
),
)
def test_fix_unhugs(src, expected):
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 1
} | 0.6 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"numpy>=1.16.0",
"pandas>=1.0.0"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/asottile/add-trailing-comma.git@bd9cb3fdaecb61b016162d2071de2cf0ef631eaa#egg=add_trailing_comma
exceptiongroup==1.2.2
iniconfig==2.1.0
numpy==2.0.2
packaging==24.2
pandas==2.2.3
pluggy==1.5.0
pytest==8.3.5
python-dateutil==2.9.0.post0
pytz==2025.2
six==1.17.0
tokenize_rt==6.1.0
tomli==2.2.1
tzdata==2025.2
| name: add-trailing-comma
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- numpy==2.0.2
- packaging==24.2
- pandas==2.2.3
- pluggy==1.5.0
- pytest==8.3.5
- python-dateutil==2.9.0.post0
- pytz==2025.2
- six==1.17.0
- tokenize-rt==6.1.0
- tomli==2.2.1
- tzdata==2025.2
prefix: /opt/conda/envs/add-trailing-comma
| [
"tests/add_trailing_comma_test.py::test_fix_unhugs[[a()\\n"
] | [
"tests/add_trailing_comma_test.py::test_fixes_literals[x",
"tests/add_trailing_comma_test.py::test_fixes_py35_plus_literals[x",
"tests/add_trailing_comma_test.py::test_fix_unhugs[x",
"tests/add_trailing_comma_test.py::test_fixes_calls[(\\n"
] | [
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x(1)]",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[tuple(\\n",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x(\\n",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x((\\n",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[(\\n",
"tests/add_trailing_comma_test.py::test_py35_plus_rewrite",
"tests/add_trailing_comma_test.py::test_fixes_calls[x(\\n",
"tests/add_trailing_comma_test.py::test_fixes_calls[foo()(\\n",
"tests/add_trailing_comma_test.py::test_fixes_calls[x({}).y(\\n",
"tests/add_trailing_comma_test.py::test_noop_literals[(1,",
"tests/add_trailing_comma_test.py::test_noop_literals[[1,",
"tests/add_trailing_comma_test.py::test_noop_literals[{1,",
"tests/add_trailing_comma_test.py::test_noop_literals[{1:",
"tests/add_trailing_comma_test.py::test_noop_literals[if",
"tests/add_trailing_comma_test.py::test_noop_tuple_literal_without_braces",
"tests/add_trailing_comma_test.py::test_noop_function_defs[def",
"tests/add_trailing_comma_test.py::test_fixes_defs[def",
"tests/add_trailing_comma_test.py::test_fixes_defs_py36_plus[def",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f(x,",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f(\\n",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f((\\n",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f([\\n",
"tests/add_trailing_comma_test.py::test_noop_unhugs[textwrap.dedent(\"\"\"\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[f(\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[f(a,\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[def",
"tests/add_trailing_comma_test.py::test_fix_unhugs[with",
"tests/add_trailing_comma_test.py::test_fix_unhugs[if",
"tests/add_trailing_comma_test.py::test_fix_unhugs[{'foo':",
"tests/add_trailing_comma_test.py::test_fix_unhugs[f(g(\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[{\"foo\":",
"tests/add_trailing_comma_test.py::test_fix_unhugs[x(\"foo\",",
"tests/add_trailing_comma_test.py::test_fix_unhugs[x(\"foo\"\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs_py3_only[def",
"tests/add_trailing_comma_test.py::test_noop_trailing_brace[[]]",
"tests/add_trailing_comma_test.py::test_noop_trailing_brace[x",
"tests/add_trailing_comma_test.py::test_noop_trailing_brace[y",
"tests/add_trailing_comma_test.py::test_noop_trailing_brace[foo.\\\\\\n",
"tests/add_trailing_comma_test.py::test_fix_trailing_brace[x",
"tests/add_trailing_comma_test.py::test_main_trivial",
"tests/add_trailing_comma_test.py::test_main_noop",
"tests/add_trailing_comma_test.py::test_main_changes_a_file",
"tests/add_trailing_comma_test.py::test_main_syntax_error",
"tests/add_trailing_comma_test.py::test_main_non_utf8_bytes",
"tests/add_trailing_comma_test.py::test_main_py35_plus_argument_star_args",
"tests/add_trailing_comma_test.py::test_main_py35_plus_argument_star_star_kwargs",
"tests/add_trailing_comma_test.py::test_main_py36_plus_implies_py35_plus",
"tests/add_trailing_comma_test.py::test_main_py36_plus_function_trailing_commas"
] | [] | MIT License | 1,521 | 246 | [
"add_trailing_comma.py"
] |
|
pydicom__pydicom-443 | 4c507a269b5cf2f2ad60913d8411f7b2bfd28969 | 2017-07-26 20:26:43 | bef49851e7c3b70edd43cc40fc84fe905e78d5ba | pep8speaks: Hello @mrbean-bremen! Thanks for submitting the PR.
- In the file [`pydicom/multival.py`](https://github.com/pydicom/pydicom/blob/83d0e17f8740bf674b08eeec963cbf1d9dd99013/pydicom/multival.py), following are the PEP8 issues :
> [Line 45:13](https://github.com/pydicom/pydicom/blob/83d0e17f8740bf674b08eeec963cbf1d9dd99013/pydicom/multival.py#L45): [E731](https://duckduckgo.com/?q=pep8%20E731) do not assign a lambda expression, use a def
scaramallion: Looks good to me.
massich: Still not tested. lines 294, 322, 352 in file `pydicom/filewriter.py` are not covered.
Two options:
a) either create 3 tests to test `write_TM`, `write_DT` and `write_DA` respectively by passing `data_element` with multiple values insde and ensuring that the written file is correct
Or b) refactor the function and add a single test to ensure that the string is properly generated
mrbean-bremen: Note: I have squashed the relevant commits and separated the pep8 commit, so this can be rebased.
mrbean-bremen: @darcymason, @massich - please check if this is sufficient.
mrbean-bremen: @massich - I added a test to cover the missing line
massich: LGTM. (well... travis is doing wired stuff, but I think that it might be that they just changed something and they are rebuilding the wheels in gdcm. Let them sleep through it, trigger travis again and I think would be all green)
mrbean-bremen: Note: I have removed the PEP-8 commit (good that I had separated it...), as that has already been done by @scaramallion.
mrbean-bremen: @darcymason, @massich - anything left to do here?
darcymason: LGTM to me, other than my one minor test comment. I'll try to check in later today to merge, or @scaramallion can do so if no further comments from @massich. | diff --git a/pydicom/dataelem.py b/pydicom/dataelem.py
index 36315e0ba..ea60bc4d0 100644
--- a/pydicom/dataelem.py
+++ b/pydicom/dataelem.py
@@ -14,6 +14,8 @@ A DataElement has a tag,
from __future__ import absolute_import
from collections import namedtuple
+from pydicom.multival import MultiValue
+
from pydicom.charset import default_encoding
from pydicom import config # don't import datetime_conversion directly
@@ -230,10 +232,7 @@ class DataElement(object):
except AttributeError: # not a list
return self._convert(val)
else:
- returnvalue = []
- for subval in val:
- returnvalue.append(self._convert(subval))
- return returnvalue
+ return MultiValue(lambda x: self._convert(x), val)
def _convert(self, val):
"""Convert `val` to an appropriate type for the element's VR."""
diff --git a/pydicom/filewriter.py b/pydicom/filewriter.py
index 0ccbf1506..17286a413 100644
--- a/pydicom/filewriter.py
+++ b/pydicom/filewriter.py
@@ -10,6 +10,7 @@ from pydicom.charset import default_encoding, text_VRs, convert_encodings
from pydicom.datadict import keyword_for_tag
from pydicom.dataset import Dataset
from pydicom.filebase import DicomFile, DicomFileLike
+from pydicom.multival import MultiValue
from pydicom.tag import Tag, ItemTag, ItemDelimiterTag, SequenceDelimiterTag
from pydicom.tagtools import tag_in_exception
from pydicom.uid import (PYDICOM_IMPLEMENTATION_UID, ImplicitVRLittleEndian,
@@ -200,9 +201,14 @@ def write_UI(fp, data_element):
write_string(fp, data_element, '\0') # pad with 0-byte to even length
+def _is_multi_value(val):
+ """Return True if `val` is a multi-value container."""
+ return isinstance(val, (MultiValue, list, tuple))
+
+
def multi_string(val):
"""Put a string together with delimiter if has more than one value"""
- if isinstance(val, (list, tuple)):
+ if _is_multi_value(val):
# \ is escape chr, so "\\" gives single backslash
return "\\".join(val)
else:
@@ -248,7 +254,7 @@ def write_number_string(fp, data_element, padding=' '):
# file
val = data_element.value
- if isinstance(val, (list, tuple)):
+ if _is_multi_value(val):
val = "\\".join((x.original_string
if hasattr(x, 'original_string') else str(x)
for x in val))
@@ -281,7 +287,7 @@ def write_DA(fp, data_element, padding=' '):
if isinstance(val, (str, compat.string_types)):
write_string(fp, data_element, padding)
else:
- if isinstance(val, (list, tuple)):
+ if _is_multi_value(val):
val = "\\".join((x if isinstance(x, (str, compat.string_types))
else _format_DA(x) for x in val))
else:
@@ -309,7 +315,7 @@ def write_DT(fp, data_element, padding=' '):
if isinstance(val, (str, compat.string_types)):
write_string(fp, data_element, padding)
else:
- if isinstance(val, (list, tuple)):
+ if _is_multi_value(val):
val = "\\".join((x if isinstance(x, (str, compat.string_types))
else _format_DT(x) for x in val))
else:
@@ -339,7 +345,7 @@ def write_TM(fp, data_element, padding=' '):
if isinstance(val, (str, compat.string_types)):
write_string(fp, data_element, padding)
else:
- if isinstance(val, (list, tuple)):
+ if _is_multi_value(val):
val = "\\".join((x if isinstance(x, (str, compat.string_types))
else _format_TM(x) for x in val))
else:
diff --git a/pydicom/multival.py b/pydicom/multival.py
index 0829ac76a..5bc72b0d4 100644
--- a/pydicom/multival.py
+++ b/pydicom/multival.py
@@ -8,9 +8,10 @@ or any list of items that must all be the same type.
# See the file LICENSE included with this distribution, also
# available at https://github.com/pydicom/pydicom
#
+from collections import MutableSequence
-class MultiValue(list):
+class MultiValue(MutableSequence):
"""Class to hold any multi-valued DICOM value,
or any list of items that are all of the same type.
@@ -38,39 +39,44 @@ class MultiValue(list):
"""
from pydicom.valuerep import DSfloat, DSdecimal, IS
- self.type_constructor = type_constructor
-
- if isinstance(type_constructor, (DSfloat, IS, DSdecimal)):
- converted_list = [
- type_constructor(x) if x != '' else x for x in iterable
- ]
- else:
- converted_list = [type_constructor(x) for x in iterable]
- super(MultiValue, self).__init__(converted_list)
+ def number_string_type_constructor(x):
+ return self.type_constructor(x) if x != '' else x
- def append(self, val):
- super(MultiValue, self).append(self.type_constructor(val))
-
- def __deepcopy__(self, memo):
- return MultiValue(self.type_constructor, self)
-
- def extend(self, list_of_vals):
- super(MultiValue, self).extend((self.type_constructor(x)
- for x in list_of_vals))
+ self._list = list()
+ self.type_constructor = type_constructor
+ if type_constructor in (DSfloat, IS, DSdecimal):
+ type_constructor = number_string_type_constructor
+ for x in iterable:
+ self._list.append(type_constructor(x))
def insert(self, position, val):
- super(MultiValue, self).insert(position, self.type_constructor(val))
+ self._list.insert(position, self.type_constructor(val))
def __setitem__(self, i, val):
"""Set an item of the list, making sure it is of the right VR type"""
if isinstance(i, slice):
- val = [self.type_constructor(x) for x in val]
+ val = [self.type_constructor(v) for v in val]
+ self._list.__setitem__(i, val)
else:
- val = self.type_constructor(val)
- super(MultiValue, self).__setitem__(i, val)
+ self._list.__setitem__(i, self.type_constructor(val))
def __str__(self):
lines = [str(x) for x in self]
return "['" + "', '".join(lines) + "']"
__repr__ = __str__
+
+ def __len__(self):
+ return len(self._list)
+
+ def __getitem__(self, index):
+ return self._list[index]
+
+ def __delitem__(self, index):
+ del self._list[index]
+
+ def __iter__(self):
+ return iter(self._list)
+
+ def __eq__(self, other):
+ return self._list == other
| Is this a bug in write_file?
_From [[email protected]](https://code.google.com/u/100845324794297671624/) on November 09, 2013 13:17:27_
What steps will reproduce the problem? (code stub attached)
1. Modify a valid dvh value: ds1.DVHs[0].DVHData[0] = NewFirstPoint
2. Save using ds1.write_file, and read back again into new ds2
3. Read-in data is not updated correctly; ds1.DVHs[0].DVHData[0] <> ds2.DVHS[0].DVHData[0]. The data stored reflects the ORIGINAL data in ds1 (which means that write_file is getting the data from a shadowed copy of the data? Subtle.) What is the expected output? What do you see instead? Sample code attached. Used a valid RT Dose file with DVH's as a starting point. Later, tried creating DVH's from scratch, and found a similar behavior. Once DVHData is appended to DVHs[index], elements of DVHData that are changed programmatically are not saved with write_file.
Is this a bug, or have I missed a step? What version of the product are you using? Tried this with both 0.9.6 and 0.9.7 **_NOTE**_: any text or attached files posted with the issue can be viewed by anyone. You are solely responsible to ensure that they contain no confidential information of any kind. Please provide any additional information below. CODE STUB ATTACHED
**Attachment:** [dvh_rw_error.py](http://code.google.com/p/pydicom/issues/detail?id=135)
_Original issue: http://code.google.com/p/pydicom/issues/detail?id=135_
| pydicom/pydicom | diff --git a/pydicom/tests/test_dataelem.py b/pydicom/tests/test_dataelem.py
index 2fbc4709d..56d0c49d4 100644
--- a/pydicom/tests/test_dataelem.py
+++ b/pydicom/tests/test_dataelem.py
@@ -11,6 +11,8 @@ import unittest
import sys
+from pydicom.valuerep import DSfloat
+
from pydicom.charset import default_encoding
from pydicom.dataelem import DataElement
@@ -43,6 +45,26 @@ class DataElementTests(unittest.TestCase):
self.assertEqual(VM, 1,
"Wrong Value Multiplicity, expected 1, got %i" % VM)
+ def testDSFloatConversion(self):
+ """Test that strings are correctly converted if changing the value."""
+ self.assertTrue(isinstance(self.data_elementDS.value, DSfloat))
+ self.assertTrue(isinstance(self.data_elementMulti.value[0], DSfloat))
+ self.assertEqual(DSfloat('42.1'), self.data_elementMulti.value[0])
+
+ # multi-value append/insert
+ self.data_elementMulti.value.append('42.4')
+ self.assertTrue(isinstance(self.data_elementMulti.value[3], DSfloat))
+ self.assertEqual(DSfloat('42.4'), self.data_elementMulti.value[3])
+
+ self.data_elementMulti.value.insert(0, '42.0')
+ self.assertTrue(isinstance(self.data_elementMulti.value[0], DSfloat))
+ self.assertEqual(DSfloat('42.0'), self.data_elementMulti.value[0])
+
+ # change single value of multi-value
+ self.data_elementMulti.value[3] = '123.4'
+ self.assertTrue(isinstance(self.data_elementMulti.value[3], DSfloat))
+ self.assertEqual(DSfloat('123.4'), self.data_elementMulti.value[3])
+
def testBackslash(self):
"""DataElement: String with '\\' sets multi-valued data_element."""
data_element = DataElement((1, 2), "DS", r"42.1\42.2\42.3")
diff --git a/pydicom/tests/test_filewriter.py b/pydicom/tests/test_filewriter.py
index bc78943b1..777462a9e 100644
--- a/pydicom/tests/test_filewriter.py
+++ b/pydicom/tests/test_filewriter.py
@@ -274,10 +274,71 @@ class WriteDataElementTests(unittest.TestCase):
got = self.f1.getvalue()
msg = ("Did not write zero-length AT value correctly. "
"Expected %r, got %r") % (bytes2hex(expected), bytes2hex(got))
- msg = "%r %r" % (type(expected), type(got))
- msg = "'%r' '%r'" % (expected, got)
self.assertEqual(expected, got, msg)
+ def check_data_element(self, data_elem, expected):
+ encoded_elem = self.encode_element(data_elem)
+ self.assertEqual(expected, encoded_elem)
+
+ def test_write_DA(self):
+ data_elem = DataElement(0x00080022, 'DA', '20000101')
+ expected = (b'\x08\x00\x22\x00' # tag
+ b'\x08\x00\x00\x00' # length
+ b'20000101') # value
+ self.check_data_element(data_elem, expected)
+ data_elem = DataElement(0x00080022, 'DA', date(2000, 1, 1))
+ self.check_data_element(data_elem, expected)
+
+ def test_write_multi_DA(self):
+ data_elem = DataElement(0x0014407E, 'DA', ['20100101', '20101231'])
+ expected = (b'\x14\x00\x7E\x40' # tag
+ b'\x12\x00\x00\x00' # length
+ b'20100101\\20101231 ') # padded value
+ self.check_data_element(data_elem, expected)
+ data_elem = DataElement(0x0014407E, 'DA', [date(2010, 1, 1),
+ date(2010, 12, 31)])
+ self.check_data_element(data_elem, expected)
+
+ def test_write_TM(self):
+ data_elem = DataElement(0x00080030, 'TM', '010203')
+ expected = (b'\x08\x00\x30\x00' # tag
+ b'\x06\x00\x00\x00' # length
+ b'010203') # padded value
+ self.check_data_element(data_elem, expected)
+ data_elem = DataElement(0x00080030, 'TM', time(1, 2, 3))
+ self.check_data_element(data_elem, expected)
+
+ def test_write_multi_TM(self):
+ data_elem = DataElement(0x0014407C, 'TM', ['082500', '092655'])
+ expected = (b'\x14\x00\x7C\x40' # tag
+ b'\x0E\x00\x00\x00' # length
+ b'082500\\092655 ') # padded value
+ self.check_data_element(data_elem, expected)
+ data_elem = DataElement(0x0014407C, 'TM', [time(8, 25),
+ time(9, 26, 55)])
+ self.check_data_element(data_elem, expected)
+
+ def test_write_DT(self):
+ data_elem = DataElement(0x0008002A, 'DT', '20170101120000')
+ expected = (b'\x08\x00\x2A\x00' # tag
+ b'\x0E\x00\x00\x00' # length
+ b'20170101120000') # value
+ self.check_data_element(data_elem, expected)
+ data_elem = DataElement(0x0008002A, 'DT', datetime(2017, 1, 1, 12))
+ self.check_data_element(data_elem, expected)
+
+ def test_write_multi_DT(self):
+ data_elem = DataElement(0x0040A13A, 'DT',
+ ['20120820120804', '20130901111111'])
+ expected = (b'\x40\x00\x3A\xA1' # tag
+ b'\x1E\x00\x00\x00' # length
+ b'20120820120804\\20130901111111 ') # padded value
+ self.check_data_element(data_elem, expected)
+ data_elem = DataElement(0x0040A13A, 'DT',
+ [datetime(2012, 8, 20, 12, 8, 4),
+ datetime(2013, 9, 1, 11, 11, 11)])
+ self.check_data_element(data_elem, expected)
+
def test_write_OD_implicit_little(self):
"""Test writing elements with VR of OD works correctly."""
# VolumetricCurvePoints
diff --git a/pydicom/tests/test_multival.py b/pydicom/tests/test_multival.py
index b44ad7f65..df277e640 100644
--- a/pydicom/tests/test_multival.py
+++ b/pydicom/tests/test_multival.py
@@ -26,6 +26,18 @@ class MultiValuetests(unittest.TestCase):
self.assertTrue(isinstance(val, (DSfloat, DSdecimal)),
"Multi-value DS item not converted to DS")
+ def testEmptyElements(self):
+ """MultiValue: Empty number string elements are not converted..."""
+ multival = MultiValue(DSfloat, ['1.0', ''])
+ self.assertEqual(1.0, multival[0])
+ self.assertEqual('', multival[1])
+ multival = MultiValue(IS, ['1', ''])
+ self.assertEqual(1, multival[0])
+ self.assertEqual('', multival[1])
+ multival = MultiValue(DSdecimal, ['1', ''])
+ self.assertEqual(1, multival[0])
+ self.assertEqual('', multival[1])
+
def testLimits(self):
"""MultiValue: Raise error if any item outside DICOM limits...."""
original_flag = config.enforce_valid_values
@@ -52,6 +64,14 @@ class MultiValuetests(unittest.TestCase):
self.assertEqual(multival[1], 7,
"Item set by index is not correct value")
+ def testDeleteIndex(self):
+ """MultiValue: Deleting item at index behaves as expected..."""
+ multival = MultiValue(IS, [1, 5, 10])
+ del multival[1]
+ self.assertEqual(2, len(multival))
+ self.assertEqual(multival[0], 1)
+ self.assertEqual(multival[1], 10)
+
def testExtend(self):
"""MultiValue: Extending a list converts all to required type"""
multival = MultiValue(IS, [1, 5, 10])
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 3
} | 0.9 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"numpy>=1.16.0",
"pytest",
"pytest-cov"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
coverage==6.2
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
numpy==1.19.5
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
-e git+https://github.com/pydicom/pydicom.git@4c507a269b5cf2f2ad60913d8411f7b2bfd28969#egg=pydicom
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
pytest-cov==4.0.0
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
tomli==1.2.3
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: pydicom
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- coverage==6.2
- numpy==1.19.5
- pytest-cov==4.0.0
- tomli==1.2.3
prefix: /opt/conda/envs/pydicom
| [
"pydicom/tests/test_dataelem.py::DataElementTests::testDSFloatConversion",
"pydicom/tests/test_multival.py::MultiValuetests::testEmptyElements"
] | [] | [
"pydicom/tests/test_dataelem.py::DataElementTests::testBackslash",
"pydicom/tests/test_dataelem.py::DataElementTests::testEqualityInheritance",
"pydicom/tests/test_dataelem.py::DataElementTests::testEqualityNotElement",
"pydicom/tests/test_dataelem.py::DataElementTests::testEqualityPrivateElement",
"pydicom/tests/test_dataelem.py::DataElementTests::testEqualitySequenceElement",
"pydicom/tests/test_dataelem.py::DataElementTests::testEqualityStandardElement",
"pydicom/tests/test_dataelem.py::DataElementTests::testHash",
"pydicom/tests/test_dataelem.py::DataElementTests::testKeyword",
"pydicom/tests/test_dataelem.py::DataElementTests::testRetired",
"pydicom/tests/test_dataelem.py::DataElementTests::testUID",
"pydicom/tests/test_dataelem.py::DataElementTests::testVM1",
"pydicom/tests/test_dataelem.py::DataElementTests::testVM2",
"pydicom/tests/test_dataelem.py::DataElementTests::test_equality_class_members",
"pydicom/tests/test_dataelem.py::DataElementTests::test_repeater_str",
"pydicom/tests/test_dataelem.py::RawDataElementTests::testKeyError",
"pydicom/tests/test_dataelem.py::RawDataElementTests::testTagWithoutEncodingPython3",
"pydicom/tests/test_dataelem.py::RawDataElementTests::testValidTag",
"pydicom/tests/test_filewriter.py::WriteFileTests::testCT",
"pydicom/tests/test_filewriter.py::WriteFileTests::testJPEG2000",
"pydicom/tests/test_filewriter.py::WriteFileTests::testListItemWriteBack",
"pydicom/tests/test_filewriter.py::WriteFileTests::testMR",
"pydicom/tests/test_filewriter.py::WriteFileTests::testMultiPN",
"pydicom/tests/test_filewriter.py::WriteFileTests::testRTDose",
"pydicom/tests/test_filewriter.py::WriteFileTests::testRTPlan",
"pydicom/tests/test_filewriter.py::WriteFileTests::testUnicode",
"pydicom/tests/test_filewriter.py::WriteFileTests::test_write_double_filemeta",
"pydicom/tests/test_filewriter.py::WriteFileTests::test_write_no_ts",
"pydicom/tests/test_filewriter.py::WriteFileTests::testwrite_short_uid",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_empty_AT",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_DA",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_DT",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_OD_explicit_little",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_OD_implicit_little",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_OL_explicit_little",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_OL_implicit_little",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_TM",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_UC_explicit_little",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_UC_implicit_little",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_UR_explicit_little",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_UR_implicit_little",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_multi_DA",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_multi_DT",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_multi_TM",
"pydicom/tests/test_filewriter.py::TestCorrectAmbiguousVR::test_lut_descriptor",
"pydicom/tests/test_filewriter.py::TestCorrectAmbiguousVR::test_overlay",
"pydicom/tests/test_filewriter.py::TestCorrectAmbiguousVR::test_pixel_data",
"pydicom/tests/test_filewriter.py::TestCorrectAmbiguousVR::test_pixel_representation_vm_one",
"pydicom/tests/test_filewriter.py::TestCorrectAmbiguousVR::test_pixel_representation_vm_three",
"pydicom/tests/test_filewriter.py::TestCorrectAmbiguousVR::test_sequence",
"pydicom/tests/test_filewriter.py::TestCorrectAmbiguousVR::test_waveform_bits_allocated",
"pydicom/tests/test_filewriter.py::WriteAmbiguousVRTests::test_write_explicit_vr_big_endian",
"pydicom/tests/test_filewriter.py::WriteAmbiguousVRTests::test_write_explicit_vr_little_endian",
"pydicom/tests/test_filewriter.py::WriteAmbiguousVRTests::test_write_explicit_vr_raises",
"pydicom/tests/test_filewriter.py::ScratchWriteTests::testImpl_LE_deflen_write",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_preamble_default",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_preamble_custom",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_no_preamble",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_none_preamble",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_bad_preamble",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_prefix",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_prefix_none",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_ds_changed",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_transfer_syntax_added",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_transfer_syntax_not_added",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_transfer_syntax_raises",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_media_storage_sop_class_uid_added",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_write_no_file_meta",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_raise_no_file_meta",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_add_file_meta",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_standard",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_commandset_no_written",
"pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoToStandard::test_bad_elements",
"pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoToStandard::test_missing_elements",
"pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoToStandard::test_group_length",
"pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoToStandard::test_group_length_updated",
"pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoToStandard::test_version",
"pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoToStandard::test_implementation_version_name_length",
"pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoToStandard::test_implementation_class_uid_length",
"pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoToStandard::test_filelike_position",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_commandset",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_commandset_dataset",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_commandset_filemeta",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_commandset_filemeta_dataset",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_dataset",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_ds_unchanged",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_file_meta_unchanged",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_filemeta_dataset",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_no_preamble",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_preamble_commandset",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_preamble_commandset_dataset",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_preamble_commandset_filemeta",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_preamble_commandset_filemeta_dataset",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_preamble_custom",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_preamble_dataset",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_preamble_default",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_preamble_filemeta_dataset",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_read_write_identical",
"pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoNonStandard::test_bad_elements",
"pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoNonStandard::test_filelike_position",
"pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoNonStandard::test_group_length_updated",
"pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoNonStandard::test_meta_unchanged",
"pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoNonStandard::test_missing_elements",
"pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoNonStandard::test_transfer_syntax_not_added",
"pydicom/tests/test_multival.py::MultiValuetests::testAppend",
"pydicom/tests/test_multival.py::MultiValuetests::testDeleteIndex",
"pydicom/tests/test_multival.py::MultiValuetests::testExtend",
"pydicom/tests/test_multival.py::MultiValuetests::testIssue236DeepCopy",
"pydicom/tests/test_multival.py::MultiValuetests::testLimits",
"pydicom/tests/test_multival.py::MultiValuetests::testMultiDS",
"pydicom/tests/test_multival.py::MultiValuetests::testSetIndex",
"pydicom/tests/test_multival.py::MultiValuetests::testSlice"
] | [] | MIT License | 1,523 | 1,743 | [
"pydicom/dataelem.py",
"pydicom/filewriter.py",
"pydicom/multival.py"
] |
google__mobly-279 | 49db9368415e40a3bf0512bddf6c0e3170513a41 | 2017-07-28 18:54:44 | 9bb2ab41518a2f037178888f9e606fc42394ffb0 | dthkao:
Review status: 0 of 3 files reviewed at latest revision, 1 unresolved discussion.
---
*[mobly/records.py, line 429 at r1](https://reviewable.io:443/reviews/google/mobly/279#-KqFgaA3dUDwcpw4Ygiu:-KqFgaA3dUDwcpw4Ygiv:b-wjrgfc) ([raw file](https://github.com/google/mobly/blob/b6686bb09105a416b5155a0cceac1a06272cc3ec/mobly/records.py#L429)):*
> ```Python
> return json_str
>
> def get_requested_test_names(self):
> ```
What prevents someone from accessing self.requested? (As we do?)
---
*Comments from [Reviewable](https://reviewable.io:443/reviews/google/mobly/279)*
<!-- Sent from Reviewable.io -->
xpconanfan:
Review status: 0 of 3 files reviewed at latest revision, 1 unresolved discussion.
---
*[mobly/records.py, line 429 at r1](https://reviewable.io:443/reviews/google/mobly/279#-KqFgaA3dUDwcpw4Ygiu:-KqOodMnUi49SWUftmdp:bh7y1z9) ([raw file](https://github.com/google/mobly/blob/b6686bb09105a416b5155a0cceac1a06272cc3ec/mobly/records.py#L429)):*
<details><summary><i>Previously, dthkao (David T.H. Kao) wrote…</i></summary><blockquote>
What prevents someone from accessing self.requested? (As we do?)
</blockquote></details>
test does not have access to the record object.
---
*Comments from [Reviewable](https://reviewable.io:443/reviews/google/mobly/279)*
<!-- Sent from Reviewable.io -->
xpconanfan:
Review status: 0 of 3 files reviewed at latest revision, 1 unresolved discussion.
---
*[mobly/records.py, line 429 at r1](https://reviewable.io:443/reviews/google/mobly/279#-KqFgaA3dUDwcpw4Ygiu:-KqOptsgc7YNqR5aNy-L:b1fqnmy) ([raw file](https://github.com/google/mobly/blob/b6686bb09105a416b5155a0cceac1a06272cc3ec/mobly/records.py#L429)):*
<details><summary><i>Previously, dthkao (David T.H. Kao) wrote…</i></summary><blockquote>
How/where is this function used, then?
</blockquote></details>
It's used by Mobly itself. Are you seeing all the diffs in your view?
---
*Comments from [Reviewable](https://reviewable.io:443/reviews/google/mobly/279)*
<!-- Sent from Reviewable.io -->
dthkao:
Review status: 0 of 3 files reviewed at latest revision, 1 unresolved discussion.
---
*[mobly/records.py, line 429 at r1](https://reviewable.io:443/reviews/google/mobly/279#-KqFgaA3dUDwcpw4Ygiu:-KqOr4mFdxohb-QvcJXD:b-dhv434) ([raw file](https://github.com/google/mobly/blob/b6686bb09105a416b5155a0cceac1a06272cc3ec/mobly/records.py#L429)):*
<details><summary><i>Previously, xpconanfan (Ang Li) wrote…</i></summary><blockquote>
It's used by Mobly itself. Are you seeing all the diffs in your view?
</blockquote></details>
I am. There's no new usage of it in the diff?
---
*Comments from [Reviewable](https://reviewable.io:443/reviews/google/mobly/279)*
<!-- Sent from Reviewable.io -->
xpconanfan:
Review status: 0 of 3 files reviewed at latest revision, 1 unresolved discussion.
---
*[mobly/records.py, line 429 at r1](https://reviewable.io:443/reviews/google/mobly/279#-KqFgaA3dUDwcpw4Ygiu:-KqOrZNzcJvlKNzBYhl-:b-m8finj) ([raw file](https://github.com/google/mobly/blob/b6686bb09105a416b5155a0cceac1a06272cc3ec/mobly/records.py#L429)):*
<details><summary><i>Previously, dthkao (David T.H. Kao) wrote…</i></summary><blockquote>
I am. There's no new usage of it in the diff?
</blockquote></details>
Hmmmm, you're right.
I think the idea behind this was to keep the job of serializing this results object in this class, but I didn't finish the job...
All the other entries are dicts, whereas the test names are just a list.
So we need a key to be associated with the list.
I was gonna put the key in this method, but somehow ended up with the current implementation, sorry...
Will upload a new patch.
---
*Comments from [Reviewable](https://reviewable.io:443/reviews/google/mobly/279)*
<!-- Sent from Reviewable.io -->
xpconanfan:
Review status: 0 of 3 files reviewed at latest revision, 1 unresolved discussion.
---
*[mobly/records.py, line 429 at r1](https://reviewable.io:443/reviews/google/mobly/279#-KqFgaA3dUDwcpw4Ygiu:-KqOvJCDVXkFJwXFKZeT:b-896fix) ([raw file](https://github.com/google/mobly/blob/b6686bb09105a416b5155a0cceac1a06272cc3ec/mobly/records.py#L429)):*
<details><summary><i>Previously, xpconanfan (Ang Li) wrote…</i></summary><blockquote>
Hmmmm, you're right.
I think the idea behind this was to keep the job of serializing this results object in this class, but I didn't finish the job...
All the other entries are dicts, whereas the test names are just a list.
So we need a key to be associated with the list.
I was gonna put the key in this method, but somehow ended up with the current implementation, sorry...
Will upload a new patch.
</blockquote></details>
Done.
---
*Comments from [Reviewable](https://reviewable.io:443/reviews/google/mobly/279)*
<!-- Sent from Reviewable.io -->
dthkao: <img class="emoji" title=":lgtm:" alt=":lgtm:" align="absmiddle" src="https://reviewable.io/lgtm.png" height="20" width="61"/>
---
Review status: 0 of 3 files reviewed at latest revision, 1 unresolved discussion.
---
*Comments from [Reviewable](https://reviewable.io:443/reviews/google/mobly/279#-:-KqOzAiQYu8XZTFQYYdN:bnfp4nl)*
<!-- Sent from Reviewable.io -->
| diff --git a/mobly/records.py b/mobly/records.py
index 54be008..b6175cd 100644
--- a/mobly/records.py
+++ b/mobly/records.py
@@ -43,8 +43,16 @@ class TestSummaryEntryType(enum.Enum):
The idea is similar to how `TestResult.json_str` categorizes different
sections of a `TestResult` object in the serialized format.
"""
+ # A list of all the tests requested for a test run.
+ # This is dumped at the beginning of a summary file so we know what was
+ # requested in case the test is interrupted and the final summary is not.
+ # created.
+ TEST_NAME_LIST = 'TestNameList'
+ # Records of test results.
RECORD = 'Record'
+ # A summary of the test run stats, like how many test failed.
SUMMARY = 'Summary'
+ # Information on the controllers used in the test.
CONTROLLER_INFO = 'ControllerInfo'
@@ -418,6 +426,17 @@ class TestResult(object):
json_str = json.dumps(d, indent=4, sort_keys=True)
return json_str
+ def requested_test_names_dict(self):
+ """Gets the requested test names of a test run in a dict format.
+
+ Note a test can be requested multiple times, so there can be duplicated
+ values
+
+ Returns:
+ A dict with a key and the list of strings.
+ """
+ return {'Requested Tests': copy.deepcopy(self.requested)}
+
def summary_str(self):
"""Gets a string that summarizes the stats of this test result.
| add a prefix document to test_summary.yaml with requested tests
This could be dumped before any test cases are run | google/mobly | diff --git a/mobly/base_test.py b/mobly/base_test.py
index ece150a..5deaf5e 100644
--- a/mobly/base_test.py
+++ b/mobly/base_test.py
@@ -552,6 +552,8 @@ class BaseTestClass(object):
# No test method specified by user, execute all in test class.
test_names = self._get_all_test_names()
self.results.requested = test_names
+ self.summary_writer.dump(self.results.requested_test_names_dict(),
+ records.TestSummaryEntryType.TEST_NAME_LIST)
tests = self._get_test_methods(test_names)
# Setup for the class.
try:
diff --git a/tests/mobly/test_runner_test.py b/tests/mobly/test_runner_test.py
index e57a91f..7690c22 100755
--- a/tests/mobly/test_runner_test.py
+++ b/tests/mobly/test_runner_test.py
@@ -13,11 +13,14 @@
# limitations under the License.
import mock
+import os
import shutil
import tempfile
+import yaml
from future.tests.base import unittest
from mobly import config_parser
+from mobly import records
from mobly import signals
from mobly import test_runner
@@ -31,6 +34,7 @@ class TestRunnerTest(unittest.TestCase):
"""This test class has unit tests for the implementation of everything
under mobly.test_runner.
"""
+
def setUp(self):
self.tmp_dir = tempfile.mkdtemp()
self.base_mock_test_config = config_parser.TestRunConfig()
@@ -50,7 +54,7 @@ class TestRunnerTest(unittest.TestCase):
def test_register_controller_no_config(self):
tr = test_runner.TestRunner(self.log_dir, self.test_bed_name)
with self.assertRaisesRegex(signals.ControllerError,
- 'No corresponding config found for'):
+ 'No corresponding config found for'):
tr._register_controller(self.base_mock_test_config,
mock_controller)
@@ -177,6 +181,37 @@ class TestRunnerTest(unittest.TestCase):
}
self.assertEqual(tr.results.controller_info, expected_info)
+ def test_summary_file_entries(self):
+ """Verifies the output summary's file format.
+
+ This focuses on the format of the file instead of the content of
+ entries, which is covered in base_test_test.
+ """
+ mock_test_config = self.base_mock_test_config.copy()
+ mock_ctrlr_config_name = mock_controller.MOBLY_CONTROLLER_CONFIG_NAME
+ my_config = [{
+ 'serial': 'xxxx',
+ 'magic': 'Magic1'
+ }, {
+ 'serial': 'xxxx',
+ 'magic': 'Magic2'
+ }]
+ mock_test_config.controller_configs[mock_ctrlr_config_name] = my_config
+ tr = test_runner.TestRunner(self.log_dir, self.test_bed_name)
+ tr.add_test_class(mock_test_config, integration_test.IntegrationTest)
+ tr.run()
+ summary_path = os.path.join(mock_test_config.log_path,
+ mock_test_config.test_bed_name, 'latest',
+ records.OUTPUT_FILE_SUMMARY)
+ with open(summary_path, 'r') as f:
+ summary_entries = list(yaml.load_all(f))
+ self.assertEqual(len(summary_entries), 4)
+ # Verify the first entry is the list of test names.
+ self.assertEqual(summary_entries[0]['Type'],
+ records.TestSummaryEntryType.TEST_NAME_LIST.value)
+ self.assertEqual(summary_entries[1]['Type'],
+ records.TestSummaryEntryType.RECORD.value)
+
@mock.patch(
'mobly.controllers.android_device_lib.adb.AdbProxy',
return_value=mock_android_device.MockAdbProxy(1))
@@ -265,8 +300,7 @@ class TestRunnerTest(unittest.TestCase):
def test_run_no_tests(self):
tr = test_runner.TestRunner(self.log_dir, self.test_bed_name)
- with self.assertRaisesRegex(test_runner.Error,
- 'No tests to execute.'):
+ with self.assertRaisesRegex(test_runner.Error, 'No tests to execute.'):
tr.run()
def test_verify_controller_module(self):
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | 1.5 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | coverage==7.8.0
exceptiongroup==1.2.2
execnet==2.1.1
future==1.0.0
iniconfig==2.1.0
-e git+https://github.com/google/mobly.git@49db9368415e40a3bf0512bddf6c0e3170513a41#egg=mobly
mock==1.0.1
packaging==24.2
pluggy==1.5.0
portpicker==1.6.0
psutil==7.0.0
pytest==8.3.5
pytest-asyncio==0.26.0
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-xdist==3.6.1
pytz==2025.2
PyYAML==6.0.2
timeout-decorator==0.5.0
tomli==2.2.1
typing_extensions==4.13.0
| name: mobly
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- coverage==7.8.0
- exceptiongroup==1.2.2
- execnet==2.1.1
- future==1.0.0
- iniconfig==2.1.0
- mock==1.0.1
- packaging==24.2
- pluggy==1.5.0
- portpicker==1.6.0
- psutil==7.0.0
- pytest==8.3.5
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-xdist==3.6.1
- pytz==2025.2
- pyyaml==6.0.2
- timeout-decorator==0.5.0
- tomli==2.2.1
- typing-extensions==4.13.0
prefix: /opt/conda/envs/mobly
| [
"tests/mobly/test_runner_test.py::TestRunnerTest::test_run_twice",
"tests/mobly/test_runner_test.py::TestRunnerTest::test_run_two_test_classes",
"tests/mobly/test_runner_test.py::TestRunnerTest::test_run_two_test_classes_different_configs"
] | [
"tests/mobly/test_runner_test.py::TestRunnerTest::test_summary_file_entries"
] | [
"tests/mobly/test_runner_test.py::TestRunnerTest::test_add_test_class_mismatched_log_path",
"tests/mobly/test_runner_test.py::TestRunnerTest::test_add_test_class_mismatched_test_bed_name",
"tests/mobly/test_runner_test.py::TestRunnerTest::test_register_controller_change_return_value",
"tests/mobly/test_runner_test.py::TestRunnerTest::test_register_controller_dup_register",
"tests/mobly/test_runner_test.py::TestRunnerTest::test_register_controller_less_than_min_number",
"tests/mobly/test_runner_test.py::TestRunnerTest::test_register_controller_no_config",
"tests/mobly/test_runner_test.py::TestRunnerTest::test_register_controller_no_config_no_register",
"tests/mobly/test_runner_test.py::TestRunnerTest::test_register_controller_no_get_info",
"tests/mobly/test_runner_test.py::TestRunnerTest::test_register_controller_return_value",
"tests/mobly/test_runner_test.py::TestRunnerTest::test_run_no_tests",
"tests/mobly/test_runner_test.py::TestRunnerTest::test_verify_controller_module",
"tests/mobly/test_runner_test.py::TestRunnerTest::test_verify_controller_module_missing_attr",
"tests/mobly/test_runner_test.py::TestRunnerTest::test_verify_controller_module_null_attr"
] | [] | Apache License 2.0 | 1,533 | 384 | [
"mobly/records.py"
] |
Duke-GCB__DukeDSClient-153 | 0293338fa21dd12611e2991d012a1c4e801a361f | 2017-07-31 17:30:40 | bffebebd86d09f5924461959401ef3698b4e47d5 | diff --git a/ddsc/cmdparser.py b/ddsc/cmdparser.py
index b64cb30..9bf2a7b 100644
--- a/ddsc/cmdparser.py
+++ b/ddsc/cmdparser.py
@@ -160,6 +160,21 @@ def _add_auth_role_arg(arg_parser, default_permissions):
default=default_permissions)
+def _add_project_filter_auth_role_arg(arg_parser):
+ """
+ Adds optional auth_role filtering parameter to a parser.
+ :param arg_parser: ArgumentParser parser to add this argument to.
+ """
+ help_text = "Filters project listing to just those projects with the specified role. "
+ help_text += "See command list_auth_roles for AuthRole values."
+ arg_parser.add_argument("--auth-role",
+ metavar='AuthRole',
+ type=to_unicode,
+ dest='auth_role',
+ help=help_text,
+ default=None)
+
+
def _add_copy_project_arg(arg_parser):
"""
Adds optional copy_project parameter to a parser.
@@ -378,7 +393,9 @@ class CommandParser(object):
"""
description = "Show a list of project names or folders/files of a single project."
list_parser = self.subparsers.add_parser('list', description=description)
- add_project_name_arg(list_parser, required=False, help_text="Name of the project to show details for.")
+ project_name_or_auth_role = list_parser.add_mutually_exclusive_group(required=False)
+ _add_project_filter_auth_role_arg(project_name_or_auth_role)
+ add_project_name_arg(project_name_or_auth_role, required=False, help_text="Name of the project to show details for.")
list_parser.set_defaults(func=list_func)
def register_delete_command(self, delete_func):
diff --git a/ddsc/ddsclient.py b/ddsc/ddsclient.py
index 0f2d9b1..ff2449a 100644
--- a/ddsc/ddsclient.py
+++ b/ddsc/ddsclient.py
@@ -305,20 +305,30 @@ class ListCommand(object):
Lists project names.
:param args Namespace arguments parsed from the command line
"""
+ # project_name and auth_role args are mutually exclusive
if args.project_name:
project = self.remote_store.fetch_remote_project(args.project_name, must_exist=True)
self.print_project_details(project)
else:
- self.print_project_names()
+ self.print_project_names(args.auth_role)
- def print_project_details(self, project):
+ @staticmethod
+ def print_project_details(project):
filename_list = ProjectFilenameList()
filename_list.walk_project(project)
for info in filename_list.details:
print(info)
- def print_project_names(self):
- names = self.remote_store.get_project_names()
+ def print_project_names(self, filter_auth_role):
+ """
+ Prints project names to stdout for all projects or just those with the specified auth_role
+ :param filter_auth_role: str: optional auth_role to filter project list
+ """
+ if filter_auth_role:
+ projects = self.remote_store.get_projects_with_auth_role(auth_role=filter_auth_role)
+ names = [project['name'] for project in projects]
+ else:
+ names = self.remote_store.get_project_names()
if names:
for name in names:
print(pipes.quote(name))
| Add option to filter projects by role
Me:
> ... Duke DS team has developed a UI change to include the project role in the project listing (screenshot attached). This is on their dev server (https://dev.dataservice.duke.edu), and they’ve asked for feedback.
>
> One suggestion I’d have would be a select box to let you filter the list based on your role (e.g. show me only projects where I am an admin).
>
> Would such a change be useful in the command-line tool also? For example, running `ddsclient list —role admin` could display only the projects for which you have admin role.
>
User:
> Being able to filter by it on both the GUI and the command line tool would be very helpful for us. The mockup that you presented looks nice and will be very helpful as we sort through all of our projects (especially until we’re able to take ourselves off of old projects).
| Duke-GCB/DukeDSClient | diff --git a/ddsc/core/remotestore.py b/ddsc/core/remotestore.py
index b9f3e2a..6eeee03 100644
--- a/ddsc/core/remotestore.py
+++ b/ddsc/core/remotestore.py
@@ -254,6 +254,23 @@ class RemoteStore(object):
names.append(project['name'])
return names
+ def get_projects_with_auth_role(self, auth_role):
+ """
+ Return the list of projects that have the specified auth role from the list that the current user has access to.
+ :param auth_role: str: auth role we are filtering for
+ :return: [dict]: list of projects that have auth_role permissions for the current user
+ """
+ user = self.get_current_user()
+ # user.id
+ projects = []
+ response = self.data_service.get_projects().json()
+ for project in response['results']:
+ project_id = project['id']
+ permissions = self.data_service.get_user_project_permission(project_id, user.id).json()
+ if auth_role == permissions['auth_role']['id']:
+ projects.append(project)
+ return projects
+
def delete_project_by_name(self, project_name):
"""
Find the project named project_name and delete it raise error if not found.
diff --git a/ddsc/core/tests/test_remotestore.py b/ddsc/core/tests/test_remotestore.py
index b771f30..23d56ea 100644
--- a/ddsc/core/tests/test_remotestore.py
+++ b/ddsc/core/tests/test_remotestore.py
@@ -1,6 +1,6 @@
import json
from unittest import TestCase
-from mock import MagicMock
+from mock import MagicMock, Mock
from mock.mock import patch
from ddsc.core.remotestore import RemoteProject, RemoteFolder, RemoteFile, RemoteUser
from ddsc.core.remotestore import RemoteStore
@@ -416,6 +416,39 @@ class TestRemoteStore(TestCase):
expected_ids = set(["project_admin", "project_viewer"])
self.assertEqual(expected_ids, ids)
+ @patch("ddsc.core.remotestore.DataServiceApi")
+ def test_get_projects_with_auth_role(self, mock_data_service_api):
+ projects_resp = Mock()
+ projects_resp.json.return_value = {
+ 'results': [
+ {
+ 'id': '123'
+ },
+ {
+ 'id': '456'
+ }
+ ]
+ }
+ mock_data_service_api.return_value.get_projects.return_value = projects_resp
+ permission_resp = Mock()
+ permission_resp.json.side_effect = [
+ {
+ 'auth_role': {
+ 'id': 'project_admin'
+ }
+ }, {
+ 'auth_role': {
+ 'id': 'file_downloader'
+ }
+ }
+ ]
+ mock_data_service_api.return_value.get_user_project_permission.return_value = permission_resp
+ remote_store = RemoteStore(config=MagicMock())
+ result = remote_store.get_projects_with_auth_role(auth_role='project_admin')
+ mock_data_service_api.return_value.get_projects.assert_called()
+ self.assertEqual(1, len(result))
+ self.assertEqual('123', result[0]['id'])
+
class TestRemoteProjectChildren(TestCase):
def test_simple_case(self):
diff --git a/ddsc/tests/test_cmdparser.py b/ddsc/tests/test_cmdparser.py
index 6f66581..34523cb 100644
--- a/ddsc/tests/test_cmdparser.py
+++ b/ddsc/tests/test_cmdparser.py
@@ -1,6 +1,7 @@
from __future__ import absolute_import
from unittest import TestCase
from ddsc.cmdparser import CommandParser
+from mock import Mock
def no_op():
@@ -51,3 +52,31 @@ class TestCommandParser(TestCase):
self.assertEqual(['share'], list(command_parser.subparsers.choices.keys()))
command_parser.run_command(['share', '-p', 'someproject', '--user', 'joe123', '--msg-file', 'setup.py'])
self.assertIn('setup(', self.parsed_args.msg_file.read())
+
+ def test_list_command(self):
+ func = Mock()
+ command_parser = CommandParser()
+ command_parser.register_list_command(func)
+ self.assertEqual(['list'], list(command_parser.subparsers.choices.keys()))
+
+ # Test simple listing
+ command_parser.run_command(['list'])
+ func.assert_called()
+ args, kwargs = func.call_args
+ self.assertEqual(args[0].auth_role, None)
+ self.assertEqual(args[0].project_name, None)
+ func.reset_mock()
+
+ # Test simple listing single project
+ command_parser.run_command(['list', '-p', 'mouse'])
+ func.assert_called()
+ args, kwargs = func.call_args
+ self.assertEqual(args[0].auth_role, None)
+ self.assertEqual(args[0].project_name, 'mouse')
+
+ # Test simple listing auth_role
+ command_parser.run_command(['list', '--auth-role', 'project_admin'])
+ func.assert_called()
+ args, kwargs = func.call_args
+ self.assertEqual(args[0].auth_role, 'project_admin')
+ self.assertEqual(args[0].project_name, None)
diff --git a/ddsc/tests/test_ddsclient.py b/ddsc/tests/test_ddsclient.py
index d612b03..5e0c37d 100644
--- a/ddsc/tests/test_ddsclient.py
+++ b/ddsc/tests/test_ddsclient.py
@@ -1,8 +1,8 @@
from __future__ import absolute_import
from unittest import TestCase
-from ddsc.ddsclient import UploadCommand
+from ddsc.ddsclient import UploadCommand, ListCommand
from ddsc.ddsclient import ShareCommand, DeliverCommand, read_argument_file_contents
-from mock import patch, MagicMock, Mock
+from mock import patch, MagicMock, Mock, call
class TestUploadCommand(TestCase):
@@ -106,3 +106,38 @@ class TestDDSClient(TestCase):
self.assertEqual('', read_argument_file_contents(None))
with open("setup.py") as infile:
self.assertIn("setup(", read_argument_file_contents(infile))
+
+
+class TestListCommand(TestCase):
+ @patch('sys.stdout.write')
+ @patch('ddsc.ddsclient.RemoteStore')
+ def test_print_project_names_no_auth_role(self, mock_remote_store, mock_print):
+ mock_remote_store.return_value.get_project_names.return_value = ['one', 'two', 'three']
+ cmd = ListCommand(MagicMock())
+ cmd.print_project_names(filter_auth_role=None)
+ expected_calls = [
+ call("one"),
+ call("\n"),
+ call("two"),
+ call("\n"),
+ call("three"),
+ call("\n")
+ ]
+ self.assertEqual(expected_calls, mock_print.call_args_list)
+
+ @patch('sys.stdout.write')
+ @patch('ddsc.ddsclient.RemoteStore')
+ def test_print_project_names_with_auth_role(self, mock_remote_store, mock_print):
+ mock_remote_store.return_value.get_projects_with_auth_role.return_value = [
+ {'name': 'mouse'},
+ {'name': 'ant'},
+ ]
+ cmd = ListCommand(MagicMock())
+ cmd.print_project_names(filter_auth_role='project_admin')
+ expected_calls = [
+ call("mouse"),
+ call("\n"),
+ call("ant"),
+ call("\n")
+ ]
+ self.assertEqual(expected_calls, mock_print.call_args_list)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 2
} | 0.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"flake8",
"mock",
"pytest"
],
"pre_install": null,
"python": "3.5",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
-e git+https://github.com/Duke-GCB/DukeDSClient.git@0293338fa21dd12611e2991d012a1c4e801a361f#egg=DukeDSClient
flake8==5.0.4
future==0.16.0
importlib-metadata==4.2.0
iniconfig==1.1.1
mccabe==0.7.0
mock==5.2.0
packaging==21.3
pluggy==1.0.0
py==1.11.0
pycodestyle==2.9.1
pyflakes==2.5.0
pyparsing==3.1.4
pytest==7.0.1
pytz==2025.2
PyYAML==3.12
requests==2.13.0
six==1.10.0
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: DukeDSClient
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- flake8==5.0.4
- future==0.16.0
- importlib-metadata==4.2.0
- iniconfig==1.1.1
- mccabe==0.7.0
- mock==5.2.0
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pycodestyle==2.9.1
- pyflakes==2.5.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytz==2025.2
- pyyaml==3.12
- requests==2.13.0
- six==1.10.0
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/DukeDSClient
| [
"ddsc/tests/test_cmdparser.py::TestCommandParser::test_list_command",
"ddsc/tests/test_ddsclient.py::TestListCommand::test_print_project_names_no_auth_role",
"ddsc/tests/test_ddsclient.py::TestListCommand::test_print_project_names_with_auth_role"
] | [] | [
"ddsc/core/tests/test_remotestore.py::TestProjectFolderFile::test_file_item",
"ddsc/core/tests/test_remotestore.py::TestProjectFolderFile::test_file_item_new_version",
"ddsc/core/tests/test_remotestore.py::TestProjectFolderFile::test_folder_item",
"ddsc/core/tests/test_remotestore.py::TestProjectFolderFile::test_project_list_item",
"ddsc/core/tests/test_remotestore.py::TestRemoteUser::test_parse_user",
"ddsc/core/tests/test_remotestore.py::TestRemoteAuthRole::test_deprecated_system_role",
"ddsc/core/tests/test_remotestore.py::TestRemoteAuthRole::test_parse_auth_role",
"ddsc/core/tests/test_remotestore.py::TestRemoteStore::test_auth_roles_project",
"ddsc/core/tests/test_remotestore.py::TestRemoteStore::test_auth_roles_system",
"ddsc/core/tests/test_remotestore.py::TestRemoteStore::test_get_projects_with_auth_role",
"ddsc/core/tests/test_remotestore.py::TestRemoteProjectChildren::test_simple_case",
"ddsc/core/tests/test_remotestore.py::TestRemoteProjectChildren::test_top_level_files",
"ddsc/core/tests/test_remotestore.py::TestReadRemoteHash::test_new_way_one_item",
"ddsc/core/tests/test_remotestore.py::TestReadRemoteHash::test_new_way_two_item",
"ddsc/core/tests/test_remotestore.py::TestReadRemoteHash::test_old_way",
"ddsc/core/tests/test_remotestore.py::TestRemoteAuthProvider::test_constructor",
"ddsc/core/tests/test_remotestore.py::TestRemoteAuthProvider::test_get_auth_providers",
"ddsc/core/tests/test_remotestore.py::TestRemoteAuthProvider::test_register_user_by_username",
"ddsc/core/tests/test_remotestore.py::TestRemoteAuthProvider::test_register_user_by_username_with_no_default_provider",
"ddsc/tests/test_cmdparser.py::TestCommandParser::test_deliver_no_msg",
"ddsc/tests/test_cmdparser.py::TestCommandParser::test_deliver_with_msg",
"ddsc/tests/test_cmdparser.py::TestCommandParser::test_register_add_user_command_no_msg",
"ddsc/tests/test_cmdparser.py::TestCommandParser::test_register_remove_user_command",
"ddsc/tests/test_cmdparser.py::TestCommandParser::test_share_no_msg",
"ddsc/tests/test_cmdparser.py::TestCommandParser::test_share_with_msg",
"ddsc/tests/test_ddsclient.py::TestUploadCommand::test_with_dry_run",
"ddsc/tests/test_ddsclient.py::TestUploadCommand::test_without_dry_run",
"ddsc/tests/test_ddsclient.py::TestShareCommand::test_run_message",
"ddsc/tests/test_ddsclient.py::TestShareCommand::test_run_no_message",
"ddsc/tests/test_ddsclient.py::TestDeliverCommand::test_run_message",
"ddsc/tests/test_ddsclient.py::TestDeliverCommand::test_run_no_message",
"ddsc/tests/test_ddsclient.py::TestDDSClient::test_read_argument_file_contents"
] | [] | MIT License | 1,539 | 755 | [
"ddsc/cmdparser.py",
"ddsc/ddsclient.py"
] |
|
google__mobly-284 | e5df6ca2918e21233d3d207207964b82319c9f9d | 2017-08-01 02:08:05 | 9bb2ab41518a2f037178888f9e606fc42394ffb0 | dthkao:
Review status: 0 of 2 files reviewed at latest revision, 3 unresolved discussions.
---
*[mobly/records.py, line 122 at r1](https://reviewable.io:443/reviews/google/mobly/284#-KqTgIHnBFx-nHQQcOAq:-KqTgIHnBFx-nHQQcOAr:b-acx593) ([raw file](https://github.com/google/mobly/blob/b8b32f7a4ec1519a61a320a6b3b2c5a1811fb48d/mobly/records.py#L122)):*
> ```Python
>
> class TestResultRecord(object):
> """A record that holds the information of a single test case.
> ```
"test case" nomenclature?
---
*[mobly/records.py, line 132 at r1](https://reviewable.io:443/reviews/google/mobly/284#-KqTgS0N6lQ8TLdFtNJ5:-KqTgS0OviyvCih_C-0R:bnc8sd2) ([raw file](https://github.com/google/mobly/blob/b8b32f7a4ec1519a61a320a6b3b2c5a1811fb48d/mobly/records.py#L132)):*
> ```Python
> extras: User defined extra information of the test result, must be
> serializable.
> details: string, description of the cause of the test's termination.
> ```
nit: is yapf formatting this way? the different alignments for each arg are kind of distracting.
---
*[mobly/records.py, line 180 at r1](https://reviewable.io:443/reviews/google/mobly/284#-KqTiCM4ZWPPVnwvXdjF:-KqTiCM4ZWPPVnwvXdjG:boxqafn) ([raw file](https://github.com/google/mobly/blob/b8b32f7a4ec1519a61a320a6b3b2c5a1811fb48d/mobly/records.py#L180)):*
> ```Python
> # error as the signal if there is any.
> failure_location = ''
> if e is None and self.extra_errors:
> ```
Just for my understanding, is there never a scenario where we have extra errors but the result is something other than ERROR or FAIL?
---
*Comments from [Reviewable](https://reviewable.io:443/reviews/google/mobly/284)*
<!-- Sent from Reviewable.io -->
xpconanfan:
Review status: 0 of 2 files reviewed at latest revision, 3 unresolved discussions.
---
*[mobly/records.py, line 132 at r1](https://reviewable.io:443/reviews/google/mobly/284#-KqTgS0N6lQ8TLdFtNJ5:-KqTlZLblhxBIk6Sik2R:b-sa7406) ([raw file](https://github.com/google/mobly/blob/b8b32f7a4ec1519a61a320a6b3b2c5a1811fb48d/mobly/records.py#L132)):*
<details><summary><i>Previously, dthkao (David T.H. Kao) wrote…</i></summary><blockquote>
nit: is yapf formatting this way? the different alignments for each arg are kind of distracting.
</blockquote></details>
Our convention is to indent to after ": ".
---
*[mobly/records.py, line 180 at r1](https://reviewable.io:443/reviews/google/mobly/284#-KqTiCM4ZWPPVnwvXdjF:-KqTlkFZb7qqCuCXoPuR:b-j9rm3x) ([raw file](https://github.com/google/mobly/blob/b8b32f7a4ec1519a61a320a6b3b2c5a1811fb48d/mobly/records.py#L180)):*
<details><summary><i>Previously, dthkao (David T.H. Kao) wrote…</i></summary><blockquote>
Just for my understanding, is there never a scenario where we have extra errors but the result is something other than ERROR or FAIL?
</blockquote></details>
If extra error exists, the test should be ERROR, otherwise it's a bug.
---
*Comments from [Reviewable](https://reviewable.io:443/reviews/google/mobly/284)*
<!-- Sent from Reviewable.io -->
xpconanfan:
Review status: 0 of 2 files reviewed at latest revision, 3 unresolved discussions.
---
*[mobly/records.py, line 122 at r1](https://reviewable.io:443/reviews/google/mobly/284#-KqTgIHnBFx-nHQQcOAq:-KqTm-oNT1t1p-LYmAAk:b-896fix) ([raw file](https://github.com/google/mobly/blob/b8b32f7a4ec1519a61a320a6b3b2c5a1811fb48d/mobly/records.py#L122)):*
<details><summary><i>Previously, dthkao (David T.H. Kao) wrote…</i></summary><blockquote>
"test case" nomenclature?
</blockquote></details>
Done.
---
*Comments from [Reviewable](https://reviewable.io:443/reviews/google/mobly/284)*
<!-- Sent from Reviewable.io -->
dthkao:
Review status: 0 of 2 files reviewed at latest revision, 3 unresolved discussions.
---
*[mobly/records.py, line 180 at r1](https://reviewable.io:443/reviews/google/mobly/284#-KqTiCM4ZWPPVnwvXdjF:-KqTor4h_KJbD4nSbht7:by8gldi) ([raw file](https://github.com/google/mobly/blob/b8b32f7a4ec1519a61a320a6b3b2c5a1811fb48d/mobly/records.py#L180)):*
<details><summary><i>Previously, xpconanfan (Ang Li) wrote…</i></summary><blockquote>
If extra error exists, the test should be ERROR, otherwise it's a bug.
</blockquote></details>
That does not seem right. If a test fails and then an error occurs in teardown the result should be fail.
---
*Comments from [Reviewable](https://reviewable.io:443/reviews/google/mobly/284)*
<!-- Sent from Reviewable.io -->
xpconanfan:
Review status: 0 of 2 files reviewed at latest revision, 3 unresolved discussions.
---
*[mobly/records.py, line 180 at r1](https://reviewable.io:443/reviews/google/mobly/284#-KqTiCM4ZWPPVnwvXdjF:-KqTp4dZkO_WrQ2rWfOE:b-wh33nw) ([raw file](https://github.com/google/mobly/blob/b8b32f7a4ec1519a61a320a6b3b2c5a1811fb48d/mobly/records.py#L180)):*
<details><summary><i>Previously, dthkao (David T.H. Kao) wrote…</i></summary><blockquote>
That does not seem right. If a test fails and then an error occurs in teardown the result should be fail.
</blockquote></details>
No, the result should be error in that case, as it is today and in all other frameworks (junit, pyunit etc).
---
*Comments from [Reviewable](https://reviewable.io:443/reviews/google/mobly/284)*
<!-- Sent from Reviewable.io -->
dthkao:
Review status: 0 of 2 files reviewed at latest revision, 3 unresolved discussions.
---
*[mobly/records.py, line 180 at r1](https://reviewable.io:443/reviews/google/mobly/284#-KqTiCM4ZWPPVnwvXdjF:-KqTpbZVQM6Vi-tcjDXF:b1f97c1) ([raw file](https://github.com/google/mobly/blob/b8b32f7a4ec1519a61a320a6b3b2c5a1811fb48d/mobly/records.py#L180)):*
<details><summary><i>Previously, xpconanfan (Ang Li) wrote…</i></summary><blockquote>
No, the result should be error in that case, as it is today and in all other frameworks (junit, pyunit etc).
</blockquote></details>
1) Pyunit does not have a notion of an extra error,
2) It still records the failure
3) Your code does not seem to support that behavior: if there is a failure marked as failure that is not changed with the extra error message moved to details
$ cat pytesttest.py
davidkao@davidkao:~$ cat pytesttest.py
import unittest
class TestErrorReporting(unittest.TestCase):
def tearDown(self):
raise Exception('ow')
def test_fail(self):
self.assertEqual('bar', 'FOO')
if __name__ == '__main__':
unittest.main()
$ python pytesttest.py
FE
======================================================================
ERROR: test_fail (__main__.TestErrorReporting)
----------------------------------------------------------------------
Traceback (most recent call last):
File "pytesttest.py", line 16, in tearDown
raise Exception('ow')
Exception: ow
======================================================================
FAIL: test_fail (__main__.TestErrorReporting)
----------------------------------------------------------------------
Traceback (most recent call last):
File "pytesttest.py", line 23, in test_fail
self.assertEqual('bar', 'FOO')
AssertionError: 'bar' != 'FOO'
----------------------------------------------------------------------
Ran 1 test in 0.001s
---
*Comments from [Reviewable](https://reviewable.io:443/reviews/google/mobly/284)*
<!-- Sent from Reviewable.io -->
xpconanfan:
Review status: 0 of 2 files reviewed at latest revision, 3 unresolved discussions.
---
*[mobly/records.py, line 180 at r1](https://reviewable.io:443/reviews/google/mobly/284#-KqTiCM4ZWPPVnwvXdjF:-KqTrz_Hc9lHr0GvZNIT:b-pl5y07) ([raw file](https://github.com/google/mobly/blob/b8b32f7a4ec1519a61a320a6b3b2c5a1811fb48d/mobly/records.py#L180)):*
<details><summary><i>Previously, dthkao (David T.H. Kao) wrote…</i></summary><blockquote>
1) Pyunit does not have a notion of an extra error,
2) It still records the failure
3) Your code does not seem to support that behavior: if there is a failure marked as failure that is not changed with the extra error message moved to details
$ cat pytesttest.py
davidkao@davidkao:~$ cat pytesttest.py
import unittest
class TestErrorReporting(unittest.TestCase):
def tearDown(self):
raise Exception('ow')
def test_fail(self):
self.assertEqual('bar', 'FOO')
if __name__ == '__main__':
unittest.main()
$ python pytesttest.py
FE
======================================================================
ERROR: test_fail (__main__.TestErrorReporting)
----------------------------------------------------------------------
Traceback (most recent call last):
File "pytesttest.py", line 16, in tearDown
raise Exception('ow')
Exception: ow
======================================================================
FAIL: test_fail (__main__.TestErrorReporting)
----------------------------------------------------------------------
Traceback (most recent call last):
File "pytesttest.py", line 23, in test_fail
self.assertEqual('bar', 'FOO')
AssertionError: 'bar' != 'FOO'
----------------------------------------------------------------------
Ran 1 test in 0.001s
</blockquote></details>
Pyunit always says "FAILED" in the end as long as it's not pass.
But if you see the detailed output, it singles out errors.
For our purposes, if something threw in `teardown_test`, then something went unexpectedly wrong, which translates to ERROR. Otherwise, a failed test with or without teardown error would be reported the same, and somebody would have to click the summary to read the extra errors to discover that something went unexpectedly wrong.
That behavior would make teardown errors hard to discover, which is not good. Hence we intentionally change the test result to error.
I don't really know what 3 means. if the test body explicitly Failed, then we won't move extra error to main.
---
*Comments from [Reviewable](https://reviewable.io:443/reviews/google/mobly/284)*
<!-- Sent from Reviewable.io -->
dthkao:
Review status: 0 of 2 files reviewed at latest revision, 2 unresolved discussions.
---
*[mobly/records.py, line 180 at r1](https://reviewable.io:443/reviews/google/mobly/284#-KqTiCM4ZWPPVnwvXdjF:-KqTumLKX8fA_esUqlpn:b-kc55ub) ([raw file](https://github.com/google/mobly/blob/b8b32f7a4ec1519a61a320a6b3b2c5a1811fb48d/mobly/records.py#L180)):*
> Pyunit always says "FAILED" in the end as long as it's not pass.
> But if you see the detailed output, it singles out errors.
It records them, yes, but does not single them out. I encourage you to run the simple test case above. Your suggestion here would be to allow a teardown error to hide a test failure?
> For our purposes, if something threw in teardown_test, then something went unexpectedly wrong, which translates to ERROR. Otherwise, a failed test with or without teardown error would be reported the same, and somebody would have to click the summary to read the extra errors to discover that something went unexpectedly wrong.
"Otherwise, a failed test with or without teardown error would be reported the same" <-- this is exactly what should happen. The failure occurred first and then potentially caused extra errors.
> That behavior would make teardown errors hard to discover, which is not good. Hence we intentionally change the test result to error.
We should only change the result to error if it was Pass before teardown. Please see comment in https://github.com/google/mobly/issues/264
> I don't really know what 3 means. if the test body explicitly Failed, then we won't move extra error to main.
What happens if somehow we call `self._test_end(TestResultEnums.TEST_RESULT_FAIL, None)`?
---
*Comments from [Reviewable](https://reviewable.io:443/reviews/google/mobly/284)*
<!-- Sent from Reviewable.io -->
xpconanfan:
Review status: 0 of 2 files reviewed at latest revision, 2 unresolved discussions.
---
*[mobly/records.py, line 180 at r1](https://reviewable.io:443/reviews/google/mobly/284#-KqTiCM4ZWPPVnwvXdjF:-KqUJFOd69F66aABin7p:b-xxjkh1) ([raw file](https://github.com/google/mobly/blob/b8b32f7a4ec1519a61a320a6b3b2c5a1811fb48d/mobly/records.py#L180)):*
<details><summary><i>Previously, dthkao (David T.H. Kao) wrote…</i></summary><blockquote>
> Pyunit always says "FAILED" in the end as long as it's not pass.
> But if you see the detailed output, it singles out errors.
It records them, yes, but does not single them out. I encourage you to run the simple test case above. Your suggestion here would be to allow a teardown error to hide a test failure?
> For our purposes, if something threw in teardown_test, then something went unexpectedly wrong, which translates to ERROR. Otherwise, a failed test with or without teardown error would be reported the same, and somebody would have to click the summary to read the extra errors to discover that something went unexpectedly wrong.
"Otherwise, a failed test with or without teardown error would be reported the same" <-- this is exactly what should happen. The failure occurred first and them potentially caused extra errors.
> That behavior would make teardown errors hard to discover, which is not good. Hence we intentionally change the test result to error.
We should only change the result to error if it was Pass before teardown. Please see comment in https://github.com/google/mobly/issues/264
> I don't really know what 3 means. if the test body explicitly Failed, then we won't move extra error to main.
What happens if somehow we call `self._test_end(TestResultEnums.TEST_RESULT_FAIL, None)`?
</blockquote></details>
That comment only affects where the error description and stacktrace are recorded, it doesn't mention the final result of the test entry.
If error occurs in teardown, the test is ERROR; this is expected behavior.
---
*Comments from [Reviewable](https://reviewable.io:443/reviews/google/mobly/284)*
<!-- Sent from Reviewable.io -->
xpconanfan: Made the change to properly record exception objects.
* Record stacktrace for each exception object.
* Record the actual exception object for the main termination signal.
This paves the road for better debug info in `on_fail` as required by #281
dthkao:
Review status: 0 of 3 files reviewed at latest revision, 2 unresolved discussions.
---
*[mobly/records.py, line 132 at r1](https://reviewable.io:443/reviews/google/mobly/284#-KqTgS0N6lQ8TLdFtNJ5:-Kr2ef8oe7Ird8oaa9l_:b7tfnuk) ([raw file](https://github.com/google/mobly/blob/b8b32f7a4ec1519a61a320a6b3b2c5a1811fb48d/mobly/records.py#L132)):*
<details><summary><i>Previously, xpconanfan (Ang Li) wrote…</i></summary><blockquote>
Our convention is to indent to after ": ".
</blockquote></details>
Discussed offline, filed https://github.com/google/mobly/issues/300. Acknowledged for now
---
*[mobly/records.py, line 180 at r1](https://reviewable.io:443/reviews/google/mobly/284#-KqTiCM4ZWPPVnwvXdjF:-Kr2fHm7qvD8Mu8EFaL-:b-9exoj3) ([raw file](https://github.com/google/mobly/blob/b8b32f7a4ec1519a61a320a6b3b2c5a1811fb48d/mobly/records.py#L180)):*
<details><summary><i>Previously, xpconanfan (Ang Li) wrote…</i></summary><blockquote>
That comment only affects where the error description and stacktrace are recorded, it doesn't mention the final result of the test entry.
If error occurs in teardown, the test is ERROR; this is expected behavior.
</blockquote></details>
It is not expected behavior. If a test fails in the test case an error in teardown should not cause that failure to vanish. This was one of the #1 reasons to reexamine test result recording.
---
*Comments from [Reviewable](https://reviewable.io:443/reviews/google/mobly/284)*
<!-- Sent from Reviewable.io -->
xpconanfan:
Review status: 0 of 3 files reviewed at latest revision, 2 unresolved discussions.
---
*[mobly/records.py, line 180 at r1](https://reviewable.io:443/reviews/google/mobly/284#-KqTiCM4ZWPPVnwvXdjF:-Kr2yMMfCWPH9jvSHs6B:bbpfs5s) ([raw file](https://github.com/google/mobly/blob/b8b32f7a4ec1519a61a320a6b3b2c5a1811fb48d/mobly/records.py#L180)):*
<details><summary><i>Previously, dthkao (David T.H. Kao) wrote…</i></summary><blockquote>
It is not expected behavior. If a test fails in the test case an error in teardown should not cause that failure to vanish. This was one of the #1 reasons to reexamine test result recording.
</blockquote></details>
This is intentional.
1. test fail, teardown pass -> FAIL
2. test fail, teardown error -> ERROR
The "failure" is not "lost" in case 2. The termination signal is still the original failure, the details and stacktrace fields still use the original failure's info.
But an error in teardown test *should* change the status of the test result because something unexpected happened in teardown.
A test is tallied as FAIL only if an explicit test failure signal is thrown in test body and no other errors occurred in the test.
If anything unexpected happen, the status is ERROR.
---
*Comments from [Reviewable](https://reviewable.io:443/reviews/google/mobly/284)*
<!-- Sent from Reviewable.io -->
xpconanfan: There's a bug where errors in functions like `on_fail` are not tallied properly.
Need another patch.
dthkao:
Review status: 0 of 3 files reviewed at latest revision, 2 unresolved discussions.
---
*[mobly/records.py, line 180 at r1](https://reviewable.io:443/reviews/google/mobly/284#-KqTiCM4ZWPPVnwvXdjF:-Kr49v8Qlmz11BJJodd9:byltc7v) ([raw file](https://github.com/google/mobly/blob/b8b32f7a4ec1519a61a320a6b3b2c5a1811fb48d/mobly/records.py#L180)):*
<details><summary><i>Previously, xpconanfan (Ang Li) wrote…</i></summary><blockquote>
This is intentional.
1. test fail, teardown pass -> FAIL
2. test fail, teardown error -> ERROR
The "failure" is not "lost" in case 2. The termination signal is still the original failure, the details and stacktrace fields still use the original failure's info.
But an error in teardown test *should* change the status of the test result because something unexpected happened in teardown.
A test is tallied as FAIL only if an explicit test failure signal is thrown in test body and no other errors occurred in the test.
If anything unexpected happen, the status is ERROR.
</blockquote></details>
1) I disagree entirely. See the pytest example above. When a test fails and an error is thrown in teardown a failure and error are BOTH counted: there is a failure result and an EXTRA error. If the details and stacktrace reflect the failure then the result should also. Otherwise this will only lead to confusion as to why a result with information coming from a mode of failure is marked as error.
2) The logic implemented here itself is inconsistent with your claim. It is possible to call test_fail with no error message:
`def test_fail(self, e=None)` and in this scenario the code explicitly subverts the result marked by the user. to replace it with an 'extra_error'.
---
*Comments from [Reviewable](https://reviewable.io:443/reviews/google/mobly/284)*
<!-- Sent from Reviewable.io -->
xpconanfan:
Review status: 0 of 3 files reviewed at latest revision, 2 unresolved discussions.
---
*[mobly/records.py, line 180 at r1](https://reviewable.io:443/reviews/google/mobly/284#-KqTiCM4ZWPPVnwvXdjF:-Kr7vy8js6-dUfo3Pu30:bsgoujm) ([raw file](https://github.com/google/mobly/blob/b8b32f7a4ec1519a61a320a6b3b2c5a1811fb48d/mobly/records.py#L180)):*
<details><summary><i>Previously, dthkao (David T.H. Kao) wrote…</i></summary><blockquote>
1) I disagree entirely. See the pytest example above. When a test fails and an error is thrown in teardown a failure and error are BOTH counted: there is a failure result and an EXTRA error. If the details and stacktrace reflect the failure then the result should also. Otherwise this will only lead to confusion as to why a result with information coming from a mode of failure is marked as error.
2) The logic implemented here itself is inconsistent with your claim. It is possible to call test_fail with no error message:
`def test_fail(self, e=None)` and in this scenario the code explicitly subverts the result marked by the user. to replace it with an 'extra_error'.
</blockquote></details>
Regardless of what you agree, can we get this PR in?
This PR addresses the issue of exception objects not being stored properly in records.
As the initial commit message says, this is the first of multiple PRs to come.
We can address other concerns in later PRs.
We are keeping that tracking issue open until all the PRs are in.
---
*Comments from [Reviewable](https://reviewable.io:443/reviews/google/mobly/284)*
<!-- Sent from Reviewable.io -->
k2fong:
Review status: 0 of 3 files reviewed at latest revision, 9 unresolved discussions.
---
*[mobly/records.py, line 180 at r1](https://reviewable.io:443/reviews/google/mobly/284#-KqTiCM4ZWPPVnwvXdjF:-Kr8PO87zIe_2us-T9V7:b-8lvomq) ([raw file](https://github.com/google/mobly/blob/b8b32f7a4ec1519a61a320a6b3b2c5a1811fb48d/mobly/records.py#L180)):*
<details><summary><i>Previously, xpconanfan (Ang Li) wrote…</i></summary><blockquote>
Regardless of what you agree, can we get this PR in?
This PR addresses the issue of exception objects not being stored properly in records.
As the initial commit message says, this is the first of multiple PRs to come.
We can address other concerns in later PRs.
We are keeping that tracking issue open until all the PRs are in.
</blockquote></details>
Just giving my 2 cents here, my recommendation is to follow how pytest handles this, since that'll probably be the user's expectation of mobly. From a little test of mine, it looks like a test failure (with teardown error) results in a report of "FAILED (failures=1, errors=1)". Can we do similar? Also, it provided a stack trace for the failure in the test and for the error in teardown. From an offline chat, it sounds like doing so might be a big task; what do you guys think about a path forward of just continuing however mobly reports today, so as to minimize user impact in the meantime? Unless that was partly the reason for this PR — and I'm assuming users are either okay with how Mobly is reporting or have created some workaround.
---
*[mobly/records.py, line 197 at r3](https://reviewable.io:443/reviews/google/mobly/284#-Kr3inUC5cHYesCg3hIN:-Kr3inUC5cHYesCg3hIO:b-bvf5k5) ([raw file](https://github.com/google/mobly/blob/d3fd8b09c1d6875e09c2aba36aea69c2b15aa67d/mobly/records.py#L197)):*
> if e:
> self.termination_signal = e
no need for this 'if e' clause. Just assign?
---
*[mobly/records.py, line 200 at r3](https://reviewable.io:443/reviews/google/mobly/284#-Kr3jiRas15KiEg1ApFP:-Kr3jiRas15KiEg1ApFQ:bd3d4ok) ([raw file](https://github.com/google/mobly/blob/d3fd8b09c1d6875e09c2aba36aea69c2b15aa67d/mobly/records.py#L200)):*
> e is None
e should be None at this point already
---
*[mobly/records.py, line 203 at r3](https://reviewable.io:443/reviews/google/mobly/284#-Kr3kD9eWG2D5rUDEOU-:-Kr3kD9eWG2D5rUDEOU0:b4eguto) ([raw file](https://github.com/google/mobly/blob/d3fd8b09c1d6875e09c2aba36aea69c2b15aa67d/mobly/records.py#L203)):*
> if failure_location:
> failure_location += ': '
Simplify by removing one conditional: fold this into the failure_location assignment above. Unless it's possible that the popitem() above might return None or ''
---
*[mobly/records.py, line 205 at r3](https://reviewable.io:443/reviews/google/mobly/284#-Kr3lPLRAOw3SbbSD8V7:-Kr3lPLRAOw3SbbSD8V8:bh6yzzm) ([raw file](https://github.com/google/mobly/blob/d3fd8b09c1d6875e09c2aba36aea69c2b15aa67d/mobly/records.py#L205)):*
> if isinstance(e, signals.TestSignal):
> self.details = failure_location + str(e.details)
>
>
>
> self.extras = e.extras
> elif isinstance(e, Exception):
> self.details = failure_location + str(e)
Seems like if the logic enters into either of these, failure_location will be empty string. Is that intended?
---
*[mobly/records.py, line 223 at r3](https://reviewable.io:443/reviews/google/mobly/284#-Kr3nm3ORrhOCtOJM6Km:-Kr3nm3ORrhOCtOJM6Kn:b1nwfy1) ([raw file](https://github.com/google/mobly/blob/d3fd8b09c1d6875e09c2aba36aea69c2b15aa67d/mobly/records.py#L223)):*
> if e and not hasattr(e, 'stacktrace_str'):
> e.stacktrace_str = stacktrace_str
should test end have side effect on e passed in?
---
*[mobly/records.py, line 284 at r3](https://reviewable.io:443/reviews/google/mobly/284#-Kr3oVzGwyFl-d3k9t2Q:-Kr3oVzGwyFl-d3k9t2R:bxfio11) ([raw file](https://github.com/google/mobly/blob/d3fd8b09c1d6875e09c2aba36aea69c2b15aa67d/mobly/records.py#L284)):*
<details><summary><i alt="summary">Quoted 12 lines of code…</i></summary>
> # This check cannot be based on try...except, which messes up
> # `exc_info`.
> if e and hasattr(e, '__traceback__'):
> exc_traceback = e.__traceback__
> else:
> # In py2, exception objects don't have built-in traceback, so we
> # have to immediately retrieve stacktrace from `sys.exc_info`.
> _, _, exc_traceback = sys.exc_info()
> if exc_traceback:
> stacktrace_str = ''.join(traceback.format_tb(exc_traceback))
> if not hasattr(e, 'stacktrace_str'):
> e.stacktrace_str = stacktrace_str
</details>
This look like a candidate for refactor. Exact code above
---
*[mobly/records.py, line 322 at r3](https://reviewable.io:443/reviews/google/mobly/284#-Kr3ptASE2tC2CxyjWH7:-Kr3ptASE2tC2CxyjWH8:b-4oha6q) ([raw file](https://github.com/google/mobly/blob/d3fd8b09c1d6875e09c2aba36aea69c2b15aa67d/mobly/records.py#L322)):*
<details><summary><i alt="summary">Quoted 4 lines of code…</i></summary>
> try:
> exception_dict[TestResultEnums.RECORD_EXTRAS] = e.extras
> except AttributeError:
> pass
</details>
can hasattr() be used here instead of relying on exception being raised?
---
*Comments from [Reviewable](https://reviewable.io:443/reviews/google/mobly/284)*
<!-- Sent from Reviewable.io -->
xpconanfan:
Review status: 0 of 3 files reviewed at latest revision, 9 unresolved discussions.
---
*[mobly/records.py, line 180 at r1](https://reviewable.io:443/reviews/google/mobly/284#-KqTiCM4ZWPPVnwvXdjF:-Kr8VMvx7t5gc8BpMUR2:b-xqgz0k) ([raw file](https://github.com/google/mobly/blob/b8b32f7a4ec1519a61a320a6b3b2c5a1811fb48d/mobly/records.py#L180)):*
<details><summary><i>Previously, k2fong wrote…</i></summary><blockquote>
Just giving my 2 cents here, my recommendation is to follow how pytest handles since that'll probably be the user's expectation of mobly. From a little test of mine, looks like a test failure (with teardown error) results in a reporting of "FAILED (failures=1, errors=1)". Can we do similar? Also, it provided stack trace for the failure in test and the error in teardown. From an offline chat, sounds like to do so might be a big task, what you guys think about the path forward to just continue however mobly is reporting currently today so as to minimize user impacted in the meantime. Unless that was partly the reason for this PR and I'm assuming users are either okay with how Mobly is reporting or has created some workaround.
</blockquote></details>
The line "FAILED (failures=1, errors=1)" is the test run summary line of all tests in a run, we have a similar summary line at the end, and we are not really discussing this line here...
We are talking about the status of a single test record.
We are not a unit test framework, so we can't copy pyunit as it is. This is one of the differ points between unit tests and E2E tests.
If that's what you prefer, I'm ok with keeping the current behavior because there have been no complaints from users who run tests everyday.
---
*[mobly/records.py, line 200 at r3](https://reviewable.io:443/reviews/google/mobly/284#-Kr3jiRas15KiEg1ApFP:-Kr8WTTciz-tcCwsDzco:brza0n9) ([raw file](https://github.com/google/mobly/blob/d3fd8b09c1d6875e09c2aba36aea69c2b15aa67d/mobly/records.py#L200)):*
<details><summary><i>Previously, k2fong wrote…</i></summary><blockquote>
> e is None
e should be None at this point already
</blockquote></details>
why? e is passed in, it is not always None.
---
*[mobly/records.py, line 203 at r3](https://reviewable.io:443/reviews/google/mobly/284#-Kr3kD9eWG2D5rUDEOU-:-Kr8XCyLsylMTQ0wha9y:bjxq2oh) ([raw file](https://github.com/google/mobly/blob/d3fd8b09c1d6875e09c2aba36aea69c2b15aa67d/mobly/records.py#L203)):*
It actually can be...
```
>>> x = OrderedDict()
>>> x[None] = 'a'
>>> x
OrderedDict([(None, 'a')])
```
---
*[mobly/records.py, line 205 at r3](https://reviewable.io:443/reviews/google/mobly/284#-Kr3lPLRAOw3SbbSD8V7:-Kr8XHqZm-qbRsSkiSX0:baioqxy) ([raw file](https://github.com/google/mobly/blob/d3fd8b09c1d6875e09c2aba36aea69c2b15aa67d/mobly/records.py#L205)):*
<details><summary><i>Previously, k2fong wrote…</i></summary><blockquote>
> if isinstance(e, signals.TestSignal):
> self.details = failure_location + str(e.details)
>
>
>
> self.extras = e.extras
> elif isinstance(e, Exception):
> self.details = failure_location + str(e)
Seems like if the logic enters into either of these, failure_location will be empty string. Is that intended?
</blockquote></details>
Yes. E.g. there's no `failure_location` for the self.termination_signal.
---
*[mobly/records.py, line 223 at r3](https://reviewable.io:443/reviews/google/mobly/284#-Kr3nm3ORrhOCtOJM6Km:-Kr8XkXmbMJv_MgYYAZ_:b-h3ul1p) ([raw file](https://github.com/google/mobly/blob/d3fd8b09c1d6875e09c2aba36aea69c2b15aa67d/mobly/records.py#L223)):*
<details><summary><i>Previously, k2fong wrote…</i></summary><blockquote>
> if e and not hasattr(e, 'stacktrace_str'):
> e.stacktrace_str = stacktrace_str
should test end have side effect on e passed in?
</blockquote></details>
this e is used internally for recording test execution info, so it's kinda ok...
the thing is, if we don't save the stacktrace right here, we lose it.
it's sad but it's python...
---
*[mobly/records.py, line 284 at r3](https://reviewable.io:443/reviews/google/mobly/284#-Kr3oVzGwyFl-d3k9t2Q:-Kr8YNUnQM53SkOIRPXC:bs0rvn9) ([raw file](https://github.com/google/mobly/blob/d3fd8b09c1d6875e09c2aba36aea69c2b15aa67d/mobly/records.py#L284)):*
<details><summary><i>Previously, k2fong wrote…</i></summary><blockquote>
> # This check cannot be based on try...except, which messes up
> # `exc_info`.
> if e and hasattr(e, '__traceback__'):
> exc_traceback = e.__traceback__
> else:
> # In py2, exception objects don't have built-in traceback, so we
> # have to immediately retrieve stacktrace from `sys.exc_info`.
> _, _, exc_traceback = sys.exc_info()
> if exc_traceback:
> stacktrace_str = ''.join(traceback.format_tb(exc_traceback))
> if not hasattr(e, 'stacktrace_str'):
> e.stacktrace_str = stacktrace_str
This look like a candidate for refactor. Exact code above
</blockquote></details>
not sure what you mean. can you clarify.
also it's really hard to read this post due to formatting...
---
*[mobly/records.py, line 322 at r3](https://reviewable.io:443/reviews/google/mobly/284#-Kr3ptASE2tC2CxyjWH7:-Kr8Y4l2nK2TlcupyFHX:b1undhl) ([raw file](https://github.com/google/mobly/blob/d3fd8b09c1d6875e09c2aba36aea69c2b15aa67d/mobly/records.py#L322)):*
<details><summary><i>Previously, k2fong wrote…</i></summary><blockquote>
> try:
> exception_dict[TestResultEnums.RECORD_EXTRAS] = e.extras
> except AttributeError:
> pass
can hasattr() be used here instead of relying on exception being raised?
</blockquote></details>
Don't have a strong preference.
i thought you liked pythonic style?
"it's better to ask for forgiveness than permission" :P
---
*Comments from [Reviewable](https://reviewable.io:443/reviews/google/mobly/284)*
<!-- Sent from Reviewable.io -->
k2fong:
Review status: 0 of 3 files reviewed at latest revision, 9 unresolved discussions.
---
*[mobly/records.py, line 200 at r3](https://reviewable.io:443/reviews/google/mobly/284#-Kr3jiRas15KiEg1ApFP:-Kr9NlBZLx3BU5371B2y:b1r6lk5) ([raw file](https://github.com/google/mobly/blob/d3fd8b09c1d6875e09c2aba36aea69c2b15aa67d/mobly/records.py#L200)):*
<details><summary><i>Previously, xpconanfan (Ang Li) wrote…</i></summary><blockquote>
why? e is passed in, it is not always None.
</blockquote></details>
because of the 'if e' check above
---
*[mobly/records.py, line 203 at r3](https://reviewable.io:443/reviews/google/mobly/284#-Kr3kD9eWG2D5rUDEOU-:-Kr9To43i_0oHdx3dDI3:bg7zopp) ([raw file](https://github.com/google/mobly/blob/d3fd8b09c1d6875e09c2aba36aea69c2b15aa67d/mobly/records.py#L203)):*
<details><summary><i>Previously, xpconanfan (Ang Li) wrote…</i></summary><blockquote>
It actually can be...
```
>>> x = OrderedDict()
>>> x[None] = 'a'
>>> x
OrderedDict([(None, 'a')])
```
</blockquote></details>
can it be like below? just trying to reduce the number of conditional logics here.
elif e is None and self.extra_errors:
failure_location, e = self.extra_errors.popitem(last=False)
failure_location += ': '
---
*[mobly/records.py, line 205 at r3](https://reviewable.io:443/reviews/google/mobly/284#-Kr3lPLRAOw3SbbSD8V7:-Kr9Osv8BYHFtQlRGpHY:buvaev8) ([raw file](https://github.com/google/mobly/blob/d3fd8b09c1d6875e09c2aba36aea69c2b15aa67d/mobly/records.py#L205)):*
<details><summary><i>Previously, xpconanfan (Ang Li) wrote…</i></summary><blockquote>
Yes. E.g. there's no `failure_location` for the self.termination_signal.
</blockquote></details>
if that's the case then I'll suggest not using failure_location here at all
---
*[mobly/records.py, line 223 at r3](https://reviewable.io:443/reviews/google/mobly/284#-Kr3nm3ORrhOCtOJM6Km:-Kr9PNuYPYR77a1qfcyD:b-tg73hw) ([raw file](https://github.com/google/mobly/blob/d3fd8b09c1d6875e09c2aba36aea69c2b15aa67d/mobly/records.py#L223)):*
<details><summary><i>Previously, xpconanfan (Ang Li) wrote…</i></summary><blockquote>
this e is used internally for recording test execution info, so it's kinda ok...
the thing is, if we don't save the stacktrace right here, we lose it.
it's sad but it's python...
</blockquote></details>
if so, might be worthwhile to indicate so in the header
---
*[mobly/records.py, line 284 at r3](https://reviewable.io:443/reviews/google/mobly/284#-Kr3oVzGwyFl-d3k9t2Q:-Kr9PbW-PVzlWsD5ShSQ:bsgxs7k) ([raw file](https://github.com/google/mobly/blob/d3fd8b09c1d6875e09c2aba36aea69c2b15aa67d/mobly/records.py#L284)):*
<details><summary><i>Previously, xpconanfan (Ang Li) wrote…</i></summary><blockquote>
not sure what you mean. can you clarify.
also it's really hard to read this post due to formatting...
</blockquote></details>
sorry about that, I was expecting it to highlight code similar to critique :p My comment is that lines 219-233 and 288-300 are pretty much identical besides 2 lines, and I'm not sure those should be different either. Can they be refactored and combined into one method?
---
*[mobly/records.py, line 322 at r3](https://reviewable.io:443/reviews/google/mobly/284#-Kr3ptASE2tC2CxyjWH7:-Kr9S1FKj2qHXgu0nPcN:b-qna566) ([raw file](https://github.com/google/mobly/blob/d3fd8b09c1d6875e09c2aba36aea69c2b15aa67d/mobly/records.py#L322)):*
<details><summary><i>Previously, xpconanfan (Ang Li) wrote…</i></summary><blockquote>
Don't have a strong preference.
i thought you liked pythonic style?
"it's better to ask for forgiveness than permission" :P
</blockquote></details>
hmm, not me. But is this style really the pythonic style? I prefer hasattr(), especially since it's used elsewhere in this file as well.
---
*Comments from [Reviewable](https://reviewable.io:443/reviews/google/mobly/284)*
<!-- Sent from Reviewable.io -->
xpconanfan:
Review status: 0 of 3 files reviewed at latest revision, 9 unresolved discussions.
---
*[mobly/records.py, line 200 at r3](https://reviewable.io:443/reviews/google/mobly/284#-Kr3jiRas15KiEg1ApFP:-KrCGa4Aefw6mqIKkqST:bjgacef) ([raw file](https://github.com/google/mobly/blob/d3fd8b09c1d6875e09c2aba36aea69c2b15aa67d/mobly/records.py#L200)):*
<details><summary><i>Previously, k2fong wrote…</i></summary><blockquote>
because of the 'if e' check above
</blockquote></details>
Ok, but I fail to see the point here...
If e is None, we want to assign it a not None value from extra errors.
---
*[mobly/records.py, line 203 at r3](https://reviewable.io:443/reviews/google/mobly/284#-Kr3kD9eWG2D5rUDEOU-:-KrCGo05U8oXQTCWbjPt:bnoc1ps) ([raw file](https://github.com/google/mobly/blob/d3fd8b09c1d6875e09c2aba36aea69c2b15aa67d/mobly/records.py#L203)):*
<details><summary><i>Previously, k2fong wrote…</i></summary><blockquote>
can it be like below? just trying to reduce the number of conditional logics here.
elif e is None and self.extra_errors:
failure_location, e = self.extra_errors.popitem(last=False)
failure_location += ': '
</blockquote></details>
No, because failure_location could have been overwritten to None by the pop, and the `+=` would fail with a type error.
---
*Comments from [Reviewable](https://reviewable.io:443/reviews/google/mobly/284)*
<!-- Sent from Reviewable.io -->
xpconanfan:
Review status: 0 of 3 files reviewed at latest revision, 9 unresolved discussions.
---
*[mobly/records.py, line 205 at r3](https://reviewable.io:443/reviews/google/mobly/284#-Kr3lPLRAOw3SbbSD8V7:-KrCGyyoTtcsammxe0LQ:bxu03q6) ([raw file](https://github.com/google/mobly/blob/d3fd8b09c1d6875e09c2aba36aea69c2b15aa67d/mobly/records.py#L205)):*
<details><summary><i>Previously, k2fong wrote…</i></summary><blockquote>
if that's the case then I'll suggest not using failure_location here at all
</blockquote></details>
But we do need to prepend it here if failure_location is not empty string.
This is just a shortcut to reduce `if` statements.
---
*Comments from [Reviewable](https://reviewable.io:443/reviews/google/mobly/284)*
<!-- Sent from Reviewable.io -->
xpconanfan:
Review status: 0 of 3 files reviewed at latest revision, 9 unresolved discussions.
---
*[mobly/records.py, line 284 at r3](https://reviewable.io:443/reviews/google/mobly/284#-Kr3oVzGwyFl-d3k9t2Q:-KrCHAAK5pYCavnPI-bj:bc4us8f) ([raw file](https://github.com/google/mobly/blob/d3fd8b09c1d6875e09c2aba36aea69c2b15aa67d/mobly/records.py#L284)):*
<details><summary><i>Previously, k2fong wrote…</i></summary><blockquote>
sorry about that, I was expecting it to highlight code similar to critique :p My comment is that lines 219-233 and 288-300 are pretty much identical besides 2 lines, and I'm not sure those should be different either. Can they be refactored and combined into one method?
</blockquote></details>
No, because we are taking stacktraces here so another func stack would create noise in the stacktrace
---
*Comments from [Reviewable](https://reviewable.io:443/reviews/google/mobly/284)*
<!-- Sent from Reviewable.io -->
xpconanfan:
Review status: 0 of 3 files reviewed at latest revision, 9 unresolved discussions.
---
*[mobly/records.py, line 322 at r3](https://reviewable.io:443/reviews/google/mobly/284#-Kr3ptASE2tC2CxyjWH7:-KrCHYXuXXUVNDbXHZd6:b-5caqun) ([raw file](https://github.com/google/mobly/blob/d3fd8b09c1d6875e09c2aba36aea69c2b15aa67d/mobly/records.py#L322)):*
<details><summary><i>Previously, k2fong wrote…</i></summary><blockquote>
hmm not me. but is this style really the pythonic style? I prefer hasattr() especially since its use else where in this file as well.
</blockquote></details>
Yes. It is quite common and somewhat recommended to use try...except in place of `if` in Python.
https://stackoverflow.com/questions/7604636/better-to-try-something-and-catch-the-exception-or-test-if-its-possible-first
---
*Comments from [Reviewable](https://reviewable.io:443/reviews/google/mobly/284)*
<!-- Sent from Reviewable.io -->
xpconanfan:
Review status: 0 of 3 files reviewed at latest revision, 9 unresolved discussions.
---
*[mobly/records.py, line 197 at r3](https://reviewable.io:443/reviews/google/mobly/284#-Kr3inUC5cHYesCg3hIN:-KrCqIigbI--dMXyrhd3:b3vi6kr) ([raw file](https://github.com/google/mobly/blob/d3fd8b09c1d6875e09c2aba36aea69c2b15aa67d/mobly/records.py#L197)):*
<details><summary><i>Previously, k2fong wrote…</i></summary><blockquote>
> if e:
> self.termination_signal = e
no need for this 'if e' clause. Just assign?
</blockquote></details>
Will refactor
---
*[mobly/records.py, line 200 at r3](https://reviewable.io:443/reviews/google/mobly/284#-Kr3jiRas15KiEg1ApFP:-KrCq90ebf4aJ56LV1cE:b9ty8wm) ([raw file](https://github.com/google/mobly/blob/d3fd8b09c1d6875e09c2aba36aea69c2b15aa67d/mobly/records.py#L200)):*
<details><summary><i>Previously, xpconanfan (Ang Li) wrote…</i></summary><blockquote>
Ok, but I fail to see the point here...
If e is None, we want to assign it a not None value from extra errors.
</blockquote></details>
Oh, i see.
`if e` doesn't guarantee `e is not None`
e can be '' or other values that evaluate to false.
---
*Comments from [Reviewable](https://reviewable.io:443/reviews/google/mobly/284)*
<!-- Sent from Reviewable.io -->
xpconanfan: Done with the fixes.
Here's an example of a grand failure where test body, `teardown_test`, and `on_fail` all failed.
The entry looks like:
```
---
"Begin Time": 1502406537536
Details: "fail in body."
"End Time": 1502406537537
"Extra Errors":
-
Details: "teardown test error."
Extras: null
Position: teardown_test
Stacktrace: |
Traceback (most recent call last):
File "/Users/angli/Developer/mobly/mobly/base_test.py", line 341, in exec_one_test
self._teardown_test(test_name)
File "/Users/angli/Developer/mobly/mobly/base_test.py", line 205, in _teardown_test
self.teardown_test()
File "tests/lib/integration_test.py", line 37, in teardown_test
raise Exception('teardown test error.')
Exception: teardown test error.
-
Details: "on_fail error."
Extras: null
Position: _on_fail
Stacktrace: |
Traceback (most recent call last):
File "/Users/angli/Developer/mobly/mobly/base_test.py", line 301, in _exec_procedure_func
func(tr_record)
File "/Users/angli/Developer/mobly/mobly/base_test.py", line 226, in _on_fail
self.on_fail(test_name, begin_time)
File "tests/lib/integration_test.py", line 40, in on_fail
raise Exception('on_fail error.')
Exception: on_fail error.
Extras: null
Result: FAIL
Stacktrace: |
Traceback (most recent call last):
File "/Users/angli/Developer/mobly/mobly/base_test.py", line 332, in exec_one_test
test_method()
File "tests/lib/integration_test.py", line 33, in test_hello_world
asserts.fail('fail in body.')
File "/Users/angli/Developer/mobly/mobly/asserts.py", line 241, in fail
raise signals.TestFailure(msg, extras)
TestFailure: Details=fail in body., Extras=None
"Test Class": IntegrationTest
"Test Name": test_hello_world
Type: Record
UID: null
```
xpconanfan:
Review status: 0 of 4 files reviewed at latest revision, 9 unresolved discussions.
---
*[mobly/records.py, line 180 at r1](https://reviewable.io:443/reviews/google/mobly/284#-KqTiCM4ZWPPVnwvXdjF:-KrDR-RS0xcanNO7EY7o:b-896fix) ([raw file](https://github.com/google/mobly/blob/b8b32f7a4ec1519a61a320a6b3b2c5a1811fb48d/mobly/records.py#L180)):*
<details><summary><i>Previously, xpconanfan (Ang Li) wrote…</i></summary><blockquote>
The line "FAILED (failures=1, errors=1)" is the test run summary line of all tests in a run, we have a similar summary line at the end, and we are not really discussing this line here...
We are talking about the status of a single test record.
We are not a unit test framework, so we can't copy pyunit as it is. This is one of the differ points between unit tests and E2E tests.
If that's what you prefer, I'm ok with keeping the current behavior because there have been no complaints from users who run tests everyday.
</blockquote></details>
Done.
---
*[mobly/records.py, line 197 at r3](https://reviewable.io:443/reviews/google/mobly/284#-Kr3inUC5cHYesCg3hIN:-KrDR-lwuqUrfUCxzh88:b-896fix) ([raw file](https://github.com/google/mobly/blob/d3fd8b09c1d6875e09c2aba36aea69c2b15aa67d/mobly/records.py#L197)):*
<details><summary><i>Previously, xpconanfan (Ang Li) wrote…</i></summary><blockquote>
Will refactor
</blockquote></details>
Done.
---
*[mobly/records.py, line 200 at r3](https://reviewable.io:443/reviews/google/mobly/284#-Kr3jiRas15KiEg1ApFP:-KrDR0LNJcYqMPQj2_AW:b-896fix) ([raw file](https://github.com/google/mobly/blob/d3fd8b09c1d6875e09c2aba36aea69c2b15aa67d/mobly/records.py#L200)):*
<details><summary><i>Previously, xpconanfan (Ang Li) wrote…</i></summary><blockquote>
Oh, i see.
`if e` doesn't guarantee `e is not None`
e can be '' or other values that evaluate to false.
</blockquote></details>
Done.
---
*[mobly/records.py, line 203 at r3](https://reviewable.io:443/reviews/google/mobly/284#-Kr3kD9eWG2D5rUDEOU-:-KrDR2JLvD3aQz1wBbuv:b-896fix) ([raw file](https://github.com/google/mobly/blob/d3fd8b09c1d6875e09c2aba36aea69c2b15aa67d/mobly/records.py#L203)):*
<details><summary><i>Previously, xpconanfan (Ang Li) wrote…</i></summary><blockquote>
No, because failure_location could have be overwritten to None by the pop and the `+=` would fail with type error.
</blockquote></details>
Done.
---
*[mobly/records.py, line 205 at r3](https://reviewable.io:443/reviews/google/mobly/284#-Kr3lPLRAOw3SbbSD8V7:-KrDR2xwa7grak5QWfX1:b-ruqkw2) ([raw file](https://github.com/google/mobly/blob/d3fd8b09c1d6875e09c2aba36aea69c2b15aa67d/mobly/records.py#L205)):*
<details><summary><i>Previously, xpconanfan (Ang Li) wrote…</i></summary><blockquote>
But we do need to prepend it here if failure_location is not empty string.
This is just a shortcut to reduce `if` statements.
</blockquote></details>
Obsolete
---
*[mobly/records.py, line 223 at r3](https://reviewable.io:443/reviews/google/mobly/284#-Kr3nm3ORrhOCtOJM6Km:-KrDR4rUDgRX4uXbHfhK:b-ruqkw2) ([raw file](https://github.com/google/mobly/blob/d3fd8b09c1d6875e09c2aba36aea69c2b15aa67d/mobly/records.py#L223)):*
<details><summary><i>Previously, k2fong wrote…</i></summary><blockquote>
if so, might be worthwhile to indicate so in the header
</blockquote></details>
Obsolete
---
*[mobly/records.py, line 284 at r3](https://reviewable.io:443/reviews/google/mobly/284#-Kr3oVzGwyFl-d3k9t2Q:-KrDR5PkwfBzTeyiP04y:b-ruqkw2) ([raw file](https://github.com/google/mobly/blob/d3fd8b09c1d6875e09c2aba36aea69c2b15aa67d/mobly/records.py#L284)):*
<details><summary><i>Previously, xpconanfan (Ang Li) wrote…</i></summary><blockquote>
No, because we are taking stacktraces here so another func stack would create noise in the stacktrace
</blockquote></details>
Obsolete
---
*[mobly/records.py, line 322 at r3](https://reviewable.io:443/reviews/google/mobly/284#-Kr3ptASE2tC2CxyjWH7:-KrDR7V21Wjo1AyvM3sm:b-896fix) ([raw file](https://github.com/google/mobly/blob/d3fd8b09c1d6875e09c2aba36aea69c2b15aa67d/mobly/records.py#L322)):*
<details><summary><i>Previously, xpconanfan (Ang Li) wrote…</i></summary><blockquote>
Yes. It is quite common and somewhat recommended to use try...except in place of `if` in Python.
https://stackoverflow.com/questions/7604636/better-to-try-something-and-catch-the-exception-or-test-if-its-possible-first
</blockquote></details>
Done.
---
*Comments from [Reviewable](https://reviewable.io:443/reviews/google/mobly/284)*
<!-- Sent from Reviewable.io -->
xpconanfan: Rebased
k2fong:
Review status: 0 of 4 files reviewed at latest revision, 5 unresolved discussions.
---
*[mobly/records.py, line 260 at r8](https://reviewable.io:443/reviews/google/mobly/284#-KrDywQj97Vemgr_3o27:-KrDywQj97Vemgr_3o28:b-ce0f4g) ([raw file](https://github.com/google/mobly/blob/be96f35805b0015c8d1ae1456be4c4c133b7e167/mobly/records.py#L260)):*
> ```Python
> if self.result in (TestResultEnums.TEST_RESULT_PASS,
> TestResultEnums.TEST_RESULT_SKIP):
> self.result = TestResultEnums.TEST_RESULT_ERROR
> ```
Nit. if pass, skip, error, and fail are all the possible result value, will it read better if its if self.result != TestResultEnums.TEST_RESULT_FAIL? I think there might be one more similar down below.
---
*[mobly/records.py, line 268 at r8](https://reviewable.io:443/reviews/google/mobly/284#-KrE-3-Q6Zo1E-oKKmX6:-KrE-3-Q6Zo1E-oKKmX7:b-r67v17) ([raw file](https://github.com/google/mobly/blob/be96f35805b0015c8d1ae1456be4c4c133b7e167/mobly/records.py#L268)):*
> ```Python
> self.details = self.termination_signal.details
> self.stacktrace = self.termination_signal.stacktrace
> self.extras = self.termination_signal.extras
> ```
another nit. instead of keeping more states within this class, how about defining properties for details, stacktrace, and extras and just return the results from termination_signal?
---
*[mobly/records.py, line 326 at r8](https://reviewable.io:443/reviews/google/mobly/284#-KrEUJVSrNSl4QdD03V0:-KrEUJVSrNSl4QdD03V1:bt2g9lo) ([raw file](https://github.com/google/mobly/blob/be96f35805b0015c8d1ae1456be4c4c133b7e167/mobly/records.py#L326)):*
> ```Python
> TestResultEnums.TEST_RESULT_SKIP):
> self.result = TestResultEnums.TEST_RESULT_ERROR
> self.extra_errors[position] = ExceptionRecord(e, position=position)
> ```
I don't believe its used that way but if in case the same position is passed in, the extra error will be overridden for that position. is that intended? if that is the intend, how about call this set_error()?
---
*Comments from [Reviewable](https://reviewable.io:443/reviews/google/mobly/284)*
<!-- Sent from Reviewable.io -->
xpconanfan:
Review status: 0 of 4 files reviewed at latest revision, 5 unresolved discussions.
---
*[mobly/records.py, line 260 at r8](https://reviewable.io:443/reviews/google/mobly/284#-KrDywQj97Vemgr_3o27:-KrEs1w5TtPCylkxPY6e:b-896fix) ([raw file](https://github.com/google/mobly/blob/be96f35805b0015c8d1ae1456be4c4c133b7e167/mobly/records.py#L260)):*
<details><summary><i>Previously, k2fong wrote…</i></summary><blockquote>
Nit. if pass, skip, error, and fail are all the possible result value, will it read better if its if self.result != TestResultEnums.TEST_RESULT_FAIL? I think there might be one more similar down below.
</blockquote></details>
Done.
---
*[mobly/records.py, line 268 at r8](https://reviewable.io:443/reviews/google/mobly/284#-KrE-3-Q6Zo1E-oKKmX6:-KrEtbz8tzWeG6-LZ139:b-896fix) ([raw file](https://github.com/google/mobly/blob/be96f35805b0015c8d1ae1456be4c4c133b7e167/mobly/records.py#L268)):*
<details><summary><i>Previously, k2fong wrote…</i></summary><blockquote>
another nit. instead of keeping more states within this class, how about defining properties for details, stacktrace, and extras and just return the results from termination_signal?
</blockquote></details>
Done.
---
*[mobly/records.py, line 326 at r8](https://reviewable.io:443/reviews/google/mobly/284#-KrEUJVSrNSl4QdD03V0:-KrEuE6uo4SkpLsupxmH:boji8w4) ([raw file](https://github.com/google/mobly/blob/be96f35805b0015c8d1ae1456be4c4c133b7e167/mobly/records.py#L326)):*
<details><summary><i>Previously, k2fong wrote…</i></summary><blockquote>
I don't believe its used that way but if in case the same position is passed in, the extra error will be overridden for that position. is that intended? if that is the intend, how about call this set_error()?
</blockquote></details>
Added a check.
This is not a huge concern as only the framework itself sets the position.
---
*Comments from [Reviewable](https://reviewable.io:443/reviews/google/mobly/284)*
<!-- Sent from Reviewable.io -->
dthkao:
Review status: 0 of 4 files reviewed at latest revision, 6 unresolved discussions.
---
*[mobly/records.py, line 132 at r1](https://reviewable.io:443/reviews/google/mobly/284#-KqTgS0N6lQ8TLdFtNJ5:-KrFCajBpfuFPCe6Dw20:b46bouh) ([raw file](https://github.com/google/mobly/blob/b8b32f7a4ec1519a61a320a6b3b2c5a1811fb48d/mobly/records.py#L132)):*
<details><summary><i>Previously, dthkao (David T.H. Kao) wrote…</i></summary><blockquote>
Discussed offline, filed https://github.com/google/mobly/issues/300. Acknowledged for now
</blockquote></details>
indentation pr is in so bringing this back up.
---
*[mobly/records.py, line 250 at r9](https://reviewable.io:443/reviews/google/mobly/284#-KrFDkoRafxNWkrjewJw:-KrFDkoRafxNWkrjewJx:b-3csec2) ([raw file](https://github.com/google/mobly/blob/cda0a0c386c5a562c6a7c513748620304480760a/mobly/records.py#L250)):*
> ```Python
> result: One of the TEST_RESULT enums in TestResultEnums.
> e: A test termination signal (usually an exception object). It can
> be any exception instance or of any subclass of
> ```
indent?
---
*Comments from [Reviewable](https://reviewable.io:443/reviews/google/mobly/284)*
<!-- Sent from Reviewable.io -->
xpconanfan:
Review status: 0 of 4 files reviewed at latest revision, 6 unresolved discussions, some commit checks pending.
---
*[mobly/records.py, line 132 at r1](https://reviewable.io:443/reviews/google/mobly/284#-KqTgS0N6lQ8TLdFtNJ5:-KrFFqqLFyTZgRBmidyB:b-896fix) ([raw file](https://github.com/google/mobly/blob/b8b32f7a4ec1519a61a320a6b3b2c5a1811fb48d/mobly/records.py#L132)):*
<details><summary><i>Previously, dthkao (David T.H. Kao) wrote…</i></summary><blockquote>
indentation pr is in so bringing this back up.
</blockquote></details>
Done.
---
*[mobly/records.py, line 250 at r9](https://reviewable.io:443/reviews/google/mobly/284#-KrFDkoRafxNWkrjewJw:-KrFENweh8wyOPOsTBtJ:b-896fix) ([raw file](https://github.com/google/mobly/blob/cda0a0c386c5a562c6a7c513748620304480760a/mobly/records.py#L250)):*
<details><summary><i>Previously, dthkao (David T.H. Kao) wrote…</i></summary><blockquote>
indent?
</blockquote></details>
Done.
---
*Comments from [Reviewable](https://reviewable.io:443/reviews/google/mobly/284)*
<!-- Sent from Reviewable.io -->
k2fong: <img class="emoji" title=":lgtm:" alt=":lgtm:" align="absmiddle" src="https://reviewable.io/lgtm.png" height="20" width="61"/>
---
Review status: 0 of 4 files reviewed at latest revision, 3 unresolved discussions.
---
*Comments from [Reviewable](https://reviewable.io:443/reviews/google/mobly/284#-:-KrHxDuGb-hjwujWNkXS:bnfp4nl)*
<!-- Sent from Reviewable.io -->
xpconanfan:
Reviewed 1 of 3 files at r8, 1 of 1 files at r10.
Review status: 2 of 4 files reviewed at latest revision, 3 unresolved discussions.
---
*Comments from [Reviewable](https://reviewable.io:443/reviews/google/mobly/284)*
<!-- Sent from Reviewable.io -->
| diff --git a/mobly/records.py b/mobly/records.py
index 62f130b..16072c1 100644
--- a/mobly/records.py
+++ b/mobly/records.py
@@ -14,6 +14,7 @@
"""This module has classes for test result collection, and test result output.
"""
+import collections
import itertools
import copy
import enum
@@ -119,23 +120,86 @@ class TestResultEnums(object):
RECORD_EXTRA_ERRORS = 'Extra Errors'
RECORD_DETAILS = 'Details'
RECORD_STACKTRACE = 'Stacktrace'
+ RECORD_POSITION = 'Position'
TEST_RESULT_PASS = 'PASS'
TEST_RESULT_FAIL = 'FAIL'
TEST_RESULT_SKIP = 'SKIP'
TEST_RESULT_ERROR = 'ERROR'
+class ExceptionRecord(object):
+ """A wrapper class for representing exception objects in TestResultRecord.
+
+ Attributes:
+ exception: Exception object, the original Exception.
+ stacktrace: string, stacktrace of the Exception.
+ extras: optional serializable, this corresponds to the
+ `TestSignal.extras` field.
+ position: string, an optional label specifying the position where the
+ Exception occurred.
+ """
+
+ def __init__(self, e, position=None):
+ self.exception = e
+ self.stacktrace = None
+ self.extras = None
+ self.position = position
+ self.is_test_signal = isinstance(e, signals.TestSignal)
+ # Record stacktrace of the exception.
+ # This check cannot be based on try...except, which messes up
+ # `exc_info`.
+ if hasattr(e, '__traceback__'):
+ exc_traceback = e.__traceback__
+ else:
+ # In py2, exception objects don't have built-in traceback, so we
+ # have to immediately retrieve stacktrace from `sys.exc_info`.
+ _, _, exc_traceback = sys.exc_info()
+ if exc_traceback:
+ self.stacktrace = ''.join(
+ traceback.format_exception(e.__class__, e, exc_traceback))
+ # Populate fields based on the type of the termination signal.
+ if self.is_test_signal:
+ self.details = str(e.details)
+ self.extras = e.extras
+ else:
+ self.details = str(e)
+
+ def to_dict(self):
+ result = {}
+ result[TestResultEnums.RECORD_DETAILS] = self.details
+ result[TestResultEnums.RECORD_POSITION] = self.position
+ result[TestResultEnums.RECORD_STACKTRACE] = self.stacktrace
+ result[TestResultEnums.RECORD_EXTRAS] = copy.deepcopy(self.extras)
+ return result
+
+
class TestResultRecord(object):
- """A record that holds the information of a test execution.
+ """A record that holds the information of a single test.
+
+ The record object holds all information of a test, including all the
+ exceptions occurred during the test.
+
+ A test can terminate for two reasons:
+ 1. the test function executes to the end and completes naturally.
+ 2. the test is terminated by an exception, which we call
+ "termination signal".
+
+ The termination signal is treated differently. Its content are extracted
+ into first-tier attributes of the record object, like `details` and
+ `stacktrace`, for easy consumption.
+
+ Note the termination signal is not always an error, it can also be an
+ explicit pass signal or abort/skip signals.
Attributes:
- test_name: A string representing the name of the test method.
+ test_name: string, the name of the test.
begin_time: Epoch timestamp of when the test started.
end_time: Epoch timestamp of when the test ended.
- self.uid: Unique identifier of a test.
- self.result: Test result, PASS/FAIL/SKIP.
- self.extras: User defined extra information of the test result.
- self.details: A string explaining the details of the test.
+ uid: Unique identifier of a test.
+ termination_signal: ExceptionRecord, the main exception of the test.
+ extra_errors: OrderedDict, all exceptions that occurred during the
+ entire test lifecycle. The order of occurrence is preserved.
+ result: TestResultEnum.TEST_RESULT_*, PASS/FAIL/SKIP.
"""
def __init__(self, t_name, t_class=None):
@@ -144,11 +208,35 @@ class TestResultRecord(object):
self.begin_time = None
self.end_time = None
self.uid = None
+ self.termination_signal = None
+ self.extra_errors = collections.OrderedDict()
self.result = None
- self.extras = None
- self.details = None
- self.stacktrace = None
- self.extra_errors = {}
+
+ @property
+ def details(self):
+ """String description of the cause of the test's termination.
+
+ Note a passed test can have this as well due to the explicit pass
+ signal. If the test passed implicitly, this field would be None.
+ """
+ if self.termination_signal:
+ return self.termination_signal.details
+
+ @property
+ def stacktrace(self):
+ """The stacktrace string for the exception that terminated the test.
+ """
+ if self.termination_signal:
+ return self.termination_signal.stacktrace
+
+ @property
+ def extras(self):
+ """User defined extra information of the test result.
+
+ Must be serializable.
+ """
+ if self.termination_signal:
+ return self.termination_signal.extras
def test_begin(self):
"""Call this when the test begins execution.
@@ -158,7 +246,7 @@ class TestResultRecord(object):
self.begin_time = utils.get_current_epoch_time()
def _test_end(self, result, e):
- """Class internal function to signal the end of a test execution.
+ """Marks the end of the test logic.
Args:
result: One of the TEST_RESULT enums in TestResultEnums.
@@ -169,19 +257,25 @@ class TestResultRecord(object):
if self.begin_time is not None:
self.end_time = utils.get_current_epoch_time()
self.result = result
+ if e:
+ self.termination_signal = ExceptionRecord(e)
+
+ def update_record(self):
+ """Updates the content of a record.
+
+ Several display fields like "details" and "stacktrace" need to be
+ updated based on the content of the record object.
+
+ As the content of the record changes, call this method to update all
+ the appropriate fields.
+ """
if self.extra_errors:
- self.result = TestResultEnums.TEST_RESULT_ERROR
- if isinstance(e, signals.TestSignal):
- self.details = e.details
- _, _, exc_traceback = sys.exc_info()
- if exc_traceback:
- self.stacktrace = ''.join(traceback.format_tb(exc_traceback))
- self.extras = e.extras
- elif isinstance(e, Exception):
- self.details = str(e)
- _, _, exc_traceback = sys.exc_info()
- if exc_traceback:
- self.stacktrace = ''.join(traceback.format_tb(exc_traceback))
+ if self.result != TestResultEnums.TEST_RESULT_FAIL:
+ self.result = TestResultEnums.TEST_RESULT_ERROR
+ # If no termination signal is provided, use the first exception
+ # occurred as the termination signal.
+ if not self.termination_signal and self.extra_errors:
+ _, self.termination_signal = self.extra_errors.popitem(last=False)
def test_pass(self, e=None):
"""To mark the test as passed in this record.
@@ -219,19 +313,29 @@ class TestResultRecord(object):
"""
self._test_end(TestResultEnums.TEST_RESULT_ERROR, e)
- def add_error(self, tag, e):
- """Add extra error happened during a test mark the test result as
+ def add_error(self, position, e):
+ """Add extra error happened during a test.
+
+ If the test has passed or skipped, this will mark the test result as
ERROR.
If an error is added the test record, the record's result is equivalent
to the case where an uncaught exception happened.
+ If the test record has not recorded any error, the newly added error
+ would be the main error of the test record. Otherwise the newly added
+ error is added to the record's extra errors.
+
Args:
- tag: A string describing where this error came from, e.g. 'on_pass'.
+ position: string, where this error occurred, e.g. 'teardown_test'.
e: An exception object.
"""
- self.result = TestResultEnums.TEST_RESULT_ERROR
- self.extra_errors[tag] = str(e)
+ if self.result != TestResultEnums.TEST_RESULT_FAIL:
+ self.result = TestResultEnums.TEST_RESULT_ERROR
+ if position in self.extra_errors:
+ raise Error('An exception is already recorded with position "%s",'
+ ' cannot reuse.' % position)
+ self.extra_errors[position] = ExceptionRecord(e, position=position)
def __str__(self):
d = self.to_dict()
@@ -259,7 +363,9 @@ class TestResultRecord(object):
d[TestResultEnums.RECORD_UID] = self.uid
d[TestResultEnums.RECORD_EXTRAS] = self.extras
d[TestResultEnums.RECORD_DETAILS] = self.details
- d[TestResultEnums.RECORD_EXTRA_ERRORS] = self.extra_errors
+ d[TestResultEnums.RECORD_EXTRA_ERRORS] = [
+ e.to_dict() for e in self.extra_errors.values()
+ ]
d[TestResultEnums.RECORD_STACKTRACE] = self.stacktrace
return d
@@ -340,9 +446,13 @@ class TestResult(object):
A record is considered executed once it's added to the test result.
+ Adding the record finalizes the content of a record, so no change
+ should be made to the record afterwards.
+
Args:
record: A test record object to add.
"""
+ record.update_record()
if record.result == TestResultEnums.TEST_RESULT_SKIP:
self.skipped.append(record)
return
@@ -375,6 +485,7 @@ class TestResult(object):
Args:
test_record: A TestResultRecord object for the test class.
"""
+ test_record.update_record()
self.error.append(test_record)
def is_test_executed(self, test_name):
| Include stack traces of extra errors
Right now only the stack trace for the main error is recorded in `TeatResultRecord`.
We should store the stack traces for extra errors as well. | google/mobly | diff --git a/mobly/base_test.py b/mobly/base_test.py
index a483a83..40f1bc4 100644
--- a/mobly/base_test.py
+++ b/mobly/base_test.py
@@ -301,9 +301,9 @@ class BaseTestClass(object):
executed.
"""
try:
- # Pass a copy of the record instead of the actual object so that it
- # will not be modified.
- func(copy.deepcopy(tr_record))
+ # Pass a copy of the record instead of the actual object so that it
+ # will not be modified.
+ func(copy.deepcopy(tr_record))
except signals.TestAbortAll:
raise
except Exception as e:
@@ -327,6 +327,7 @@ class BaseTestClass(object):
tr_record = records.TestResultRecord(test_name, self.TAG)
tr_record.test_begin()
logging.info('%s %s', TEST_CASE_TOKEN, test_name)
+ teardown_test_failed = False
try:
try:
self._setup_test(test_name)
@@ -348,6 +349,7 @@ class BaseTestClass(object):
except Exception as e:
logging.exception(e)
tr_record.add_error('teardown_test', e)
+ teardown_test_failed = True
except (signals.TestFailure, AssertionError) as e:
tr_record.test_fail(e)
except signals.TestSkip as e:
@@ -364,8 +366,10 @@ class BaseTestClass(object):
# Exception happened during test.
tr_record.test_error(e)
else:
- tr_record.test_pass()
+ if not teardown_test_failed:
+ tr_record.test_pass()
finally:
+ tr_record.update_record()
if tr_record.result in (records.TestResultEnums.TEST_RESULT_ERROR,
records.TestResultEnums.TEST_RESULT_FAIL):
self._exec_procedure_func(self._on_fail, tr_record)
diff --git a/tests/mobly/base_test_test.py b/tests/mobly/base_test_test.py
index 1cc1078..bd7dce9 100755
--- a/tests/mobly/base_test_test.py
+++ b/tests/mobly/base_test_test.py
@@ -250,7 +250,7 @@ class BaseTestTest(unittest.TestCase):
def test_teardown_test_assert_fail(self):
class MockBaseTest(base_test.BaseTestClass):
def teardown_test(self):
- asserts.assert_true(False, MSG_EXPECTED_EXCEPTION)
+ asserts.fail(MSG_EXPECTED_EXCEPTION)
def test_something(self):
pass
@@ -259,7 +259,7 @@ class BaseTestTest(unittest.TestCase):
bt_cls.run()
actual_record = bt_cls.results.error[0]
self.assertEqual(actual_record.test_name, self.mock_test_name)
- self.assertIsNone(actual_record.details)
+ self.assertEqual(actual_record.details, MSG_EXPECTED_EXCEPTION)
self.assertIsNone(actual_record.extras)
expected_summary = ("Error 1, Executed 1, Failed 0, Passed 0, "
"Requested 1, Skipped 0")
@@ -277,10 +277,9 @@ class BaseTestTest(unittest.TestCase):
bt_cls.run()
actual_record = bt_cls.results.error[0]
self.assertEqual(actual_record.test_name, self.mock_test_name)
- self.assertIsNone(actual_record.details)
+ self.assertEqual(actual_record.details, MSG_EXPECTED_EXCEPTION)
self.assertIsNone(actual_record.extras)
- expected_extra_error = {"teardown_test": MSG_EXPECTED_EXCEPTION}
- self.assertEqual(actual_record.extra_errors, expected_extra_error)
+ self.assertFalse(actual_record.extra_errors)
expected_summary = ("Error 1, Executed 1, Failed 0, Passed 0, "
"Requested 1, Skipped 0")
self.assertEqual(bt_cls.results.summary_str(), expected_summary)
@@ -372,10 +371,10 @@ class BaseTestTest(unittest.TestCase):
bt_cls = MockBaseTest(self.mock_test_cls_configs)
bt_cls.run()
- my_mock.assert_called_once_with("on_fail")
+ my_mock.assert_called_once_with('on_fail')
actual_record = bt_cls.results.error[0]
self.assertEqual(actual_record.test_name, self.mock_test_name)
- self.assertIsNone(actual_record.details)
+ self.assertEqual(actual_record.details, MSG_EXPECTED_EXCEPTION)
self.assertIsNone(actual_record.extras)
expected_summary = ("Error 1, Executed 1, Failed 0, Passed 0, "
"Requested 1, Skipped 0")
@@ -463,8 +462,8 @@ class BaseTestTest(unittest.TestCase):
actual_record = bt_cls.results.error[0]
self.assertEqual(actual_record.test_name, self.mock_test_name)
self.assertEqual(actual_record.details, MSG_EXPECTED_EXCEPTION)
- self.assertEqual(actual_record.extra_errors,
- {'teardown_test': 'This is an expected exception.ha'})
+ self.assertEqual(actual_record.extra_errors['teardown_test'].details,
+ 'This is an expected exception.ha')
self.assertIsNone(actual_record.extras)
expected_summary = ("Error 1, Executed 1, Failed 0, Passed 0, "
"Requested 1, Skipped 0")
@@ -504,12 +503,12 @@ class BaseTestTest(unittest.TestCase):
bt_cls = MockBaseTest(self.mock_test_cls_configs)
bt_cls.run()
- actual_record = bt_cls.results.error[0]
+ actual_record = bt_cls.results.failed[0]
self.assertIn('_on_fail', actual_record.extra_errors)
self.assertEqual(actual_record.test_name, self.mock_test_name)
self.assertEqual(actual_record.details, MSG_EXPECTED_EXCEPTION)
self.assertIsNone(actual_record.extras)
- expected_summary = ("Error 1, Executed 1, Failed 0, Passed 0, "
+ expected_summary = ("Error 0, Executed 1, Failed 1, Passed 0, "
"Requested 1, Skipped 0")
self.assertEqual(bt_cls.results.summary_str(), expected_summary)
@@ -526,8 +525,8 @@ class BaseTestTest(unittest.TestCase):
bt_cls = MockBaseTest(self.mock_test_cls_configs)
bt_cls.run()
actual_record = bt_cls.results.error[0]
- expected_extra_error = {'_on_pass': expected_msg}
- self.assertEqual(actual_record.extra_errors, expected_extra_error)
+ self.assertEqual(actual_record.extra_errors['_on_pass'].details,
+ expected_msg)
self.assertEqual(actual_record.test_name, self.mock_test_name)
self.assertEqual(actual_record.details, MSG_EXPECTED_EXCEPTION)
self.assertIsNone(actual_record.extras)
@@ -549,12 +548,58 @@ class BaseTestTest(unittest.TestCase):
self.assertEqual(actual_record.test_name, self.mock_test_name)
self.assertEqual(actual_record.details, "Test Body Exception.")
self.assertIsNone(actual_record.extras)
- self.assertEqual(actual_record.extra_errors["teardown_test"],
- "Details=This is an expected exception., Extras=None")
+ self.assertEqual(actual_record.extra_errors['teardown_test'].details,
+ MSG_EXPECTED_EXCEPTION)
+ self.assertIsNone(actual_record.extra_errors['teardown_test'].extras)
expected_summary = ("Error 1, Executed 1, Failed 0, Passed 0, "
"Requested 1, Skipped 0")
self.assertEqual(bt_cls.results.summary_str(), expected_summary)
+ def test_exception_objects_in_record(self):
+ """Checks that the exception objects are correctly tallied.
+ """
+ expected_termination_signal = Exception('Test Body Exception.')
+ expected_extra_error = Exception('teardown_test Exception.')
+
+ class MockBaseTest(base_test.BaseTestClass):
+ def teardown_test(self):
+ raise expected_extra_error
+
+ def test_something(self):
+ raise expected_termination_signal
+
+ bt_cls = MockBaseTest(self.mock_test_cls_configs)
+ bt_cls.run()
+ actual_record = bt_cls.results.error[0]
+ self.assertIs(actual_record.termination_signal.exception,
+ expected_termination_signal)
+ self.assertIsNotNone(actual_record.termination_signal.stacktrace)
+ self.assertEqual(len(actual_record.extra_errors), 1)
+ extra_error = actual_record.extra_errors['teardown_test']
+ self.assertIs(extra_error.exception, expected_extra_error)
+ self.assertIsNotNone(extra_error.stacktrace)
+ self.assertIsNone(actual_record.extras)
+
+ def test_promote_extra_errors_to_termination_signal(self):
+ """If no termination signal is specified, use the first extra error as
+ the termination signal.
+ """
+ expected_extra_error = Exception('teardown_test Exception.')
+
+ class MockBaseTest(base_test.BaseTestClass):
+ def teardown_test(self):
+ raise expected_extra_error
+
+ def test_something(self):
+ pass
+
+ bt_cls = MockBaseTest(self.mock_test_cls_configs)
+ bt_cls.run()
+ actual_record = bt_cls.results.error[0]
+ self.assertFalse(actual_record.extra_errors)
+ self.assertEqual(actual_record.details, 'teardown_test Exception.')
+ self.assertIsNotNone(actual_record.stacktrace)
+
def test_explicit_pass_but_teardown_test_raises_an_exception(self):
"""Test record result should be marked as ERROR as opposed to PASS.
"""
@@ -564,16 +609,17 @@ class BaseTestTest(unittest.TestCase):
asserts.assert_true(False, MSG_EXPECTED_EXCEPTION)
def test_something(self):
- asserts.explicit_pass("Test Passed!")
+ asserts.explicit_pass('Test Passed!')
bt_cls = MockBaseTest(self.mock_test_cls_configs)
bt_cls.run()
actual_record = bt_cls.results.error[0]
self.assertEqual(actual_record.test_name, self.mock_test_name)
- self.assertEqual(actual_record.details, "Test Passed!")
+ self.assertEqual(actual_record.details, 'Test Passed!')
self.assertIsNone(actual_record.extras)
- self.assertEqual(actual_record.extra_errors["teardown_test"],
- "Details=This is an expected exception., Extras=None")
+ self.assertEqual(actual_record.extra_errors['teardown_test'].details,
+ MSG_EXPECTED_EXCEPTION)
+ self.assertIsNone(actual_record.extra_errors['teardown_test'].extras)
expected_summary = ("Error 1, Executed 1, Failed 0, Passed 0, "
"Requested 1, Skipped 0")
self.assertEqual(bt_cls.results.summary_str(), expected_summary)
@@ -606,8 +652,8 @@ class BaseTestTest(unittest.TestCase):
self.assertEqual(actual_record.test_name, self.mock_test_name)
self.assertEqual(actual_record.details, MSG_EXPECTED_EXCEPTION)
self.assertEqual(actual_record.extras, MOCK_EXTRA)
- self.assertEqual(actual_record.extra_errors,
- {'_on_pass': MSG_EXPECTED_EXCEPTION})
+ self.assertEqual(actual_record.extra_errors['_on_pass'].details,
+ MSG_EXPECTED_EXCEPTION)
expected_summary = ("Error 1, Executed 1, Failed 0, Passed 0, "
"Requested 1, Skipped 0")
self.assertEqual(bt_cls.results.summary_str(), expected_summary)
@@ -622,14 +668,13 @@ class BaseTestTest(unittest.TestCase):
bt_cls = MockBaseTest(self.mock_test_cls_configs)
bt_cls.run()
- actual_record = bt_cls.results.error[0]
- self.assertEqual(bt_cls.results.failed, [])
+ actual_record = bt_cls.results.failed[0]
self.assertEqual(actual_record.test_name, self.mock_test_name)
self.assertEqual(actual_record.details, MSG_EXPECTED_EXCEPTION)
self.assertEqual(actual_record.extras, MOCK_EXTRA)
- self.assertEqual(actual_record.extra_errors,
- {'_on_fail': MSG_EXPECTED_EXCEPTION})
- expected_summary = ("Error 1, Executed 1, Failed 0, Passed 0, "
+ self.assertEqual(actual_record.extra_errors['_on_fail'].details,
+ MSG_EXPECTED_EXCEPTION)
+ expected_summary = ("Error 0, Executed 1, Failed 1, Passed 0, "
"Requested 1, Skipped 0")
self.assertEqual(bt_cls.results.summary_str(), expected_summary)
diff --git a/tests/mobly/records_test.py b/tests/mobly/records_test.py
index a5f68db..719f0c8 100755
--- a/tests/mobly/records_test.py
+++ b/tests/mobly/records_test.py
@@ -28,6 +28,7 @@ from tests.lib import utils
class RecordsTest(unittest.TestCase):
"""This test class tests the implementation of classes in mobly.records.
"""
+
def setUp(self):
self.tn = "test_name"
self.details = "Some details about the test execution."
@@ -38,7 +39,8 @@ class RecordsTest(unittest.TestCase):
def tearDown(self):
shutil.rmtree(self.tmp_path)
- def verify_record(self, record, result, details, extras):
+ def verify_record(self, record, result, details, extras, stacktrace=None):
+ record.update_record()
# Verify each field.
self.assertEqual(record.test_name, self.tn)
self.assertEqual(record.result, result)
@@ -58,9 +60,15 @@ class RecordsTest(unittest.TestCase):
d[records.TestResultEnums.RECORD_END_TIME] = record.end_time
d[records.TestResultEnums.RECORD_UID] = None
d[records.TestResultEnums.RECORD_CLASS] = None
- d[records.TestResultEnums.RECORD_EXTRA_ERRORS] = {}
- d[records.TestResultEnums.RECORD_STACKTRACE] = None
+ d[records.TestResultEnums.RECORD_EXTRA_ERRORS] = []
+ d[records.TestResultEnums.RECORD_STACKTRACE] = stacktrace
actual_d = record.to_dict()
+ # Verify stacktrace partially match as stacktraces often have file path
+ # in them.
+ if stacktrace:
+ stacktrace_key = records.TestResultEnums.RECORD_STACKTRACE
+ self.assertTrue(
+ d.pop(stacktrace_key) in actual_d.pop(stacktrace_key))
self.assertDictEqual(actual_d, d)
# Verify that these code paths do not cause crashes and yield non-empty
# results.
@@ -123,16 +131,14 @@ class RecordsTest(unittest.TestCase):
# Verify stacktrace separately if we expect a non-None value.
# Because stacktrace includes file names and line numbers, we can't do
# a simple equality check.
- self.assertTrue('Something failed.' in record.stacktrace)
- self.assertTrue(
- 'in test_result_record_fail_stacktrace\n raise Exception' in
- record.stacktrace)
- record.stacktrace = None
self.verify_record(
record=record,
result=records.TestResultEnums.TEST_RESULT_FAIL,
details='Something failed.',
- extras=None)
+ extras=None,
+ stacktrace='in test_result_record_fail_stacktrace\n '
+ 'raise Exception(\'Something failed.\')\nException: '
+ 'Something failed.\n')
def test_result_record_fail_with_float_extra(self):
record = records.TestResultRecord(self.tn)
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 1
} | 1.5 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
future==1.0.0
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
-e git+https://github.com/google/mobly.git@e5df6ca2918e21233d3d207207964b82319c9f9d#egg=mobly
mock==1.0.1
packaging @ file:///croot/packaging_1734472117206/work
pluggy @ file:///croot/pluggy_1733169602837/work
portpicker==1.6.0
psutil==7.0.0
pytest @ file:///croot/pytest_1738938843180/work
pytz==2025.2
PyYAML==6.0.2
timeout-decorator==0.5.0
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
| name: mobly
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- future==1.0.0
- mock==1.0.1
- portpicker==1.6.0
- psutil==7.0.0
- pytz==2025.2
- pyyaml==6.0.2
- timeout-decorator==0.5.0
prefix: /opt/conda/envs/mobly
| [
"tests/mobly/base_test_test.py::BaseTestTest::test_abort_class_in_test",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_equal_fail",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_equal_fail_with_msg",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_equal_pass",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_fail_with_noop",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_fail_with_wrong_error",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_fail_with_wrong_regex",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_pass",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_regex_fail_with_noop",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_regex_fail_with_wrong_error",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_regex_pass",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_true",
"tests/mobly/base_test_test.py::BaseTestTest::test_both_teardown_and_test_body_raise_exceptions",
"tests/mobly/base_test_test.py::BaseTestTest::test_cli_test_selection_override_self_tests_list",
"tests/mobly/base_test_test.py::BaseTestTest::test_current_test_name",
"tests/mobly/base_test_test.py::BaseTestTest::test_default_execution_of_all_tests",
"tests/mobly/base_test_test.py::BaseTestTest::test_exception_objects_in_record",
"tests/mobly/base_test_test.py::BaseTestTest::test_explicit_pass",
"tests/mobly/base_test_test.py::BaseTestTest::test_explicit_pass_but_teardown_test_raises_an_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_fail",
"tests/mobly/base_test_test.py::BaseTestTest::test_failure_in_procedure_functions_is_recorded",
"tests/mobly/base_test_test.py::BaseTestTest::test_failure_to_call_procedure_function_is_recorded",
"tests/mobly/base_test_test.py::BaseTestTest::test_generate_tests_call_outside_of_setup_generated_tests",
"tests/mobly/base_test_test.py::BaseTestTest::test_generate_tests_run",
"tests/mobly/base_test_test.py::BaseTestTest::test_generate_tests_selected_run",
"tests/mobly/base_test_test.py::BaseTestTest::test_implicit_pass",
"tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_cannot_modify_original_record",
"tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_executed_if_both_test_and_teardown_test_fails",
"tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_executed_if_teardown_test_fails",
"tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_executed_if_test_fails",
"tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_executed_if_test_setup_fails_by_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_raise_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_on_pass_cannot_modify_original_record",
"tests/mobly/base_test_test.py::BaseTestTest::test_on_pass_raise_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_procedure_function_gets_correct_record",
"tests/mobly/base_test_test.py::BaseTestTest::test_promote_extra_errors_to_termination_signal",
"tests/mobly/base_test_test.py::BaseTestTest::test_self_tests_list",
"tests/mobly/base_test_test.py::BaseTestTest::test_setup_and_teardown_execution_count",
"tests/mobly/base_test_test.py::BaseTestTest::test_setup_test_fail_by_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_setup_test_fail_by_test_signal",
"tests/mobly/base_test_test.py::BaseTestTest::test_skip",
"tests/mobly/base_test_test.py::BaseTestTest::test_skip_if",
"tests/mobly/base_test_test.py::BaseTestTest::test_teardown_test_assert_fail",
"tests/mobly/base_test_test.py::BaseTestTest::test_teardown_test_executed_if_setup_test_fails",
"tests/mobly/base_test_test.py::BaseTestTest::test_teardown_test_executed_if_test_fails",
"tests/mobly/base_test_test.py::BaseTestTest::test_teardown_test_executed_if_test_pass",
"tests/mobly/base_test_test.py::BaseTestTest::test_teardown_test_raise_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_uncaught_exception",
"tests/mobly/records_test.py::RecordsTest::test_result_record_fail_none",
"tests/mobly/records_test.py::RecordsTest::test_result_record_fail_stacktrace",
"tests/mobly/records_test.py::RecordsTest::test_result_record_fail_with_float_extra",
"tests/mobly/records_test.py::RecordsTest::test_result_record_fail_with_json_extra",
"tests/mobly/records_test.py::RecordsTest::test_result_record_pass_none",
"tests/mobly/records_test.py::RecordsTest::test_result_record_pass_with_float_extra",
"tests/mobly/records_test.py::RecordsTest::test_result_record_pass_with_json_extra",
"tests/mobly/records_test.py::RecordsTest::test_result_record_skip_none",
"tests/mobly/records_test.py::RecordsTest::test_result_record_skip_with_float_extra",
"tests/mobly/records_test.py::RecordsTest::test_result_record_skip_with_json_extra"
] | [
"tests/mobly/records_test.py::RecordsTest::test_summary_write_dump"
] | [
"tests/mobly/base_test_test.py::BaseTestTest::test_abort_setup_class",
"tests/mobly/base_test_test.py::BaseTestTest::test_cli_test_selection_fail_by_convention",
"tests/mobly/base_test_test.py::BaseTestTest::test_generate_tests_dup_test_name",
"tests/mobly/base_test_test.py::BaseTestTest::test_missing_requested_test_func",
"tests/mobly/base_test_test.py::BaseTestTest::test_self_tests_list_fail_by_convention",
"tests/mobly/base_test_test.py::BaseTestTest::test_setup_class_fail_by_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_basic",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_default_None",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_default_overwrite",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_default_overwrite_by_optional_param_list",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_default_overwrite_by_required_param_list",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_optional",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_optional_missing",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_optional_with_default",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_required",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_required_missing",
"tests/mobly/records_test.py::RecordsTest::test_is_all_pass",
"tests/mobly/records_test.py::RecordsTest::test_is_all_pass_negative",
"tests/mobly/records_test.py::RecordsTest::test_is_all_pass_with_add_class_error",
"tests/mobly/records_test.py::RecordsTest::test_is_test_executed",
"tests/mobly/records_test.py::RecordsTest::test_result_add_class_error_with_special_error",
"tests/mobly/records_test.py::RecordsTest::test_result_add_class_error_with_test_signal",
"tests/mobly/records_test.py::RecordsTest::test_result_add_operator_success",
"tests/mobly/records_test.py::RecordsTest::test_result_add_operator_type_mismatch"
] | [] | Apache License 2.0 | 1,541 | 2,406 | [
"mobly/records.py"
] |
nipy__nipype-2139 | d9b183b046815e836ce285eab894928c8849b27c | 2017-08-01 21:15:47 | 14161a590a3166b5a9c0f4afd42ff1acf843a960 | diff --git a/nipype/interfaces/base.py b/nipype/interfaces/base.py
index 2f8b1bf0e..19cf9ccaa 100644
--- a/nipype/interfaces/base.py
+++ b/nipype/interfaces/base.py
@@ -32,6 +32,7 @@ from warnings import warn
import simplejson as json
from dateutil.parser import parse as parseutc
from packaging.version import Version
+import collections
from .. import config, logging, LooseVersion, __version__
from ..utils.provenance import write_provenance
@@ -2058,9 +2059,15 @@ class MultiPath(traits.List):
"""
def validate(self, object, name, value):
+
+ # want to treat range and other sequences (except str) as list
+ if not isinstance(value, (str, bytes)) and isinstance(value, collections.Sequence):
+ value = list(value)
+
if not isdefined(value) or \
(isinstance(value, list) and len(value) == 0):
return Undefined
+
newvalue = value
if not isinstance(value, list) \
| iterfield in MapNode can't be range (py3)
### Summary
In py3 `range` is not a list, and if iterfield in `MapNode` is defined as `range` than entire range is send to the interface at once.
### Script/Workflow details
```
from nipype import Function, MapNode
def square_func(x):
print(x, type(x))
return x ** 2
square = Function(["x"], ["f_x"], square_func)
square_node = MapNode(square, name="square", iterfield=["x"])
square_node.inputs.x = range(4)
res = square_node.run()
```
### Actual behavior
print statement from `square_func` gives `range(0, 4) <class 'range'>`
### Expected behavior
print statement from `square_func` should give `0 <class 'int'>`, etc.
| nipy/nipype | diff --git a/nipype/pipeline/engine/tests/test_engine.py b/nipype/pipeline/engine/tests/test_engine.py
index e2624d03c..cece44444 100644
--- a/nipype/pipeline/engine/tests/test_engine.py
+++ b/nipype/pipeline/engine/tests/test_engine.py
@@ -456,6 +456,24 @@ def test_mapnode_iterfield_check():
with pytest.raises(ValueError): mod1._check_iterfield()
[email protected]("x_inp, f_exp", [
+ (3, [6]), ([2, 3], [4, 6]), ((2, 3), [4, 6]),
+ (range(3), [0, 2, 4]),
+ ("Str", ["StrStr"]), (["Str1", "Str2"], ["Str1Str1", "Str2Str2"])
+ ])
+def test_mapnode_iterfield_type(x_inp, f_exp):
+ from nipype import MapNode, Function
+ def double_func(x):
+ return 2 * x
+ double = Function(["x"], ["f_x"], double_func)
+
+ double_node = MapNode(double, name="double", iterfield=["x"])
+ double_node.inputs.x = x_inp
+
+ res = double_node.run()
+ assert res.outputs.f_x == f_exp
+
+
def test_mapnode_nested(tmpdir):
os.chdir(str(tmpdir))
from nipype import MapNode, Function
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 1
} | 0.13 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
click==8.0.4
configparser==5.2.0
decorator==4.4.2
funcsigs==1.0.2
future==1.0.0
importlib-metadata==4.8.3
iniconfig==1.1.1
isodate==0.6.1
lxml==5.3.1
mock==5.2.0
networkx==2.5.1
nibabel==3.2.2
-e git+https://github.com/nipy/nipype.git@d9b183b046815e836ce285eab894928c8849b27c#egg=nipype
numpy==1.19.5
packaging==21.3
pluggy==1.0.0
prov==1.5.0
py==1.11.0
pydotplus==2.0.2
pyparsing==3.1.4
pytest==7.0.1
python-dateutil==2.9.0.post0
rdflib==5.0.0
scipy==1.5.4
simplejson==3.20.1
six==1.17.0
tomli==1.2.3
traits==6.4.1
typing_extensions==4.1.1
zipp==3.6.0
| name: nipype
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- click==8.0.4
- configparser==5.2.0
- decorator==4.4.2
- funcsigs==1.0.2
- future==1.0.0
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- isodate==0.6.1
- lxml==5.3.1
- mock==5.2.0
- networkx==2.5.1
- nibabel==3.2.2
- numpy==1.19.5
- packaging==21.3
- pluggy==1.0.0
- prov==1.5.0
- py==1.11.0
- pydotplus==2.0.2
- pyparsing==3.1.4
- pytest==7.0.1
- python-dateutil==2.9.0.post0
- rdflib==5.0.0
- scipy==1.5.4
- simplejson==3.20.1
- six==1.17.0
- tomli==1.2.3
- traits==6.4.1
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/nipype
| [
"nipype/pipeline/engine/tests/test_engine.py::test_mapnode_iterfield_type[x_inp2-f_exp2]",
"nipype/pipeline/engine/tests/test_engine.py::test_mapnode_iterfield_type[x_inp3-f_exp3]"
] | [
"nipype/pipeline/engine/tests/test_engine.py::test_1mod[iterables0-expected0]",
"nipype/pipeline/engine/tests/test_engine.py::test_1mod[iterables1-expected1]",
"nipype/pipeline/engine/tests/test_engine.py::test_2mods[iterables0-expected0]",
"nipype/pipeline/engine/tests/test_engine.py::test_2mods[iterables1-expected1]",
"nipype/pipeline/engine/tests/test_engine.py::test_2mods[iterables2-expected2]",
"nipype/pipeline/engine/tests/test_engine.py::test_3mods[iterables0-expected0-connect0]",
"nipype/pipeline/engine/tests/test_engine.py::test_3mods[iterables1-expected1-connect1]",
"nipype/pipeline/engine/tests/test_engine.py::test_3mods[iterables2-expected2-connect2]",
"nipype/pipeline/engine/tests/test_engine.py::test_expansion",
"nipype/pipeline/engine/tests/test_engine.py::test_iterable_expansion",
"nipype/pipeline/engine/tests/test_engine.py::test_synchronize_expansion",
"nipype/pipeline/engine/tests/test_engine.py::test_synchronize_tuples_expansion",
"nipype/pipeline/engine/tests/test_engine.py::test_itersource_expansion",
"nipype/pipeline/engine/tests/test_engine.py::test_itersource_synchronize1_expansion",
"nipype/pipeline/engine/tests/test_engine.py::test_itersource_synchronize2_expansion",
"nipype/pipeline/engine/tests/test_engine.py::test_disconnect",
"nipype/pipeline/engine/tests/test_engine.py::test_doubleconnect",
"nipype/pipeline/engine/tests/test_engine.py::test_node_hash",
"nipype/pipeline/engine/tests/test_engine.py::test_old_config",
"nipype/pipeline/engine/tests/test_engine.py::test_mapnode_json",
"nipype/pipeline/engine/tests/test_engine.py::test_parameterize_dirs_false",
"nipype/pipeline/engine/tests/test_engine.py::test_serial_input",
"nipype/pipeline/engine/tests/test_engine.py::test_write_graph_runs",
"nipype/pipeline/engine/tests/test_engine.py::test_deep_nested_write_graph_runs"
] | [
"nipype/pipeline/engine/tests/test_engine.py::test_init",
"nipype/pipeline/engine/tests/test_engine.py::test_connect",
"nipype/pipeline/engine/tests/test_engine.py::test_add_nodes",
"nipype/pipeline/engine/tests/test_engine.py::test_node_init",
"nipype/pipeline/engine/tests/test_engine.py::test_workflow_add",
"nipype/pipeline/engine/tests/test_engine.py::test_node_get_output",
"nipype/pipeline/engine/tests/test_engine.py::test_mapnode_iterfield_check",
"nipype/pipeline/engine/tests/test_engine.py::test_mapnode_iterfield_type[3-f_exp0]",
"nipype/pipeline/engine/tests/test_engine.py::test_mapnode_iterfield_type[x_inp1-f_exp1]",
"nipype/pipeline/engine/tests/test_engine.py::test_mapnode_iterfield_type[Str-f_exp4]",
"nipype/pipeline/engine/tests/test_engine.py::test_mapnode_iterfield_type[x_inp5-f_exp5]",
"nipype/pipeline/engine/tests/test_engine.py::test_mapnode_nested",
"nipype/pipeline/engine/tests/test_engine.py::test_mapnode_expansion",
"nipype/pipeline/engine/tests/test_engine.py::test_io_subclass"
] | [] | Apache License 2.0 | 1,545 | 258 | [
"nipype/interfaces/base.py"
] |
|
Duke-GCB__DukeDSClient-158 | 3137fc3d94da3754c85715f3912bbebbe22d19be | 2017-08-04 16:27:12 | bffebebd86d09f5924461959401ef3698b4e47d5 | diff --git a/ddsc/core/util.py b/ddsc/core/util.py
index 8307bb0..1af9afc 100644
--- a/ddsc/core/util.py
+++ b/ddsc/core/util.py
@@ -301,8 +301,7 @@ def verify_terminal_encoding(encoding):
Raises ValueError with error message when terminal encoding is not Unicode(contains UTF ignoring case).
:param encoding: str: encoding we want to check
"""
- encoding = encoding or ''
- if not ("UTF" in encoding.upper()):
+ if encoding and not ("UTF" in encoding.upper()):
raise ValueError(TERMINAL_ENCODING_NOT_UTF_ERROR)
| Checks for terminal encoding prevents piping/redirection
Noticed when I was reviewing #153 that I can't pipe the output of ddsclient anywhere because it fails the terminal encoding check:
```
$ ddsclient list > projects
ERROR: DukeDSClient requires UTF terminal encoding.
Follow this guide for adjusting your terminal encoding:
https://github.com/Duke-GCB/DukeDSClient/blob/master/docs/UnicodeTerminalSetup.md
```
I can force with `PYTHONIOENCODING=UTF ddsclient list > projects`
But that's not very unix-y. I've also seen this issue on cluster jobs - trying to srun ddsclient. Had to force the PYTHONIOENCODING there too. | Duke-GCB/DukeDSClient | diff --git a/ddsc/core/tests/test_util.py b/ddsc/core/tests/test_util.py
index dc20606..4d42be9 100644
--- a/ddsc/core/tests/test_util.py
+++ b/ddsc/core/tests/test_util.py
@@ -16,13 +16,11 @@ class TestUtil(TestCase):
with self.assertRaises(ValueError):
verify_terminal_encoding('ascii')
- def test_verify_terminal_encoding_empty_raises(self):
- with self.assertRaises(ValueError):
- verify_terminal_encoding('')
+ def test_verify_terminal_encoding_empty_is_ok(self):
+ verify_terminal_encoding('')
- def test_verify_terminal_encoding_none_raises(self):
- with self.assertRaises(ValueError):
- verify_terminal_encoding(None)
+ def test_verify_terminal_encoding_none_is_ok(self):
+ verify_terminal_encoding(None)
class TestProgressBar(TestCase):
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 1
} | 0.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"coverage",
"mock",
"flake8"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | coverage==7.8.0
-e git+https://github.com/Duke-GCB/DukeDSClient.git@3137fc3d94da3754c85715f3912bbebbe22d19be#egg=DukeDSClient
exceptiongroup==1.2.2
flake8==7.2.0
future==0.16.0
iniconfig==2.1.0
mccabe==0.7.0
mock==5.2.0
packaging==24.2
pluggy==1.5.0
pycodestyle==2.13.0
pyflakes==3.3.2
pytest==8.3.5
pytz==2025.2
PyYAML==3.12
requests==2.13.0
six==1.10.0
tomli==2.2.1
| name: DukeDSClient
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- coverage==7.8.0
- exceptiongroup==1.2.2
- flake8==7.2.0
- future==0.16.0
- iniconfig==2.1.0
- mccabe==0.7.0
- mock==5.2.0
- packaging==24.2
- pluggy==1.5.0
- pycodestyle==2.13.0
- pyflakes==3.3.2
- pytest==8.3.5
- pytz==2025.2
- pyyaml==3.12
- requests==2.13.0
- six==1.10.0
- tomli==2.2.1
prefix: /opt/conda/envs/DukeDSClient
| [
"ddsc/core/tests/test_util.py::TestUtil::test_verify_terminal_encoding_empty_is_ok",
"ddsc/core/tests/test_util.py::TestUtil::test_verify_terminal_encoding_none_is_ok"
] | [] | [
"ddsc/core/tests/test_util.py::TestUtil::test_verify_terminal_encoding_ascii_raises",
"ddsc/core/tests/test_util.py::TestUtil::test_verify_terminal_encoding_lower",
"ddsc/core/tests/test_util.py::TestUtil::test_verify_terminal_encoding_upper",
"ddsc/core/tests/test_util.py::TestProgressBar::test_show_no_waiting",
"ddsc/core/tests/test_util.py::TestProgressBar::test_show_with_waiting",
"ddsc/core/tests/test_util.py::TestProgressPrinter::test_stuff"
] | [] | MIT License | 1,554 | 150 | [
"ddsc/core/util.py"
] |
|
ni__nixnet-python-179 | 2b0b3bdef21f1953110a2a23a7fe4a5b60a4d29c | 2017-08-07 23:02:20 | 5590c92682e9a33adc823396a3565fa4dbb47055 | coveralls:
[](https://coveralls.io/builds/12733148)
Coverage remained the same at 64.175% when pulling **fbb2a886dbceb309684decb37c4785c427a83072 on epage:mapping** into **5590c92682e9a33adc823396a3565fa4dbb47055 on ni:master**.
coveralls:
[](https://coveralls.io/builds/12733182)
Coverage remained the same at 64.175% when pulling **fbb2a886dbceb309684decb37c4785c427a83072 on epage:mapping** into **5590c92682e9a33adc823396a3565fa4dbb47055 on ni:master**.
coveralls:
[](https://coveralls.io/builds/12744517)
Coverage remained the same at 64.175% when pulling **e2e8f3eaba54e918193a6816a0d89f3d87441593 on epage:mapping** into **5590c92682e9a33adc823396a3565fa4dbb47055 on ni:master**.
coveralls:
[](https://coveralls.io/builds/12744517)
Coverage remained the same at 64.175% when pulling **e2e8f3eaba54e918193a6816a0d89f3d87441593 on epage:mapping** into **5590c92682e9a33adc823396a3565fa4dbb47055 on ni:master**.
| diff --git a/nixnet/_session/collection.py b/nixnet/_session/collection.py
index 60aa361..7929db3 100644
--- a/nixnet/_session/collection.py
+++ b/nixnet/_session/collection.py
@@ -12,7 +12,7 @@ from nixnet import _props
@six.add_metaclass(abc.ABCMeta)
-class Collection(collections.Mapping):
+class Collection(collections.Sequence):
"""Collection of items in a session."""
def __init__(self, handle):
@@ -42,7 +42,6 @@ class Collection(collections.Mapping):
raise TypeError(index)
def __getitem__(self, index):
- # type: (typing.Union[int, typing.Text]) -> Item
if isinstance(index, six.integer_types):
name = self._list_cache[index]
elif isinstance(index, six.string_types):
@@ -59,6 +58,12 @@ class Collection(collections.Mapping):
def get(self, index, default=None):
# type: (typing.Union[int, typing.Text], typing.Any) -> Item
+ """Access an item, returning ``default`` on failure.
+
+ Args:
+ index(str or int): Item name or index
+ default: Value to return when lookup fails
+ """
if isinstance(index, six.integer_types):
try:
name = self._list_cache[index]
diff --git a/nixnet/_session/signals.py b/nixnet/_session/signals.py
index f0af203..ed31b22 100644
--- a/nixnet/_session/signals.py
+++ b/nixnet/_session/signals.py
@@ -5,6 +5,7 @@ from __future__ import print_function
import typing # NOQA: F401
from nixnet import _funcs
+from nixnet import _props
from nixnet._session import collection
@@ -18,6 +19,20 @@ class Signals(collection.Collection):
def _create_item(self, handle, index, name):
return Signal(handle, index, name)
+ @property
+ def resamp_rate(self):
+ # type: () -> float
+ """float: Rate used to resample frame data to/from signal data in waveforms.
+
+ The units are in Hertz (samples per second).
+ """
+ return _props.get_session_resamp_rate(self._handle)
+
+ @resamp_rate.setter
+ def resamp_rate(self, value):
+ # type: (float) -> None
+ _props.set_session_resamp_rate(self._handle, value)
+
class SinglePointInSignals(Signals):
"""Writeable signals in a session."""
diff --git a/nixnet/session.py b/nixnet/session.py
index 6849e40..0600160 100644
--- a/nixnet/session.py
+++ b/nixnet/session.py
@@ -312,7 +312,7 @@ class FrameOutQueuedSession(base.SessionBase):
@property
def frames(self):
# type: () -> session_frames.OutFrames
- """:any:`nixnet._session.frames.OutFrames`: Operate on session's frames"""
+ """:any:`nixnet._session.frames.InFrames`: Operate on session's frames"""
return self._frames
diff --git a/nixnet/types.py b/nixnet/types.py
index a668da9..715c6e5 100644
--- a/nixnet/types.py
+++ b/nixnet/types.py
@@ -231,7 +231,7 @@ class RawFrame(Frame):
"info",
"payload"]
- def __init__(self, timestamp, identifier, type, flags=0, info=0, payload=b""):
+ def __init__(self, timestamp, identifier, type, flags, info, payload=b""):
# type: (int, int, constants.FrameType, int, int, bytes) -> None
self.timestamp = timestamp
self.identifier = identifier
@@ -271,24 +271,14 @@ class RawFrame(Frame):
"""RawFrame debug representation.
>>> RawFrame(1, 2, constants.FrameType.CAN_DATA, 3, 4)
- RawFrame(timestamp=0x1, identifier=0x2, type=FrameType.CAN_DATA, flags=0x3, info=0x4)
+ RawFrame(timestamp=0x1, identifier=0x2, type=FrameType.CAN_DATA, flags=0x3, info=0x4, payload=...)
"""
- optional = []
- if self.flags != 0:
- optional.append('flags=0x{:x}'.format(self.flags))
- if self.info != 0:
- optional.append('info=0x{:x}'.format(self.info))
- if self.payload:
- optional.append('len(payload)={}'.format(len(self.payload)))
- if optional:
- optional_params = ', {}'.format(", ".join(optional))
- else:
- optional_params = ''
- return "RawFrame(timestamp=0x{:x}, identifier=0x{:x}, type={}{})".format(
+ return "RawFrame(timestamp=0x{:x}, identifier=0x{:x}, type={}, flags=0x{:x}, info=0x{:x}, payload=...)".format(
self.timestamp,
self.identifier,
self.type,
- optional_params)
+ self.flags,
+ self.info)
class CanFrame(Frame):
@@ -310,7 +300,7 @@ class CanFrame(Frame):
"timestamp",
"payload"]
- def __init__(self, identifier, type=constants.FrameType.CAN_DATA, payload=b""):
+ def __init__(self, identifier, type, payload=b""):
# type: (typing.Union[CanIdentifier, int], constants.FrameType, bytes) -> None
if isinstance(identifier, int):
self.identifier = CanIdentifier(identifier)
@@ -327,7 +317,7 @@ class CanFrame(Frame):
>>> raw = RawFrame(5, 0x20000001, constants.FrameType.CAN_DATA, _cconsts.NX_FRAME_FLAGS_TRANSMIT_ECHO, 0, b'')
>>> CanFrame.from_raw(raw)
- CanFrame(CanIdentifier(0x1, extended=True), echo=True, timestamp=0x5)
+ CanFrame(CanIdentifier(0x1, extended=True), echo=True, type=FrameType.CAN_DATA, timestamp=0x5, payload=...)
"""
identifier = CanIdentifier.from_raw(frame.identifier)
can_frame = CanFrame(identifier, constants.FrameType(frame.type), frame.payload)
@@ -339,11 +329,11 @@ class CanFrame(Frame):
"""Convert to RawFrame.
>>> CanFrame(CanIdentifier(1, True), constants.FrameType.CAN_DATA).to_raw()
- RawFrame(timestamp=0x0, identifier=0x20000001, type=FrameType.CAN_DATA)
+ RawFrame(timestamp=0x0, identifier=0x20000001, type=FrameType.CAN_DATA, flags=0x0, info=0x0, payload=...)
>>> c = CanFrame(CanIdentifier(1, True), constants.FrameType.CAN_DATA)
>>> c.echo = True
>>> c.to_raw()
- RawFrame(timestamp=0x0, identifier=0x20000001, type=FrameType.CAN_DATA, flags=0x80)
+ RawFrame(timestamp=0x0, identifier=0x20000001, type=FrameType.CAN_DATA, flags=0x80, info=0x0, payload=...)
"""
identifier = int(self.identifier)
flags = 0
@@ -371,27 +361,14 @@ class CanFrame(Frame):
# type: () -> typing.Text
"""CanFrame debug representation.
- >>> CanFrame(1)
- CanFrame(CanIdentifier(0x1))
- >>> CanFrame(1, constants.FrameType.CANFD_DATA, b'\x01')
- CanFrame(CanIdentifier(0x1), type=FrameType.CANFD_DATA, len(payload)=1)
+ >>> CanFrame(1, constants.FrameType.CAN_DATA)
+ CanFrame(CanIdentifier(0x1), echo=False, type=FrameType.CAN_DATA, timestamp=0x0, payload=...)
"""
- optional = []
- if self.echo:
- optional.append('echo={}'.format(self.echo))
- if self.type != constants.FrameType.CAN_DATA:
- optional.append('type={}'.format(self.type))
- if self.timestamp != 0:
- optional.append('timestamp=0x{:x}'.format(self.timestamp))
- if self.payload:
- optional.append('len(payload)={}'.format(len(self.payload)))
- if optional:
- optional_params = ', {}'.format(", ".join(optional))
- else:
- optional_params = ''
- return "CanFrame({}{})".format(
+ return "CanFrame({}, echo={}, type={}, timestamp=0x{:x}, payload=...)".format(
self.identifier,
- optional_params)
+ self.echo,
+ self.type,
+ self.timestamp)
class CanBusErrorFrame(Frame):
@@ -446,7 +423,7 @@ class CanBusErrorFrame(Frame):
"""Convert to RawFrame.
>>> CanBusErrorFrame(100, constants.CanCommState.BUS_OFF, True, constants.CanLastErr.STUFF, 1, 2).to_raw()
- RawFrame(timestamp=0x64, identifier=0x0, type=FrameType.CAN_BUS_ERROR, len(payload)=5)
+ RawFrame(timestamp=0x64, identifier=0x0, type=FrameType.CAN_BUS_ERROR, flags=0x0, info=0x0, payload=...)
"""
identifier = 0
flags = 0
@@ -459,7 +436,7 @@ class CanBusErrorFrame(Frame):
self.tx_err_count,
self.rx_err_count,
]
- payload = bytes(bytearray(payload_data))
+ payload = bytes(payload_data)
return RawFrame(self.timestamp, identifier, self.type, flags, info, payload)
@property
@@ -526,7 +503,7 @@ class DelayFrame(Frame):
"""Convert to RawFrame.
>>> DelayFrame(250).to_raw()
- RawFrame(timestamp=0xfa, identifier=0x0, type=FrameType.SPECIAL_DELAY)
+ RawFrame(timestamp=0xfa, identifier=0x0, type=FrameType.SPECIAL_DELAY, flags=0x0, info=0x0, payload=...)
"""
identifier = 0
flags = 0
@@ -590,7 +567,7 @@ class LogTriggerFrame(Frame):
"""Convert to RawFrame.
>>> LogTriggerFrame(250).to_raw()
- RawFrame(timestamp=0xfa, identifier=0x0, type=FrameType.SPECIAL_LOG_TRIGGER)
+ RawFrame(timestamp=0xfa, identifier=0x0, type=FrameType.SPECIAL_LOG_TRIGGER, flags=0x0, info=0x0, payload=...)
"""
identifier = 0
flags = 0
@@ -650,7 +627,7 @@ class StartTriggerFrame(Frame):
"""Convert to RawFrame.
>>> StartTriggerFrame(250).to_raw()
- RawFrame(timestamp=0xfa, identifier=0x0, type=FrameType.SPECIAL_START_TRIGGER)
+ RawFrame(timestamp=0xfa, identifier=0x0, type=FrameType.SPECIAL_START_TRIGGER, flags=0x0, info=0x0, payload=...)
"""
identifier = 0
flags = 0
diff --git a/nixnet_examples/can_frame_queued_io.py b/nixnet_examples/can_frame_queued_io.py
index ba9b893..0dfcdcf 100644
--- a/nixnet_examples/can_frame_queued_io.py
+++ b/nixnet_examples/can_frame_queued_io.py
@@ -2,15 +2,18 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-import time
-
+import pprint
import six
+import time
import nixnet
from nixnet import constants
from nixnet import types
+pp = pprint.PrettyPrinter(indent=4)
+
+
def main():
database_name = 'NIXNET_example'
cluster_name = 'CAN_Cluster'
@@ -72,8 +75,8 @@ def main():
count = 1
frames = input_session.frames.read(count)
for frame in frames:
- print('Received frame: {}'.format(frame))
- print(' payload={}'.format(list(six.iterbytes(frame.payload))))
+ print('Received frame: ')
+ pp.pprint(frame)
i += 1
if max(payload) + i > 0xFF:
diff --git a/nixnet_examples/can_frame_stream_io.py b/nixnet_examples/can_frame_stream_io.py
index 464455e..60769d2 100644
--- a/nixnet_examples/can_frame_stream_io.py
+++ b/nixnet_examples/can_frame_stream_io.py
@@ -2,15 +2,18 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-import time
-
+import pprint
import six
+import time
import nixnet
from nixnet import constants
from nixnet import types
+pp = pprint.PrettyPrinter(indent=4)
+
+
def main():
interface1 = 'CAN1'
interface2 = 'CAN2'
@@ -64,8 +67,8 @@ def main():
count = 1
frames = input_session.frames.read(count)
for frame in frames:
- print('Received frame: {}'.format(frame))
- print(' payload={}'.format(list(six.iterbytes(frame.payload))))
+ print('Received frame: ')
+ pp.pprint(frame)
i += 1
if max(payload) + i > 0xFF:
diff --git a/nixnet_examples/can_signal_conversion.py b/nixnet_examples/can_signal_conversion.py
index 98cd6c1..2378577 100644
--- a/nixnet_examples/can_signal_conversion.py
+++ b/nixnet_examples/can_signal_conversion.py
@@ -2,11 +2,15 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
+import pprint
import six
from nixnet import convert
+pp = pprint.PrettyPrinter(indent=4)
+
+
def main():
database_name = 'NIXNET_example'
cluster_name = 'CAN_Cluster'
@@ -31,11 +35,12 @@ def main():
frames = session.convert_signals_to_frames(expected_signals)
print('Frames:')
for frame in frames:
- print(' {}'.format(frame))
- print(' payload={}'.format(list(six.iterbytes(frame.payload))))
+ print(' {}'.format(pp.pformat(frame)))
converted_signals = session.convert_frames_to_signals(frames)
- print('Signals: {}'.format([v for (_, v) in converted_signals]))
+ print('Signals:')
+ for expected, (_, converted) in zip(expected_signals, converted_signals):
+ print(' {} {}'.format(expected, converted))
if __name__ == '__main__':
diff --git a/nixnet_examples/can_signal_single_point_io.py b/nixnet_examples/can_signal_single_point_io.py
index e9a5725..4b031c4 100644
--- a/nixnet_examples/can_signal_single_point_io.py
+++ b/nixnet_examples/can_signal_single_point_io.py
@@ -3,15 +3,18 @@ from __future__ import division
from __future__ import print_function
import datetime
+import pprint
+import six
import sys
import time
-import six
-
import nixnet
from nixnet import constants
+pp = pprint.PrettyPrinter(indent=4)
+
+
def convert_timestamp(timestamp):
system_epoch = time.gmtime(0)
system_epock_datetime = datetime.datetime(system_epoch.tm_year, system_epoch.tm_mon, system_epoch.tm_mday)
@@ -81,7 +84,7 @@ def main():
signals = input_session.signals.read()
for timestamp, value in signals:
date = convert_timestamp(timestamp)
- print('Received signal with timestamp {} and value {}'.format(date, value))
+ print('Received signal with timepstamp {} and value {}'.format(date, value))
i += 1
if max(value_buffer) + i > sys.float_info.max:
| `session.signals` and `session.frames` might not correctly be implementing Mapping semantics
These containers allow someone to
- Iterate on the contained signals
- Lookup signals by index and name (but not slice)
They were [defined](https://github.com/ni/nixnet-python/blob/master/nixnet/_session/collection.py) as implementing the [`Mapping`](https://docs.python.org/3/library/collections.abc.html) abc. The concern is that when you iterate on other mappings, you get the keys back. To iterate on the values, you need to call `mapping.values()`. | ni/nixnet-python | diff --git a/tests/test_examples.py b/tests/test_examples.py
index 1e4f532..6fe9400 100644
--- a/tests/test_examples.py
+++ b/tests/test_examples.py
@@ -2,7 +2,6 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-import copy
import mock # type: ignore
import pytest # type: ignore
@@ -32,8 +31,6 @@ MockXnetLibrary.nx_clear.return_value = _ctypedefs.u32(0)
def six_input(queue):
- # Leave `input_values` alone for easier debugging
- queue = copy.copy(queue)
queue.reverse()
def _six_input(prompt=""):
diff --git a/tests/test_frames.py b/tests/test_frames.py
index 0380ade..ce8d50b 100644
--- a/tests/test_frames.py
+++ b/tests/test_frames.py
@@ -35,7 +35,7 @@ def test_iterate_frames_with_empty_payload():
payload = b'\x00\x00\x00\x00\x00\x00\x00\x00'
empty_bytes = b'\x01\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x04\x05\x00' + payload
(empty_frame, ) = list(_frames.iterate_frames(empty_bytes))
- assert repr(empty_frame) == 'RawFrame(timestamp=0x1, identifier=0x2, type=FrameType.CAN_DATA, flags=0x4, info=0x5)'
+ assert repr(empty_frame) == 'RawFrame(timestamp=0x1, identifier=0x2, type=FrameType.CAN_DATA, flags=0x4, info=0x5, payload=...)' # NOQA: E501
assert empty_frame.payload == b''
@@ -43,14 +43,14 @@ def test_iterate_frames_with_base_payload():
payload = b'\x01\x02\x03\x04\x05\x06\x07\x08'
base_bytes = b'\x06\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00\x08\x09\x08' + payload
(base_frame, ) = list(_frames.iterate_frames(base_bytes))
- assert repr(base_frame) == 'RawFrame(timestamp=0x6, identifier=0x7, type=FrameType.CAN_DATA, flags=0x8, info=0x9, len(payload)=8)' # NOQA: E501
+ assert repr(base_frame) == 'RawFrame(timestamp=0x6, identifier=0x7, type=FrameType.CAN_DATA, flags=0x8, info=0x9, payload=...)' # NOQA: E501
assert base_frame.payload == b'\x01\x02\x03\x04\x05\x06\x07\x08'
def test_iterate_frames_with_partial_base_payload():
frame_bytes = b'\xd8\xb7@B\xeb\xff\xd2\x01\x00\x00\x00\x00\x00\x00\x00\x04\x02\x04\x08\x10\x00\x00\x00\x00'
(frame, ) = list(_frames.iterate_frames(frame_bytes))
- assert repr(frame) == 'RawFrame(timestamp=0x1d2ffeb4240b7d8, identifier=0x0, type=FrameType.CAN_DATA, len(payload)=4)' # NOQA: E501
+ assert repr(frame) == 'RawFrame(timestamp=0x1d2ffeb4240b7d8, identifier=0x0, type=FrameType.CAN_DATA, flags=0x0, info=0x0, payload=...)' # NOQA: E501
assert frame.payload == b'\x02\x04\x08\x10'
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 8
} | 0.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"mock",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio"
],
"pre_install": null,
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
coverage==6.2
execnet==1.9.0
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
mock==5.2.0
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
-e git+https://github.com/ni/nixnet-python.git@2b0b3bdef21f1953110a2a23a7fe4a5b60a4d29c#egg=nixnet
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
pytest-asyncio==0.16.0
pytest-cov==4.0.0
pytest-mock==3.6.1
pytest-xdist==3.0.2
six==1.17.0
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
tomli==1.2.3
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: nixnet-python
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- coverage==6.2
- execnet==1.9.0
- mock==5.2.0
- pytest-asyncio==0.16.0
- pytest-cov==4.0.0
- pytest-mock==3.6.1
- pytest-xdist==3.0.2
- six==1.17.0
- tomli==1.2.3
prefix: /opt/conda/envs/nixnet-python
| [
"tests/test_frames.py::test_iterate_frames_with_empty_payload",
"tests/test_frames.py::test_iterate_frames_with_base_payload",
"tests/test_frames.py::test_iterate_frames_with_partial_base_payload"
] | [
"tests/test_frames.py::test_stream_loopback",
"tests/test_frames.py::test_queued_loopback",
"tests/test_frames.py::test_singlepoint_loopback",
"tests/test_frames.py::test_session_frames_container",
"tests/test_frames.py::test_session_frames_properties",
"tests/test_frames.py::test_session_frame_container"
] | [
"tests/test_examples.py::test_can_frame_queued_empty_session[input_values0]",
"tests/test_examples.py::test_can_frame_queued_empty_session[input_values1]",
"tests/test_examples.py::test_can_frame_queued_empty_session[input_values2]",
"tests/test_examples.py::test_can_frame_queued_empty_session[input_values3]",
"tests/test_examples.py::test_can_frame_queued_empty_session[input_values4]",
"tests/test_examples.py::test_can_frame_stream_empty_session[input_values0]",
"tests/test_examples.py::test_can_frame_stream_empty_session[input_values1]",
"tests/test_examples.py::test_can_frame_stream_empty_session[input_values2]",
"tests/test_examples.py::test_can_frame_stream_empty_session[input_values3]",
"tests/test_examples.py::test_can_frame_stream_empty_session[input_values4]",
"tests/test_examples.py::test_can_signal_single_point_empty_session[input_values0]",
"tests/test_examples.py::test_can_signal_single_point_empty_session[input_values1]",
"tests/test_examples.py::test_can_signal_single_point_empty_session[input_values2]",
"tests/test_examples.py::test_can_signal_single_point_empty_session[input_values3]",
"tests/test_examples.py::test_can_signal_single_point_empty_session[input_values4]",
"tests/test_examples.py::test_can_signal_single_point_empty_session[input_values5]",
"tests/test_examples.py::test_can_signal_single_point_empty_session[input_values6]",
"tests/test_examples.py::test_can_signal_conversion_empty_session[input_values0]",
"tests/test_examples.py::test_can_signal_conversion_empty_session[input_values1]",
"tests/test_examples.py::test_can_signal_conversion_empty_session[input_values2]",
"tests/test_frames.py::test_iterate_frames_with_multiple_frames",
"tests/test_frames.py::test_iterate_frames_corrupted_frame",
"tests/test_frames.py::test_can_identifier_equality",
"tests/test_frames.py::test_can_identifier_overflow",
"tests/test_frames.py::test_can_identifier_extended_overflow",
"tests/test_frames.py::test_raw_frame_equality",
"tests/test_frames.py::test_raw_frame_conversion",
"tests/test_frames.py::test_can_frame_equality",
"tests/test_frames.py::test_can_bus_error_frame_equality",
"tests/test_frames.py::test_delay_frame_equality",
"tests/test_frames.py::test_log_trigger_frame_equality",
"tests/test_frames.py::test_start_trigger_frame_equality",
"tests/test_frames.py::test_serialize_frame_with_empty_payload",
"tests/test_frames.py::test_serialize_frame_with_base_payload",
"tests/test_frames.py::test_serialize_frame_with_payload_unit",
"tests/test_frames.py::test_serialize_frame_with_excessive_payload"
] | [] | MIT License | 1,570 | 3,880 | [
"nixnet/_session/collection.py",
"nixnet/_session/signals.py",
"nixnet/session.py",
"nixnet/types.py",
"nixnet_examples/can_frame_queued_io.py",
"nixnet_examples/can_frame_stream_io.py",
"nixnet_examples/can_signal_conversion.py",
"nixnet_examples/can_signal_single_point_io.py"
] |
acorg__slurm-pipeline-28 | 2aec1fd60f0765a26555768ec15719dcf973f294 | 2017-08-11 19:05:12 | 2aec1fd60f0765a26555768ec15719dcf973f294 | diff --git a/setup.py b/setup.py
index 68cd7fc..e20f87c 100644
--- a/setup.py
+++ b/setup.py
@@ -3,7 +3,7 @@
from setuptools import setup
setup(name='slurm-pipeline',
- version='1.1.8',
+ version='1.1.9',
packages=['slurm_pipeline'],
include_package_data=True,
url='https://github.com/acorg/slurm-pipeline',
diff --git a/slurm_pipeline/pipeline.py b/slurm_pipeline/pipeline.py
index e8c6eaf..443c1ae 100644
--- a/slurm_pipeline/pipeline.py
+++ b/slurm_pipeline/pipeline.py
@@ -281,11 +281,28 @@ class SlurmPipeline(SlurmPipelineBase):
@param step: A C{dict} with a job specification.
@param env: A C{str} key to C{str} value environment for the script.
@param args: A C{list} of command-line arguments.
- @raise SchedulingError: If a script outputs a task name more than once.
+ @raise SchedulingError: If a script outputs a task name more than once
+ or if the step script cannot be executed.
"""
- step['stdout'] = subprocess.check_output(
- [step['script']] + args, cwd=step.get('cwd', '.'), env=env,
- stdin=DEVNULL, universal_newlines=True)
+ try:
+ step['stdout'] = subprocess.check_output(
+ [step['script']] + args, cwd=step.get('cwd', '.'), env=env,
+ stdin=DEVNULL, universal_newlines=True)
+ except subprocess.CalledProcessError as e:
+ import sys
+ if sys.version_info >= (3, 5):
+ raise SchedulingError(
+ "Could not execute step '%s' script '%s' in directory "
+ "'%s'. Attempted command: '%s'. Exit status: %s. Standard "
+ "output: '%s'. Standard error: '%s'." % (
+ step['name'], step['script'], step.get('cwd', '.'),
+ e.cmd, e.returncode, e.output, e.stderr))
+ else:
+ raise SchedulingError(
+ "Could not execute step '%s' script '%s' in directory "
+ "'%s'. Attempted command: '%s'. Exit status: %s."
+ % (step['name'], step['script'], step.get('cwd', '.'),
+ e.cmd, e.returncode))
# Look at all output lines for task names and SLURM job ids created
# (if any) by this script. Ignore any non-matching output.
| Improve error output when running step scripts
Catch a subprocess error and print the stdout and stderr of the offending script. | acorg/slurm-pipeline | diff --git a/test/test_pipeline.py b/test/test_pipeline.py
index 4c85b14..a392015 100644
--- a/test/test_pipeline.py
+++ b/test/test_pipeline.py
@@ -3,6 +3,8 @@ from unittest import TestCase
from six import assertRaisesRegex
from json import dumps
import platform
+from subprocess import CalledProcessError
+from sys import version_info
from slurm_pipeline.pipeline import SlurmPipeline, DEVNULL
from slurm_pipeline.error import SchedulingError, SpecificationError
@@ -797,7 +799,8 @@ class TestSlurmPipeline(TestCase):
@patch('subprocess.check_output')
@patch('os.access')
@patch('os.path.exists')
- def testCwdWithRelativeScriptPath(self, existsMock, accessMock,
+ @patch('os.path.isdir')
+ def testCwdWithRelativeScriptPath(self, isdirMock, existsMock, accessMock,
subprocessMock):
"""
If a step has a cwd set and its script is a relative path, the path of
@@ -1682,3 +1685,41 @@ class TestSlurmPipeline(TestCase):
"\[-10000, 10000\] range$")
assertRaisesRegex(self, SchedulingError, error, sp.schedule,
nice=-10001)
+
+ @patch('subprocess.check_output')
+ @patch('os.access')
+ @patch('os.path.exists')
+ @patch('os.path.isdir')
+ def testSubprocessExecRaises(self, isdirMock, existsMock, accessMock,
+ subprocessMock):
+ """
+ If subprocess.check_output raises CalledProcessError, we must
+ raise a corresponding SchedulingError.
+ """
+ sp = SlurmPipeline(
+ {
+ 'steps': [
+ {
+ 'cwd': 'dir',
+ 'name': 'name1',
+ 'script': 'script1',
+ },
+ ],
+ })
+
+ if version_info >= (3, 5):
+ subprocessMock.side_effect = CalledProcessError(
+ 3, 'command.sh', output='the stdout', stderr='the stderr')
+
+ error = ("^Could not execute step 'name1' script 'script1' in "
+ "directory 'dir'\. Attempted command: 'command.sh'\. "
+ "Exit status: 3\. Standard output: 'the stdout'\. "
+ "Standard error: 'the stderr'\.$")
+ else:
+ subprocessMock.side_effect = CalledProcessError(3, 'command.sh')
+
+ error = ("^Could not execute step 'name1' script 'script1' in "
+ "directory 'dir'\. Attempted command: 'command.sh'\. "
+ "Exit status: 3\.$")
+
+ assertRaisesRegex(self, SchedulingError, error, sp.schedule)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 2
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"discover",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements-3.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | discover==0.4.0
exceptiongroup==1.2.2
iniconfig==2.1.0
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
six==1.10.0
-e git+https://github.com/acorg/slurm-pipeline.git@2aec1fd60f0765a26555768ec15719dcf973f294#egg=slurm_pipeline
tomli==2.2.1
| name: slurm-pipeline
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- discover==0.4.0
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- six==1.10.0
- tomli==2.2.1
prefix: /opt/conda/envs/slurm-pipeline
| [
"test/test_pipeline.py::TestSlurmPipeline::testSubprocessExecRaises"
] | [] | [
"test/test_pipeline.py::TestSlurmPipeline::testAccessAndExistsAreCalled",
"test/test_pipeline.py::TestSlurmPipeline::testAccessAndExistsAreCalledWithAbsolutePathScript",
"test/test_pipeline.py::TestSlurmPipeline::testAccessAndExistsAreCalledWithCwd",
"test/test_pipeline.py::TestSlurmPipeline::testCollectStepWithEmptyDependencies",
"test/test_pipeline.py::TestSlurmPipeline::testCollectStepWithNoDependencies",
"test/test_pipeline.py::TestSlurmPipeline::testCwdWithRelativeScriptPath",
"test/test_pipeline.py::TestSlurmPipeline::testDefaultNice",
"test/test_pipeline.py::TestSlurmPipeline::testErrorStep",
"test/test_pipeline.py::TestSlurmPipeline::testErrorStepWithNoDependencies",
"test/test_pipeline.py::TestSlurmPipeline::testFirstStepAndLastStepDifferent",
"test/test_pipeline.py::TestSlurmPipeline::testFirstStepAndLastStepSame",
"test/test_pipeline.py::TestSlurmPipeline::testFirstStepAndNoLastStep",
"test/test_pipeline.py::TestSlurmPipeline::testFirstStepOnly",
"test/test_pipeline.py::TestSlurmPipeline::testForce",
"test/test_pipeline.py::TestSlurmPipeline::testJSON",
"test/test_pipeline.py::TestSlurmPipeline::testLastStepBeforeFirstStep",
"test/test_pipeline.py::TestSlurmPipeline::testLastStepOnly",
"test/test_pipeline.py::TestSlurmPipeline::testNiceTooBig",
"test/test_pipeline.py::TestSlurmPipeline::testNiceTooSmall",
"test/test_pipeline.py::TestSlurmPipeline::testNonexecutableScript",
"test/test_pipeline.py::TestSlurmPipeline::testNonexistentDir",
"test/test_pipeline.py::TestSlurmPipeline::testNonexistentFirstStep",
"test/test_pipeline.py::TestSlurmPipeline::testNonexistentLastStep",
"test/test_pipeline.py::TestSlurmPipeline::testNonexistentScript",
"test/test_pipeline.py::TestSlurmPipeline::testRepeatedTaskJobId",
"test/test_pipeline.py::TestSlurmPipeline::testRepeatedTaskName",
"test/test_pipeline.py::TestSlurmPipeline::testScheduledTime",
"test/test_pipeline.py::TestSlurmPipeline::testScriptArgs",
"test/test_pipeline.py::TestSlurmPipeline::testSingleCollectorDependencyNoJobIds",
"test/test_pipeline.py::TestSlurmPipeline::testSingleCollectorDependencyTaskNamesAndJobIds",
"test/test_pipeline.py::TestSlurmPipeline::testSingleDependencySynchronousTaskNamesJobIdsAndCalls",
"test/test_pipeline.py::TestSlurmPipeline::testSingleDependencyTaskNamesJobIdsAndCalls",
"test/test_pipeline.py::TestSlurmPipeline::testSkipNone",
"test/test_pipeline.py::TestSlurmPipeline::testSkipNonexistentStep",
"test/test_pipeline.py::TestSlurmPipeline::testSkipNonexistentSteps",
"test/test_pipeline.py::TestSlurmPipeline::testSkipTwo",
"test/test_pipeline.py::TestSlurmPipeline::testSleep",
"test/test_pipeline.py::TestSlurmPipeline::testSleepNotCalledByDefault",
"test/test_pipeline.py::TestSlurmPipeline::testSleepNotCalledWhenZero",
"test/test_pipeline.py::TestSlurmPipeline::testSpecificNice",
"test/test_pipeline.py::TestSlurmPipeline::testStartAfter",
"test/test_pipeline.py::TestSlurmPipeline::testStepStdout",
"test/test_pipeline.py::TestSlurmPipeline::testStepsDict",
"test/test_pipeline.py::TestSlurmPipeline::testStringNice",
"test/test_pipeline.py::TestSlurmPipeline::testTaskScheduleTime",
"test/test_pipeline.py::TestSlurmPipeline::testTasksFollowingSchedule"
] | [] | MIT License | 1,582 | 631 | [
"setup.py",
"slurm_pipeline/pipeline.py"
] |
|
jupyter__nbgrader-867 | c647866318ccb67681d832afb8a0579d78fa6786 | 2017-08-12 00:28:39 | 5bc6f37c39c8b10b8f60440b2e6d9487e63ef3f1 | diff --git a/nbgrader/converters/base.py b/nbgrader/converters/base.py
index c4af98b2..5fabe801 100644
--- a/nbgrader/converters/base.py
+++ b/nbgrader/converters/base.py
@@ -66,7 +66,12 @@ class BaseConverter(LoggingConfigurable):
self.exporter = self.exporter_class(parent=self, config=self.config)
for pp in self.preprocessors:
self.exporter.register_preprocessor(pp)
- self.convert_notebooks()
+ currdir = os.getcwd()
+ os.chdir(self.coursedir.root)
+ try:
+ self.convert_notebooks()
+ finally:
+ os.chdir(currdir)
@default("classes")
def _classes_default(self):
diff --git a/nbgrader/preprocessors/headerfooter.py b/nbgrader/preprocessors/headerfooter.py
index 56c5dc27..83c6aaa6 100644
--- a/nbgrader/preprocessors/headerfooter.py
+++ b/nbgrader/preprocessors/headerfooter.py
@@ -8,8 +8,8 @@ from ..nbgraderformat import read as read_nb
class IncludeHeaderFooter(NbGraderPreprocessor):
"""A preprocessor for adding header and/or footer cells to a notebook."""
- header = Unicode("", help="Path to header notebook").tag(config=True)
- footer = Unicode("", help="Path to footer notebook").tag(config=True)
+ header = Unicode("", help="Path to header notebook, relative to the root of the course directory").tag(config=True)
+ footer = Unicode("", help="Path to footer notebook, relative to the root of the course directory").tag(config=True)
def preprocess(self, nb, resources):
"""Concatenates the cells from the header and footer notebooks to the
| Error generating assignment and managing students
Firstly, I apologize if this is not the proper place to report issues like this. I am exploring nbgrader, and have had some trouble getting it to work.
I have installed nbgrader on a local Jupyterhub installation and have been working through the example notebooks.
I had to create a `~/.jupyter/nbgrader_config.py` file that has the following contents:
```
c = get_config()
c.CourseDirectory.root = '~/Jupyter/ChEn6703_test_nbgrader'
```
(note that I used the full path above, but replaced the prefix with `~` for security reasons)
There are a few strange things going on though:
## Problems with student entries
1. When I go to the `Manage Students` section of nbgrader, it doesn't show any students.
1. When I do `nbgrader db student list --log-level='DEBUG'` I get something which is inconsistent with the empty list in the `Manage Students` dialog.
```
[DbStudentListApp | DEBUG] Searching ['~/Jupyter/ChEn6703_test_nbgrader', '~/.jupyter', '/usr/etc/jupyter', '/usr/local/etc/jupyter', '/etc/jupyter'] for config files
[DbStudentListApp | DEBUG] Looking for jupyter_config in /etc/jupyter
[DbStudentListApp | DEBUG] Looking for jupyter_config in /usr/local/etc/jupyter
[DbStudentListApp | DEBUG] Looking for jupyter_config in /usr/etc/jupyter
[DbStudentListApp | DEBUG] Looking for jupyter_config in ~/.jupyter
[DbStudentListApp | DEBUG] Looking for jupyter_config in ~/Jupyter/ChEn6703_test_nbgrader
[DbStudentListApp | DEBUG] Looking for nbgrader_config in /etc/jupyter
[DbStudentListApp | DEBUG] Looking for nbgrader_config in /usr/local/etc/jupyter
[DbStudentListApp | DEBUG] Looking for nbgrader_config in /usr/etc/jupyter
[DbStudentListApp | DEBUG] Looking for nbgrader_config in ~/.jupyter
[DbStudentListApp | DEBUG] Loaded config file: ~/.jupyter/nbgrader_config.py
[DbStudentListApp | DEBUG] Looking for nbgrader_config in ~/Jupyter/ChEn6703_test_nbgrader
[DbStudentListApp | DEBUG] Loaded config file: ~/Jupyter/ChEn6703_test_nbgrader/nbgrader_conf
ig.py
[DbStudentListApp | DEBUG] Looking for nbgrader_config in ~/Jupyter/ChEn6703_test_nbgrader
[DbStudentListApp | DEBUG] Loaded config file: ~/Jupyter/ChEn6703_test_nbgrader/nbgrader_conf
ig.py
There are 1 students in the database:
1 (Flinstone, Fred) -- None
```
3. If I manually enter a student in the `Manage Students` dialog, the student shows up and then disappears.
## Problems when generating the assignment
When I go to `Manage Assignments` and click on the `Generate` icon for the example `ps1` assignment, I get:
```
[INFO] Copying ~/Jupyter/ChEn6703_test_nbgrader/source/./ps1/jupyter.png -> ~/Jupyter/ChEn6703_test_nbgrader/release/./ps1/jupyter.png
[INFO] Updating/creating assignment 'ps1': {}
[INFO] Converting notebook ~/Jupyter/ChEn6703_test_nbgrader/source/./ps1/problem1.ipynb
[ERROR] There was an error processing assignment: ~/Jupyter/ChEn6703_test_nbgrader/source/./ps1
[ERROR] Traceback (most recent call last):
File "/usr/local/lib/python3.5/dist-packages/nbgrader/converters/base.py", line 288, in convert_notebooks
self.convert_single_notebook(notebook_filename)
File "/usr/local/lib/python3.5/dist-packages/nbgrader/converters/base.py", line 244, in convert_single_notebook
output, resources = self.exporter.from_filename(notebook_filename, resources=resources)
File "/usr/local/lib/python3.5/dist-packages/nbconvert/exporters/exporter.py", line 172, in from_filename
return self.from_file(f, resources=resources, **kw)
File "/usr/local/lib/python3.5/dist-packages/nbconvert/exporters/exporter.py", line 190, in from_file
return self.from_notebook_node(nbformat.read(file_stream, as_version=4), resources=resources, **kw)
File "/usr/local/lib/python3.5/dist-packages/nbconvert/exporters/notebook.py", line 31, in from_notebook_node
nb_copy, resources = super(NotebookExporter, self).from_notebook_node(nb, resources, **kw)
File "/usr/local/lib/python3.5/dist-packages/nbconvert/exporters/exporter.py", line 132, in from_notebook_node
nb_copy, resources = self._preprocess(nb_copy, resources)
File "/usr/local/lib/python3.5/dist-packages/nbconvert/exporters/exporter.py", line 309, in _preprocess
nbc, resc = preprocessor(nbc, resc)
File "/usr/local/lib/python3.5/dist-packages/nbconvert/preprocessors/base.py", line 47, in __call__
return self.preprocess(nb,resources)
File "/usr/local/lib/python3.5/dist-packages/nbgrader/preprocessors/headerfooter.py", line 23, in preprocess
with open(self.header, 'r') as fh:
FileNotFoundError: [Errno 2] No such file or directory: 'source/header.ipynb'
[WARNING] Removing failed assignment: ~/Jupyter/ChEn6703_test_nbgrader/release/ps1
[ERROR] There was an error processing assignment 'ps1' for student '.'
[ERROR] Please see the the above traceback for details on the specific errors on the above failures.
Traceback
```
I have tried regenerating the entire class and this issue persists.
If I try the command line approach: `nbgrader assign source/ps1/` I get:
```
[AssignApp | ERROR] No notebooks were matched by ~/Jupyter/ChEn6703_test_nbgrader/source/./s
```
Note the `source/./s` which seems problematic.
Any ideas on this? | jupyter/nbgrader | diff --git a/nbgrader/tests/apps/test_nbgrader_assign.py b/nbgrader/tests/apps/test_nbgrader_assign.py
index 85539e3e..73575ef2 100644
--- a/nbgrader/tests/apps/test_nbgrader_assign.py
+++ b/nbgrader/tests/apps/test_nbgrader_assign.py
@@ -277,3 +277,15 @@ class TestNbGraderAssign(BaseTestApp):
run_nbgrader(["assign", "ps1", "--no-db", "--no-metadata"])
assert os.path.exists(join(course_dir, "release", "ps1", "p1.ipynb"))
+ def test_header(self, course_dir):
+ """Does the relative path to the header work?"""
+ self._empty_notebook(join(course_dir, 'source', 'ps1', 'foo.ipynb'))
+ self._empty_notebook(join(course_dir, 'source', 'header.ipynb'))
+ with open("nbgrader_config.py", "a") as fh:
+ fh.write("""c.CourseDirectory.db_assignments = [dict(name="ps1")]\n""")
+ fh.write("""c.IncludeHeaderFooter.header = "source/header.ipynb"\n""")
+ run_nbgrader(["assign", "ps1"])
+ assert os.path.isfile(join(course_dir, "release", "ps1", "foo.ipynb"))
+
+
+
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_media",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 2
} | 0.5 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -r dev-requirements.txt -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-rerunfailures",
"coverage",
"selenium",
"invoke",
"sphinx",
"codecov",
"cov-core",
"nbval"
],
"pre_install": [
"pip install -U pip wheel setuptools"
],
"python": "3.5",
"reqs_path": [
"dev-requirements.txt",
"dev-requirements-windows.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
alembic==1.7.7
anyio==3.6.2
argon2-cffi==21.3.0
argon2-cffi-bindings==21.2.0
async-generator==1.10
attrs==22.2.0
Babel==2.11.0
backcall==0.2.0
bleach==4.1.0
certifi==2021.5.30
cffi==1.15.1
charset-normalizer==2.0.12
codecov==2.1.13
comm==0.1.4
contextvars==2.4
cov-core==1.15.0
coverage==6.2
dataclasses==0.8
decorator==5.1.1
defusedxml==0.7.1
docutils==0.18.1
entrypoints==0.4
greenlet==2.0.2
idna==3.10
imagesize==1.4.1
immutables==0.19
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig==1.1.1
invoke==2.2.0
ipykernel==5.5.6
ipython==7.16.3
ipython-genutils==0.2.0
ipywidgets==7.8.5
jedi==0.17.2
Jinja2==3.0.3
json5==0.9.16
jsonschema==3.2.0
jupyter==1.1.1
jupyter-client==7.1.2
jupyter-console==6.4.3
jupyter-core==4.9.2
jupyter-server==1.13.1
jupyterlab==3.2.9
jupyterlab-pygments==0.1.2
jupyterlab-server==2.10.3
jupyterlab_widgets==1.1.11
Mako==1.1.6
MarkupSafe==2.0.1
mistune==0.8.4
nbclassic==0.3.5
nbclient==0.5.9
nbconvert==6.0.7
nbformat==5.1.3
-e git+https://github.com/jupyter/nbgrader.git@c647866318ccb67681d832afb8a0579d78fa6786#egg=nbgrader
nbval==0.10.0
nest-asyncio==1.6.0
notebook==6.4.10
packaging==21.3
pandocfilters==1.5.1
parso==0.7.1
pexpect==4.9.0
pickleshare==0.7.5
pluggy==1.0.0
prometheus-client==0.17.1
prompt-toolkit==3.0.36
ptyprocess==0.7.0
py==1.11.0
pycparser==2.21
pyenchant==3.2.2
Pygments==2.14.0
pyparsing==3.1.4
pyrsistent==0.18.0
pytest==7.0.1
pytest-cov==4.0.0
pytest-rerunfailures==10.3
python-dateutil==2.9.0.post0
pytz==2025.2
pyzmq==25.1.2
requests==2.27.1
selenium==3.141.0
Send2Trash==1.8.3
six==1.17.0
sniffio==1.2.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinx-rtd-theme==2.0.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jquery==4.1
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
sphinxcontrib-spelling==7.7.0
SQLAlchemy==1.4.54
terminado==0.12.1
testpath==0.6.0
tomli==1.2.3
tornado==6.1
traitlets==4.3.3
typing_extensions==4.1.1
urllib3==1.26.20
wcwidth==0.2.13
webencodings==0.5.1
websocket-client==1.3.1
widgetsnbextension==3.6.10
zipp==3.6.0
| name: nbgrader
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- alembic==1.7.7
- anyio==3.6.2
- argon2-cffi==21.3.0
- argon2-cffi-bindings==21.2.0
- async-generator==1.10
- attrs==22.2.0
- babel==2.11.0
- backcall==0.2.0
- bleach==4.1.0
- cffi==1.15.1
- charset-normalizer==2.0.12
- codecov==2.1.13
- comm==0.1.4
- contextvars==2.4
- cov-core==1.15.0
- coverage==6.2
- dataclasses==0.8
- decorator==5.1.1
- defusedxml==0.7.1
- docutils==0.18.1
- entrypoints==0.4
- greenlet==2.0.2
- idna==3.10
- imagesize==1.4.1
- immutables==0.19
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- iniconfig==1.1.1
- invoke==2.2.0
- ipykernel==5.5.6
- ipython==7.16.3
- ipython-genutils==0.2.0
- ipywidgets==7.8.5
- jedi==0.17.2
- jinja2==3.0.3
- json5==0.9.16
- jsonschema==3.2.0
- jupyter==1.1.1
- jupyter-client==7.1.2
- jupyter-console==6.4.3
- jupyter-core==4.9.2
- jupyter-server==1.13.1
- jupyterlab==3.2.9
- jupyterlab-pygments==0.1.2
- jupyterlab-server==2.10.3
- jupyterlab-widgets==1.1.11
- mako==1.1.6
- markupsafe==2.0.1
- mistune==0.8.4
- nbclassic==0.3.5
- nbclient==0.5.9
- nbconvert==6.0.7
- nbformat==5.1.3
- nbval==0.10.0
- nest-asyncio==1.6.0
- notebook==6.4.10
- packaging==21.3
- pandocfilters==1.5.1
- parso==0.7.1
- pexpect==4.9.0
- pickleshare==0.7.5
- pip==21.3.1
- pluggy==1.0.0
- prometheus-client==0.17.1
- prompt-toolkit==3.0.36
- ptyprocess==0.7.0
- py==1.11.0
- pycparser==2.21
- pyenchant==3.2.2
- pygments==2.14.0
- pyparsing==3.1.4
- pyrsistent==0.18.0
- pytest==7.0.1
- pytest-cov==4.0.0
- pytest-rerunfailures==10.3
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyzmq==25.1.2
- requests==2.27.1
- selenium==3.141.0
- send2trash==1.8.3
- setuptools==59.6.0
- six==1.17.0
- sniffio==1.2.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinx-rtd-theme==2.0.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jquery==4.1
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- sphinxcontrib-spelling==7.7.0
- sqlalchemy==1.4.54
- terminado==0.12.1
- testpath==0.6.0
- tomli==1.2.3
- tornado==6.1
- traitlets==4.3.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- wcwidth==0.2.13
- webencodings==0.5.1
- websocket-client==1.3.1
- widgetsnbextension==3.6.10
- zipp==3.6.0
prefix: /opt/conda/envs/nbgrader
| [
"nbgrader/tests/apps/test_nbgrader_assign.py::TestNbGraderAssign::test_header"
] | [] | [
"nbgrader/tests/apps/test_nbgrader_assign.py::TestNbGraderAssign::test_help",
"nbgrader/tests/apps/test_nbgrader_assign.py::TestNbGraderAssign::test_no_args",
"nbgrader/tests/apps/test_nbgrader_assign.py::TestNbGraderAssign::test_conflicting_args",
"nbgrader/tests/apps/test_nbgrader_assign.py::TestNbGraderAssign::test_multiple_args",
"nbgrader/tests/apps/test_nbgrader_assign.py::TestNbGraderAssign::test_no_assignment",
"nbgrader/tests/apps/test_nbgrader_assign.py::TestNbGraderAssign::test_single_file",
"nbgrader/tests/apps/test_nbgrader_assign.py::TestNbGraderAssign::test_multiple_files",
"nbgrader/tests/apps/test_nbgrader_assign.py::TestNbGraderAssign::test_dependent_files",
"nbgrader/tests/apps/test_nbgrader_assign.py::TestNbGraderAssign::test_save_cells",
"nbgrader/tests/apps/test_nbgrader_assign.py::TestNbGraderAssign::test_force",
"nbgrader/tests/apps/test_nbgrader_assign.py::TestNbGraderAssign::test_permissions",
"nbgrader/tests/apps/test_nbgrader_assign.py::TestNbGraderAssign::test_custom_permissions",
"nbgrader/tests/apps/test_nbgrader_assign.py::TestNbGraderAssign::test_add_remove_extra_notebooks",
"nbgrader/tests/apps/test_nbgrader_assign.py::TestNbGraderAssign::test_add_extra_notebooks_with_submissions",
"nbgrader/tests/apps/test_nbgrader_assign.py::TestNbGraderAssign::test_remove_extra_notebooks_with_submissions",
"nbgrader/tests/apps/test_nbgrader_assign.py::TestNbGraderAssign::test_same_notebooks_with_submissions",
"nbgrader/tests/apps/test_nbgrader_assign.py::TestNbGraderAssign::test_force_single_notebook",
"nbgrader/tests/apps/test_nbgrader_assign.py::TestNbGraderAssign::test_fail_no_notebooks",
"nbgrader/tests/apps/test_nbgrader_assign.py::TestNbGraderAssign::test_no_metadata"
] | [] | BSD 3-Clause "New" or "Revised" License | 1,584 | 421 | [
"nbgrader/converters/base.py",
"nbgrader/preprocessors/headerfooter.py"
] |
|
pydicom__pydicom-463 | affb1cf10c6be2aca311c29ddddc622f8bd1f810 | 2017-08-12 04:37:09 | bef49851e7c3b70edd43cc40fc84fe905e78d5ba | darcymason: Just noticed that since only values are compared, it is possible to pass even if some of the tags are different but have same value (very unlikely, but could happen if many entries were empty string, for example).
Would be nice to add a test too, e.g. the example from the issue first comment.
I'm not sure deriving from OrderedDict is worth it, there are only a few places where ordering is needed.
mrbean-bremen: Ok, you are probably right - it doesn't make much sense then.
scaramallion: `Dataset.values()` returns a list of `DataElement` so different tags with the same value will return False since `DataElement` equality checks tag, VR and value.
I already added a test reproducing #462 as my first commit, unless you're referring to something else?
darcymason: You're right of course, my bad, just looking too quickly | diff --git a/pydicom/dataset.py b/pydicom/dataset.py
index 69b68f7ef..6e0f82daf 100644
--- a/pydicom/dataset.py
+++ b/pydicom/dataset.py
@@ -400,8 +400,10 @@ class Dataset(dict):
# __dict__
# Convert values() to a list for compatibility between
# python 2 and 3
- return (list(self.values()) == list(other.values()) and
- self.__dict__ == other.__dict__)
+ # Sort values() by element tag
+ self_elem = sorted(list(self.values()), key=lambda x: x.tag)
+ other_elem = sorted(list(other.values()), key=lambda x: x.tag)
+ return self_elem == other_elem and self.__dict__ == other.__dict__
return NotImplemented
| Dataset equality fails when elements added in different order
#### Description
Dataset equality produces incorrect results
#### Steps/Code to Reproduce
```python
>>> from pydicom.dataset import Dataset
>>> ds = Dataset()
>>> ds.PatientName = 'Test'
>>> ds.PatientID = '1234'
>>> ds2 = Dataset()
>>> ds2.PatientID = '1234'
>>> ds2.PatientName = 'Test
>>> ds == ds2
False
>>> ds2 = Dataset()
>>> ds2.PatientName = 'Test'
>>> ds2.PatientID = '1234'
>>> ds == ds2
True
```
#### Versions
```python
>>> import platform; print(platform.platform())
Linux-4.10.0-30-generic-x86_64-with-Ubuntu-17.04-zesty
>>> import sys; print("Python", sys.version)
Python 3.6.1 (default, Mar 22 2017, 06:17:05)
[GCC 6.3.0 20170321]
>>> import pydicom; print("pydicom", pydicom.__version__)
pydicom 1.0.0a1
``` | pydicom/pydicom | diff --git a/pydicom/tests/test_dataset.py b/pydicom/tests/test_dataset.py
index 01dfa0453..f25160446 100644
--- a/pydicom/tests/test_dataset.py
+++ b/pydicom/tests/test_dataset.py
@@ -401,9 +401,11 @@ class DatasetTests(unittest.TestCase):
"""Dataset: equality returns correct value with simple dataset"""
d = Dataset()
d.SOPInstanceUID = '1.2.3.4'
+ d.PatientName = 'Test'
self.assertTrue(d == d)
e = Dataset()
+ e.PatientName = 'Test'
e.SOPInstanceUID = '1.2.3.4'
self.assertTrue(d == e)
@@ -457,6 +459,7 @@ class DatasetTests(unittest.TestCase):
d.SOPInstanceUID = '1.2.3.4'
d.BeamSequence = []
beam_seq = Dataset()
+ beam_seq.PatientID = '1234'
beam_seq.PatientName = 'ANON'
d.BeamSequence.append(beam_seq)
self.assertTrue(d == d)
@@ -466,6 +469,7 @@ class DatasetTests(unittest.TestCase):
e.BeamSequence = []
beam_seq = Dataset()
beam_seq.PatientName = 'ANON'
+ beam_seq.PatientID = '1234'
e.BeamSequence.append(beam_seq)
self.assertTrue(d == e)
| {
"commit_name": "merge_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 0.9 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
-e git+https://github.com/pydicom/pydicom.git@affb1cf10c6be2aca311c29ddddc622f8bd1f810#egg=pydicom
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: pydicom
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
prefix: /opt/conda/envs/pydicom
| [
"pydicom/tests/test_dataset.py::DatasetTests::testEqualityNoSequence",
"pydicom/tests/test_dataset.py::DatasetTests::testEqualitySequence"
] | [
"pydicom/tests/test_dataset.py::DatasetTests::test_get_item"
] | [
"pydicom/tests/test_dataset.py::DatasetTests::testAttributeErrorInProperty",
"pydicom/tests/test_dataset.py::DatasetTests::testDeleteDicomAttr",
"pydicom/tests/test_dataset.py::DatasetTests::testDeleteDicomAttrWeDontHave",
"pydicom/tests/test_dataset.py::DatasetTests::testDeleteDicomCommandGroupLength",
"pydicom/tests/test_dataset.py::DatasetTests::testDeleteItemLong",
"pydicom/tests/test_dataset.py::DatasetTests::testDeleteItemTuple",
"pydicom/tests/test_dataset.py::DatasetTests::testDeleteNonExistingItem",
"pydicom/tests/test_dataset.py::DatasetTests::testDeleteOtherAttr",
"pydicom/tests/test_dataset.py::DatasetTests::testEqualityInheritance",
"pydicom/tests/test_dataset.py::DatasetTests::testEqualityNotDataset",
"pydicom/tests/test_dataset.py::DatasetTests::testEqualityPrivate",
"pydicom/tests/test_dataset.py::DatasetTests::testEqualityUnknown",
"pydicom/tests/test_dataset.py::DatasetTests::testGetDefault1",
"pydicom/tests/test_dataset.py::DatasetTests::testGetDefault2",
"pydicom/tests/test_dataset.py::DatasetTests::testGetDefault3",
"pydicom/tests/test_dataset.py::DatasetTests::testGetDefault4",
"pydicom/tests/test_dataset.py::DatasetTests::testGetExists1",
"pydicom/tests/test_dataset.py::DatasetTests::testGetExists2",
"pydicom/tests/test_dataset.py::DatasetTests::testGetExists3",
"pydicom/tests/test_dataset.py::DatasetTests::testGetExists4",
"pydicom/tests/test_dataset.py::DatasetTests::testGetFromRaw",
"pydicom/tests/test_dataset.py::DatasetTests::testHash",
"pydicom/tests/test_dataset.py::DatasetTests::testMembership",
"pydicom/tests/test_dataset.py::DatasetTests::testSetExistingDataElementByName",
"pydicom/tests/test_dataset.py::DatasetTests::testSetNewDataElementByName",
"pydicom/tests/test_dataset.py::DatasetTests::testSetNonDicom",
"pydicom/tests/test_dataset.py::DatasetTests::testTagExceptionPrint",
"pydicom/tests/test_dataset.py::DatasetTests::testTagExceptionWalk",
"pydicom/tests/test_dataset.py::DatasetTests::testUpdate",
"pydicom/tests/test_dataset.py::DatasetTests::test_NamedMemberUpdated",
"pydicom/tests/test_dataset.py::DatasetTests::test__setitem__",
"pydicom/tests/test_dataset.py::DatasetTests::test_add_repeater_elem_by_keyword",
"pydicom/tests/test_dataset.py::DatasetTests::test_attribute_error_in_property_correct_debug",
"pydicom/tests/test_dataset.py::DatasetTests::test_contains",
"pydicom/tests/test_dataset.py::DatasetTests::test_data_element",
"pydicom/tests/test_dataset.py::DatasetTests::test_delitem_slice",
"pydicom/tests/test_dataset.py::DatasetTests::test_dir",
"pydicom/tests/test_dataset.py::DatasetTests::test_dir_filter",
"pydicom/tests/test_dataset.py::DatasetTests::test_empty_slice",
"pydicom/tests/test_dataset.py::DatasetTests::test_exit_exception",
"pydicom/tests/test_dataset.py::DatasetTests::test_formatted_lines",
"pydicom/tests/test_dataset.py::DatasetTests::test_get_pixel_array_already_have",
"pydicom/tests/test_dataset.py::DatasetTests::test_get_raises",
"pydicom/tests/test_dataset.py::DatasetTests::test_getitem_slice",
"pydicom/tests/test_dataset.py::DatasetTests::test_getitem_slice_raises",
"pydicom/tests/test_dataset.py::DatasetTests::test_group_dataset",
"pydicom/tests/test_dataset.py::DatasetTests::test_inequality",
"pydicom/tests/test_dataset.py::DatasetTests::test_is_uncompressed_transfer_syntax",
"pydicom/tests/test_dataset.py::DatasetTests::test_iterall",
"pydicom/tests/test_dataset.py::DatasetTests::test_matching_tags",
"pydicom/tests/test_dataset.py::DatasetTests::test_property",
"pydicom/tests/test_dataset.py::DatasetTests::test_remove_private_tags",
"pydicom/tests/test_dataset.py::DatasetTests::test_reshape_pixel_array_not_implemented",
"pydicom/tests/test_dataset.py::DatasetTests::test_save_as",
"pydicom/tests/test_dataset.py::DatasetTests::test_set_convert_private_elem_from_raw",
"pydicom/tests/test_dataset.py::DatasetTests::test_setitem_slice_raises",
"pydicom/tests/test_dataset.py::DatasetTests::test_top",
"pydicom/tests/test_dataset.py::DatasetTests::test_trait_names",
"pydicom/tests/test_dataset.py::DatasetTests::test_walk",
"pydicom/tests/test_dataset.py::DatasetTests::test_with",
"pydicom/tests/test_dataset.py::DatasetElementsTests::testSequenceAssignment",
"pydicom/tests/test_dataset.py::FileDatasetTests::test_creation_with_container",
"pydicom/tests/test_dataset.py::FileDatasetTests::test_equality_file_meta"
] | [] | MIT License | 1,585 | 201 | [
"pydicom/dataset.py"
] |
OpenMined__PySyft-122 | 16dae46b154dd755c18e941409b4ec771f5e2ca5 | 2017-08-12 11:07:35 | 16dae46b154dd755c18e941409b4ec771f5e2ca5 | diff --git a/syft/tensor.py b/syft/tensor.py
index 6d1ce8dc30..7d549da9bb 100644
--- a/syft/tensor.py
+++ b/syft/tensor.py
@@ -8,7 +8,7 @@ def _ensure_ndarray(arr):
class TensorBase(object):
"""
- A base tensor class that perform basic element-wise operation such as
+ A base tensor class that perform basic element-wise operation such as
addition, subtraction, multiplication and division
"""
@@ -83,3 +83,20 @@ class TensorBase(object):
arr_like = _ensure_ndarray(arr_like)
self.data = self.data / arr_like
return self.data
+
+ def shape(self):
+ """Returns a tuple of input array dimensions."""
+ if self.encrypted:
+ return NotImplemented
+
+ return self.data.shape
+
+ def sum(self, dim=None):
+ """Returns the sum of all elements in the input array."""
+ if self.encrypted:
+ return NotImplemented
+
+ if dim is None:
+ return self.data.sum()
+ else:
+ return self.data.sum(axis=dim)
| Implement Default sum Functionality for Base Tensor Type
**User Story A:** As a Data Scientist using Syft's Base Tensor type, I want to leverage a default method for computing operations on a Tensor of arbitrary type. For this ticket to be complete, sum() should return a new tensor. For a reference on the operation this performs check out [PyTorch](http://pytorch.org/docs/master/tensors.html)'s documentation.
**Acceptance Criteria:**
- If the Base Tensor type's attribute "encrypted" is set to True, it should return a NotImplemented error.
- a unit test demonstrating the correct operation on the Base Tensor type implemented over int and float Tensors.
- inline documentation in the python code. For inspiration on inline documentation, please check out [PyTorch](http://pytorch.org/docs/master/tensors.html)'s documentation for this operator. | OpenMined/PySyft | diff --git a/tests/test_tensor.py b/tests/test_tensor.py
index ae9cb22b63..8222b8250d 100644
--- a/tests/test_tensor.py
+++ b/tests/test_tensor.py
@@ -7,7 +7,7 @@ class AddTests(unittest.TestCase):
def testSimple(self):
t = TensorBase(np.array([1,2,3]))
self.assertTrue(np.array_equal(t + np.array([1,2,3]), [2,4,6]))
-
+
def testInplace(self):
t = TensorBase(np.array([1,2,3]))
t += np.array([1,2,3])
@@ -21,7 +21,7 @@ class SubTests(unittest.TestCase):
def testSimple(self):
t = TensorBase(np.array([1,2,3]))
self.assertTrue(np.array_equal(t - np.array([1,2,3]), [0,0,0]))
-
+
def testInplace(self):
t = TensorBase(np.array([1,2,3]))
t -= np.array([1,2,3])
@@ -58,9 +58,23 @@ class DivTests(unittest.TestCase):
def testScalar(self):
t = TensorBase(np.array([2,4,6]))
self.assertTrue(np.array_equal(t / 2, [1, 2, 3]))
-
+
+class ShapeTests(unittest.TestCase):
+ def testShape(self):
+ t = TensorBase(np.array([[0, 1], [0, 5]]))
+ self.assertTrue(np.array_equal(t.shape(), (2, 2)))
+
+class SumTests(unittest.TestCase):
+ def testDimNoneInt(self):
+ t = TensorBase(np.array([1,2,3]))
+ self.assertTrue(np.array_equal(t.sum(), 6))
+
+ def testDimIsNotNoneInt(self):
+ t = TensorBase(np.array([[0, 1], [0, 5]]))
+ self.assertTrue(np.array_equal(t.sum(dim=1), [1, 5]))
+
def main():
unittest.main()
if __name__ == '__main__':
- main()
\ No newline at end of file
+ main()
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 1
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | anyio==4.9.0
argon2-cffi==23.1.0
argon2-cffi-bindings==21.2.0
args==0.1.0
arrow==1.3.0
asttokens==3.0.0
async-lru==2.0.5
attrs==25.3.0
babel==2.17.0
beautifulsoup4==4.13.3
bleach==6.2.0
certifi==2025.1.31
cffi==1.17.1
charset-normalizer==3.4.1
clint==0.5.1
comm==0.2.2
debugpy==1.8.13
decorator==5.2.1
defusedxml==0.7.1
exceptiongroup==1.2.2
executing==2.2.0
fastjsonschema==2.21.1
fqdn==1.5.1
h11==0.14.0
httpcore==1.0.7
httpx==0.28.1
idna==3.10
importlib_metadata==8.6.1
iniconfig==2.1.0
ipykernel==6.29.5
ipython==8.18.1
ipywidgets==8.1.5
isoduration==20.11.0
jedi==0.19.2
Jinja2==3.1.6
json5==0.10.0
jsonpointer==3.0.0
jsonschema==4.23.0
jsonschema-specifications==2024.10.1
jupyter==1.1.1
jupyter-console==6.6.3
jupyter-events==0.12.0
jupyter-lsp==2.2.5
jupyter_client==8.6.3
jupyter_core==5.7.2
jupyter_server==2.15.0
jupyter_server_terminals==0.5.3
jupyterlab==4.3.6
jupyterlab_pygments==0.3.0
jupyterlab_server==2.27.3
jupyterlab_widgets==3.0.13
line_profiler==4.2.0
MarkupSafe==3.0.2
matplotlib-inline==0.1.7
mistune==3.1.3
nbclient==0.10.2
nbconvert==7.16.6
nbformat==5.10.4
nest-asyncio==1.6.0
notebook==7.3.3
notebook_shim==0.2.4
numpy==1.26.4
overrides==7.7.0
packaging==24.2
pandocfilters==1.5.1
parso==0.8.4
pexpect==4.9.0
phe==1.5.0
platformdirs==4.3.7
pluggy==1.5.0
prometheus_client==0.21.1
prompt_toolkit==3.0.50
psutil==7.0.0
ptyprocess==0.7.0
pure_eval==0.2.3
pycparser==2.22
Pygments==2.19.1
pyRserve==1.0.4
pytest==8.3.5
python-dateutil==2.9.0.post0
python-json-logger==3.3.0
PyYAML==6.0.2
pyzmq==26.3.0
referencing==0.36.2
requests==2.32.3
rfc3339-validator==0.1.4
rfc3986-validator==0.1.1
rpds-py==0.24.0
Send2Trash==1.8.3
six==1.17.0
sniffio==1.3.1
soupsieve==2.6
stack-data==0.6.3
-e git+https://github.com/OpenMined/PySyft.git@16dae46b154dd755c18e941409b4ec771f5e2ca5#egg=syft
terminado==0.18.1
tinycss2==1.4.0
tomli==2.2.1
tornado==6.4.2
traitlets==5.14.3
types-python-dateutil==2.9.0.20241206
typing_extensions==4.13.0
uri-template==1.3.0
urllib3==2.3.0
wcwidth==0.2.13
webcolors==24.11.1
webencodings==0.5.1
websocket-client==1.8.0
widgetsnbextension==4.0.13
zipp==3.21.0
| name: PySyft
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- anyio==4.9.0
- argon2-cffi==23.1.0
- argon2-cffi-bindings==21.2.0
- args==0.1.0
- arrow==1.3.0
- asttokens==3.0.0
- async-lru==2.0.5
- attrs==25.3.0
- babel==2.17.0
- beautifulsoup4==4.13.3
- bleach==6.2.0
- certifi==2025.1.31
- cffi==1.17.1
- charset-normalizer==3.4.1
- clint==0.5.1
- comm==0.2.2
- debugpy==1.8.13
- decorator==5.2.1
- defusedxml==0.7.1
- exceptiongroup==1.2.2
- executing==2.2.0
- fastjsonschema==2.21.1
- fqdn==1.5.1
- h11==0.14.0
- httpcore==1.0.7
- httpx==0.28.1
- idna==3.10
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- ipykernel==6.29.5
- ipython==8.18.1
- ipywidgets==8.1.5
- isoduration==20.11.0
- jedi==0.19.2
- jinja2==3.1.6
- json5==0.10.0
- jsonpointer==3.0.0
- jsonschema==4.23.0
- jsonschema-specifications==2024.10.1
- jupyter==1.1.1
- jupyter-client==8.6.3
- jupyter-console==6.6.3
- jupyter-core==5.7.2
- jupyter-events==0.12.0
- jupyter-lsp==2.2.5
- jupyter-server==2.15.0
- jupyter-server-terminals==0.5.3
- jupyterlab==4.3.6
- jupyterlab-pygments==0.3.0
- jupyterlab-server==2.27.3
- jupyterlab-widgets==3.0.13
- line-profiler==4.2.0
- markupsafe==3.0.2
- matplotlib-inline==0.1.7
- mistune==3.1.3
- nbclient==0.10.2
- nbconvert==7.16.6
- nbformat==5.10.4
- nest-asyncio==1.6.0
- notebook==7.3.3
- notebook-shim==0.2.4
- numpy==1.26.4
- overrides==7.7.0
- packaging==24.2
- pandocfilters==1.5.1
- parso==0.8.4
- pexpect==4.9.0
- phe==1.5.0
- platformdirs==4.3.7
- pluggy==1.5.0
- prometheus-client==0.21.1
- prompt-toolkit==3.0.50
- psutil==7.0.0
- ptyprocess==0.7.0
- pure-eval==0.2.3
- pycparser==2.22
- pygments==2.19.1
- pyrserve==1.0.4
- pytest==8.3.5
- python-dateutil==2.9.0.post0
- python-json-logger==3.3.0
- pyyaml==6.0.2
- pyzmq==26.3.0
- referencing==0.36.2
- requests==2.32.3
- rfc3339-validator==0.1.4
- rfc3986-validator==0.1.1
- rpds-py==0.24.0
- send2trash==1.8.3
- six==1.17.0
- sniffio==1.3.1
- soupsieve==2.6
- stack-data==0.6.3
- terminado==0.18.1
- tinycss2==1.4.0
- tomli==2.2.1
- tornado==6.4.2
- traitlets==5.14.3
- types-python-dateutil==2.9.0.20241206
- typing-extensions==4.13.0
- uri-template==1.3.0
- urllib3==2.3.0
- wcwidth==0.2.13
- webcolors==24.11.1
- webencodings==0.5.1
- websocket-client==1.8.0
- widgetsnbextension==4.0.13
- zipp==3.21.0
prefix: /opt/conda/envs/PySyft
| [
"tests/test_tensor.py::ShapeTests::testShape",
"tests/test_tensor.py::SumTests::testDimIsNotNoneInt",
"tests/test_tensor.py::SumTests::testDimNoneInt"
] | [] | [
"tests/test_tensor.py::AddTests::testInplace",
"tests/test_tensor.py::AddTests::testScalar",
"tests/test_tensor.py::AddTests::testSimple",
"tests/test_tensor.py::SubTests::testInplace",
"tests/test_tensor.py::SubTests::testScalar",
"tests/test_tensor.py::SubTests::testSimple",
"tests/test_tensor.py::MultTests::testInplace",
"tests/test_tensor.py::MultTests::testScalar",
"tests/test_tensor.py::MultTests::testSimple",
"tests/test_tensor.py::DivTests::testInplace",
"tests/test_tensor.py::DivTests::testScalar",
"tests/test_tensor.py::DivTests::testSimple"
] | [] | Apache License 2.0 | 1,586 | 277 | [
"syft/tensor.py"
] |
|
google__mobly-311 | 25b676a196403ef3e1d2f7516008d58d3649d888 | 2017-08-16 00:27:44 | 9bb2ab41518a2f037178888f9e606fc42394ffb0 | diff --git a/mobly/signals.py b/mobly/signals.py
index 8899065..85bdc30 100644
--- a/mobly/signals.py
+++ b/mobly/signals.py
@@ -46,6 +46,10 @@ class TestSignal(Exception):
return 'Details=%s, Extras=%s' % (self.details, self.extras)
+class TestError(TestSignal):
+ """Raised when a test has an unexpected error."""
+
+
class TestFailure(TestSignal):
"""Raised when a test has failed."""
| Exceptions in `setup_test` should leave the test in `ERROR` status
Regardless of the type of the exception, `setup_test` error should cause `ERROR` status.
This is different from a test method.
In a test method, an exception based on signals.TestFailure should cause the test to exit with `FAILED` status.
This is to be consistent with pyunit's behavior. | google/mobly | diff --git a/mobly/base_test.py b/mobly/base_test.py
index 5233aa5..649f6d5 100644
--- a/mobly/base_test.py
+++ b/mobly/base_test.py
@@ -17,6 +17,7 @@ import copy
import functools
import inspect
import logging
+import sys
from mobly import logger
from mobly import records
@@ -175,6 +176,19 @@ class BaseTestClass(object):
Implementation is optional.
"""
+ def _teardown_class(self):
+ """Proxy function to guarantee the base implementation of
+ teardown_class is called.
+ """
+ record = records.TestResultRecord('teardown_class', self.TAG)
+ record.test_begin()
+ try:
+ self.teardown_class()
+ except Exception as e:
+ record.test_error(e)
+ record.update_record()
+ self.results.add_class_error(record)
+
def teardown_class(self):
"""Teardown function that will be called after all the selected tests in
the test class have been executed.
@@ -316,7 +330,7 @@ class BaseTestClass(object):
Executes setup_test, the test method, and teardown_test; then creates a
records.TestResultRecord object with the execution information and adds
- the record to the test class's test results.
+ the record to the test class's test result s.
Args:
test_name: Name of the test.
@@ -330,7 +344,12 @@ class BaseTestClass(object):
teardown_test_failed = False
try:
try:
- self._setup_test(test_name)
+ try:
+ self._setup_test(test_name)
+ except signals.TestFailure as e:
+ new_e = signals.TestError(e.details, e.extras)
+ _, _, new_e.__traceback__ = sys.exc_info()
+ raise new_e
if args or kwargs:
test_method(*args, **kwargs)
else:
@@ -563,7 +582,7 @@ class BaseTestClass(object):
# Skip all tests peacefully.
e.details = 'setup_class aborted due to: %s' % e.details
self._skip_remaining_tests(e)
- self._safe_exec_func(self.teardown_class)
+ self._teardown_class()
return self.results
except Exception as e:
# Setup class failed for unknown reasons.
@@ -577,7 +596,7 @@ class BaseTestClass(object):
self.summary_writer.dump(class_record.to_dict(),
records.TestSummaryEntryType.RECORD)
self._skip_remaining_tests(e)
- self._safe_exec_func(self.teardown_class)
+ self._teardown_class()
return self.results
# Run tests in order.
try:
@@ -594,7 +613,7 @@ class BaseTestClass(object):
setattr(e, 'results', self.results)
raise e
finally:
- self._safe_exec_func(self.teardown_class)
+ self._teardown_class()
logging.info('Summary for test class %s: %s', self.TAG,
self.results.summary_str())
diff --git a/tests/mobly/base_test_test.py b/tests/mobly/base_test_test.py
index bd7dce9..d615f3f 100755
--- a/tests/mobly/base_test_test.py
+++ b/tests/mobly/base_test_test.py
@@ -203,6 +203,28 @@ class BaseTestTest(unittest.TestCase):
teardown_class_call_check.assert_called_once_with("heehee")
on_fail_call_check.assert_called_once_with("haha")
+ def test_teardown_class_fail_by_exception(self):
+ class MockBaseTest(base_test.BaseTestClass):
+ def test_something(self):
+ pass
+
+ def teardown_class(self):
+ raise Exception(MSG_EXPECTED_EXCEPTION)
+
+ bt_cls = MockBaseTest(self.mock_test_cls_configs)
+ bt_cls.run()
+ test_record = bt_cls.results.passed[0]
+ class_record = bt_cls.results.error[0]
+ self.assertFalse(bt_cls.results.is_all_pass)
+ self.assertEqual(class_record.test_name, 'teardown_class')
+ self.assertEqual(class_record.details, MSG_EXPECTED_EXCEPTION)
+ self.assertIsNotNone(class_record.begin_time)
+ self.assertIsNotNone(class_record.end_time)
+ self.assertIsNone(class_record.extras)
+ expected_summary = ('Error 1, Executed 1, Failed 0, Passed 1, '
+ 'Requested 1, Skipped 0')
+ self.assertEqual(bt_cls.results.summary_str(), expected_summary)
+
def test_setup_test_fail_by_exception(self):
mock_on_fail = mock.Mock()
@@ -223,6 +245,10 @@ class BaseTestTest(unittest.TestCase):
actual_record = bt_cls.results.error[0]
self.assertEqual(actual_record.test_name, self.mock_test_name)
self.assertEqual(actual_record.details, MSG_EXPECTED_EXCEPTION)
+ self.assertTrue('in setup_test\n '
+ 'raise Exception(MSG_EXPECTED_EXCEPTION)\n'
+ 'Exception: This is an expected exception.\n' in
+ actual_record.stacktrace)
self.assertIsNone(actual_record.extras)
expected_summary = ("Error 1, Executed 1, Failed 0, Passed 0, "
"Requested 1, Skipped 0")
@@ -239,11 +265,13 @@ class BaseTestTest(unittest.TestCase):
bt_cls = MockBaseTest(self.mock_test_cls_configs)
bt_cls.run(test_names=["test_something"])
- actual_record = bt_cls.results.failed[0]
+ actual_record = bt_cls.results.error[0]
self.assertEqual(actual_record.test_name, self.mock_test_name)
self.assertEqual(actual_record.details, MSG_EXPECTED_EXCEPTION)
+ # Make sure the full stacktrace of `setup_test` is preserved.
+ self.assertTrue('self.setup_test()' in actual_record.stacktrace)
self.assertIsNone(actual_record.extras)
- expected_summary = ("Error 0, Executed 1, Failed 1, Passed 0, "
+ expected_summary = ("Error 1, Executed 1, Failed 0, Passed 0, "
"Requested 1, Skipped 0")
self.assertEqual(bt_cls.results.summary_str(), expected_summary)
@@ -407,6 +435,7 @@ class BaseTestTest(unittest.TestCase):
def test_procedure_function_gets_correct_record(self):
on_fail_mock = mock.MagicMock()
+
class MockBaseTest(base_test.BaseTestClass):
def on_fail(self, record):
on_fail_mock.record = record
@@ -418,12 +447,16 @@ class BaseTestTest(unittest.TestCase):
bt_cls.run()
actual_record = bt_cls.results.failed[0]
self.assertEqual(actual_record.test_name, 'test_something')
- self.assertEqual(on_fail_mock.record.test_name, actual_record.test_name)
- self.assertEqual(on_fail_mock.record.begin_time, actual_record.begin_time)
+ self.assertEqual(on_fail_mock.record.test_name,
+ actual_record.test_name)
+ self.assertEqual(on_fail_mock.record.begin_time,
+ actual_record.begin_time)
self.assertEqual(on_fail_mock.record.end_time, actual_record.end_time)
- self.assertEqual(on_fail_mock.record.stacktrace, actual_record.stacktrace)
+ self.assertEqual(on_fail_mock.record.stacktrace,
+ actual_record.stacktrace)
self.assertEqual(on_fail_mock.record.extras, actual_record.extras)
- self.assertEqual(on_fail_mock.record.extra_errors, actual_record.extra_errors)
+ self.assertEqual(on_fail_mock.record.extra_errors,
+ actual_record.extra_errors)
# But they are not the same object.
self.assertIsNot(on_fail_mock.record, actual_record)
@@ -989,6 +1022,23 @@ class BaseTestTest(unittest.TestCase):
self.assertEqual(actual_record.details, MSG_EXPECTED_EXCEPTION)
self.assertEqual(actual_record.extras, MOCK_EXTRA)
+ def test_skip_in_setup_test(self):
+ class MockBaseTest(base_test.BaseTestClass):
+ def setup_test(self):
+ asserts.skip(MSG_EXPECTED_EXCEPTION, extras=MOCK_EXTRA)
+
+ def test_func(self):
+ never_call()
+
+ bt_cls = MockBaseTest(self.mock_test_cls_configs)
+ bt_cls.run(test_names=["test_func"])
+ actual_record = bt_cls.results.skipped[0]
+ self.assertIsNotNone(actual_record.begin_time)
+ self.assertIsNotNone(actual_record.end_time)
+ self.assertEqual(actual_record.test_name, "test_func")
+ self.assertEqual(actual_record.details, MSG_EXPECTED_EXCEPTION)
+ self.assertEqual(actual_record.extras, MOCK_EXTRA)
+
def test_unpack_userparams_required(self):
"""Missing a required param should raise an error."""
required = ["some_param"]
| {
"commit_name": "merge_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 1
} | 1.5 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
future==1.0.0
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
-e git+https://github.com/google/mobly.git@25b676a196403ef3e1d2f7516008d58d3649d888#egg=mobly
mock==1.0.1
packaging @ file:///croot/packaging_1734472117206/work
pluggy @ file:///croot/pluggy_1733169602837/work
portpicker==1.6.0
psutil==7.0.0
pytest @ file:///croot/pytest_1738938843180/work
pytz==2025.2
PyYAML==6.0.2
timeout-decorator==0.5.0
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
| name: mobly
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- future==1.0.0
- mock==1.0.1
- portpicker==1.6.0
- psutil==7.0.0
- pytz==2025.2
- pyyaml==6.0.2
- timeout-decorator==0.5.0
prefix: /opt/conda/envs/mobly
| [
"tests/mobly/base_test_test.py::BaseTestTest::test_setup_test_fail_by_test_signal"
] | [] | [
"tests/mobly/base_test_test.py::BaseTestTest::test_abort_class_in_test",
"tests/mobly/base_test_test.py::BaseTestTest::test_abort_setup_class",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_equal_fail",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_equal_fail_with_msg",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_equal_pass",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_fail_with_noop",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_fail_with_wrong_error",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_fail_with_wrong_regex",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_pass",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_regex_fail_with_noop",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_regex_fail_with_wrong_error",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_regex_pass",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_true",
"tests/mobly/base_test_test.py::BaseTestTest::test_both_teardown_and_test_body_raise_exceptions",
"tests/mobly/base_test_test.py::BaseTestTest::test_cli_test_selection_fail_by_convention",
"tests/mobly/base_test_test.py::BaseTestTest::test_cli_test_selection_override_self_tests_list",
"tests/mobly/base_test_test.py::BaseTestTest::test_current_test_name",
"tests/mobly/base_test_test.py::BaseTestTest::test_default_execution_of_all_tests",
"tests/mobly/base_test_test.py::BaseTestTest::test_exception_objects_in_record",
"tests/mobly/base_test_test.py::BaseTestTest::test_explicit_pass",
"tests/mobly/base_test_test.py::BaseTestTest::test_explicit_pass_but_teardown_test_raises_an_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_fail",
"tests/mobly/base_test_test.py::BaseTestTest::test_failure_in_procedure_functions_is_recorded",
"tests/mobly/base_test_test.py::BaseTestTest::test_failure_to_call_procedure_function_is_recorded",
"tests/mobly/base_test_test.py::BaseTestTest::test_generate_tests_call_outside_of_setup_generated_tests",
"tests/mobly/base_test_test.py::BaseTestTest::test_generate_tests_dup_test_name",
"tests/mobly/base_test_test.py::BaseTestTest::test_generate_tests_run",
"tests/mobly/base_test_test.py::BaseTestTest::test_generate_tests_selected_run",
"tests/mobly/base_test_test.py::BaseTestTest::test_implicit_pass",
"tests/mobly/base_test_test.py::BaseTestTest::test_missing_requested_test_func",
"tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_cannot_modify_original_record",
"tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_executed_if_both_test_and_teardown_test_fails",
"tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_executed_if_teardown_test_fails",
"tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_executed_if_test_fails",
"tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_executed_if_test_setup_fails_by_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_raise_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_on_pass_cannot_modify_original_record",
"tests/mobly/base_test_test.py::BaseTestTest::test_on_pass_raise_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_procedure_function_gets_correct_record",
"tests/mobly/base_test_test.py::BaseTestTest::test_promote_extra_errors_to_termination_signal",
"tests/mobly/base_test_test.py::BaseTestTest::test_self_tests_list",
"tests/mobly/base_test_test.py::BaseTestTest::test_self_tests_list_fail_by_convention",
"tests/mobly/base_test_test.py::BaseTestTest::test_setup_and_teardown_execution_count",
"tests/mobly/base_test_test.py::BaseTestTest::test_setup_class_fail_by_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_setup_test_fail_by_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_skip",
"tests/mobly/base_test_test.py::BaseTestTest::test_skip_if",
"tests/mobly/base_test_test.py::BaseTestTest::test_skip_in_setup_test",
"tests/mobly/base_test_test.py::BaseTestTest::test_teardown_class_fail_by_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_teardown_test_assert_fail",
"tests/mobly/base_test_test.py::BaseTestTest::test_teardown_test_executed_if_setup_test_fails",
"tests/mobly/base_test_test.py::BaseTestTest::test_teardown_test_executed_if_test_fails",
"tests/mobly/base_test_test.py::BaseTestTest::test_teardown_test_executed_if_test_pass",
"tests/mobly/base_test_test.py::BaseTestTest::test_teardown_test_raise_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_uncaught_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_basic",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_default_None",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_default_overwrite",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_default_overwrite_by_optional_param_list",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_default_overwrite_by_required_param_list",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_optional",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_optional_missing",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_optional_with_default",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_required",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_required_missing"
] | [] | Apache License 2.0 | 1,592 | 136 | [
"mobly/signals.py"
] |
|
python-odin__odinweb-11 | cc4650b45a90fa41346ed53cf970cf0da414a44a | 2017-08-21 14:23:31 | cc4650b45a90fa41346ed53cf970cf0da414a44a | diff --git a/odinweb/containers.py b/odinweb/containers.py
index 43622c4..975da81 100644
--- a/odinweb/containers.py
+++ b/odinweb/containers.py
@@ -274,6 +274,17 @@ class ApiInterfaceBase(ApiContainer):
"""
Handle an *un-handled* exception.
"""
+ # Let middleware attempt to handle exception
+ try:
+ for middleware in self.middleware.handle_500:
+ resource = middleware(request, exception)
+ if resource:
+ return resource
+
+ except Exception as ex: # noqa - This is a top level handler
+ exception = ex
+
+ # Fallback to generic error
logger.exception('Internal Server Error: %s', exception, extra={
'status_code': 500,
'request': request
@@ -343,7 +354,12 @@ class ApiInterfaceBase(ApiContainer):
# error processing, this often provides convenience features
# to aid in the debugging process.
raise
- resource = self.handle_500(request, e)
+
+ resource = None
+ # Fallback to the default handler
+ if resource is None:
+ resource = self.handle_500(request, e)
+
status = resource.status
if isinstance(status, HTTPStatus):
diff --git a/odinweb/data_structures.py b/odinweb/data_structures.py
index cd79032..3ec6db0 100644
--- a/odinweb/data_structures.py
+++ b/odinweb/data_structures.py
@@ -439,6 +439,14 @@ class MiddlewareList(list):
middleware = sort_by_priority(self, reverse=True)
return tuple(m.post_dispatch for m in middleware if hasattr(m, 'post_dispatch'))
+ @lazy_property
+ def handle_500(self):
+ """
+ List of handle-error methods from registered middleware.
+ """
+ middleware = sort_by_priority(self, reverse=True)
+ return tuple(m.handle_500 for m in middleware if hasattr(m, 'handle_500'))
+
@lazy_property
def post_swagger(self):
"""
| Added error hook for middleware
Allow middleware to catch errors.
Returning an explicit `True` indicates that the exception has been handled | python-odin/odinweb | diff --git a/tests/test_containers.py b/tests/test_containers.py
index 475acd4..0be6c61 100644
--- a/tests/test_containers.py
+++ b/tests/test_containers.py
@@ -1,6 +1,7 @@
from __future__ import absolute_import
import pytest
+from odinweb.resources import Error
from odin.exceptions import ValidationError
from odinweb import api
@@ -302,6 +303,37 @@ class TestApiInterfaceBase(object):
with pytest.raises(ValueError):
target.dispatch(operation, MockRequest())
+ def test_dispatch__error_handled_by_middleware(self):
+ class ErrorMiddleware(object):
+ def handle_500(self, request, exception):
+ assert isinstance(exception, ValueError)
+ return Error.from_status(HTTPStatus.SEE_OTHER, 0,
+ "Quick over there...")
+
+ def callback(request):
+ raise ValueError()
+
+ target = containers.ApiInterfaceBase(middleware=[ErrorMiddleware()])
+ operation = Operation(callback)
+
+ actual = target.dispatch(operation, MockRequest())
+ assert actual.status == 303
+
+ def test_dispatch__error_handled_by_middleware_raises_exception(self):
+ class ErrorMiddleware(object):
+ def handle_500(self, request, exception):
+ assert isinstance(exception, ValueError)
+ raise ValueError
+
+ def callback(request):
+ raise ValueError()
+
+ target = containers.ApiInterfaceBase(middleware=[ErrorMiddleware()])
+ operation = Operation(callback)
+
+ actual = target.dispatch(operation, MockRequest())
+ assert actual.status == 500
+
def test_dispatch__encode_error_with_debug_enabled(self):
def callback(request):
raise api.ImmediateHttpResponse(ValueError, HTTPStatus.NOT_MODIFIED, {})
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 2
} | 0.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"pip install -r requirements-dev.txt"
],
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
coverage==6.2
importlib-metadata==4.8.3
iniconfig==1.1.1
mock==5.2.0
odin==1.8.1
-e git+https://github.com/python-odin/odinweb.git@cc4650b45a90fa41346ed53cf970cf0da414a44a#egg=odinweb
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
pytest-cov==4.0.0
pytest-mock==3.6.1
six==1.17.0
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: odinweb
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- coverage==6.2
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- mock==5.2.0
- odin==1.8.1
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-cov==4.0.0
- pytest-mock==3.6.1
- six==1.17.0
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/odinweb
| [
"tests/test_containers.py::TestApiInterfaceBase::test_dispatch__error_handled_by_middleware"
] | [
"tests/test_containers.py::TestApiContainer::test_extra_option"
] | [
"tests/test_containers.py::TestResourceApiMeta::test_empty_api",
"tests/test_containers.py::TestResourceApiMeta::test_normal_api",
"tests/test_containers.py::TestResourceApiMeta::test_sub_classed_api",
"tests/test_containers.py::TestResourceApi::test_api_name__default",
"tests/test_containers.py::TestResourceApi::test_api_name__custom",
"tests/test_containers.py::TestResourceApi::test_op_paths",
"tests/test_containers.py::TestApiContainer::test_options[options0-name-None]",
"tests/test_containers.py::TestApiContainer::test_options[options1-name-foo]",
"tests/test_containers.py::TestApiContainer::test_options[options2-path_prefix-value2]",
"tests/test_containers.py::TestApiContainer::test_options[options3-path_prefix-value3]",
"tests/test_containers.py::TestApiContainer::test_options[options4-path_prefix-value4]",
"tests/test_containers.py::TestApiContainer::test_op_paths",
"tests/test_containers.py::TestApiContainer::test_op_paths__no_sub_path",
"tests/test_containers.py::TestApiCollection::test_version_options[options0-1-v1-path_prefix0]",
"tests/test_containers.py::TestApiCollection::test_version_options[options1-2-v2-path_prefix1]",
"tests/test_containers.py::TestApiCollection::test_version_options[options2-3-version-3-path_prefix2]",
"tests/test_containers.py::TestApiCollection::test_register_operation",
"tests/test_containers.py::TestApiInterfaceBase::test_options[options0-api-False-path_prefix0]",
"tests/test_containers.py::TestApiInterfaceBase::test_options[options1-!api-False-path_prefix1]",
"tests/test_containers.py::TestApiInterfaceBase::test_options[options2-api-False-path_prefix2]",
"tests/test_containers.py::TestApiInterfaceBase::test_options[options3-api-True-path_prefix3]",
"tests/test_containers.py::TestApiInterfaceBase::test_init_non_absolute",
"tests/test_containers.py::TestApiInterfaceBase::test_dispatch",
"tests/test_containers.py::TestApiInterfaceBase::test_dispatch__invalid_headers[r0-422-Unprocessable",
"tests/test_containers.py::TestApiInterfaceBase::test_dispatch__invalid_headers[r1-406-URI",
"tests/test_containers.py::TestApiInterfaceBase::test_dispatch__invalid_headers[r2-405-Specified",
"tests/test_containers.py::TestApiInterfaceBase::test_dispatch__exceptions[error0-HTTPStatus.NOT_MODIFIED]",
"tests/test_containers.py::TestApiInterfaceBase::test_dispatch__exceptions[error1-400]",
"tests/test_containers.py::TestApiInterfaceBase::test_dispatch__exceptions[error2-400]",
"tests/test_containers.py::TestApiInterfaceBase::test_dispatch__exceptions[NotImplementedError-501]",
"tests/test_containers.py::TestApiInterfaceBase::test_dispatch__exceptions[ValueError-500]",
"tests/test_containers.py::TestApiInterfaceBase::test_dispatch__exceptions[error5-500]",
"tests/test_containers.py::TestApiInterfaceBase::test_dispatch__with_middleware",
"tests/test_containers.py::TestApiInterfaceBase::test_dispatch__error_with_debug_enabled",
"tests/test_containers.py::TestApiInterfaceBase::test_dispatch__error_handled_by_middleware_raises_exception",
"tests/test_containers.py::TestApiInterfaceBase::test_dispatch__encode_error_with_debug_enabled",
"tests/test_containers.py::TestApiInterfaceBase::test_dispatch__http_response",
"tests/test_containers.py::TestApiInterfaceBase::test_op_paths",
"tests/test_containers.py::TestApiInterfaceBase::test_op_paths__collate_methods"
] | [] | BSD 3-Clause "New" or "Revised" License | 1,606 | 524 | [
"odinweb/containers.py",
"odinweb/data_structures.py"
] |
|
pydicom__pydicom-490 | 1bd33e3ceec19d45844676bdd25367fda4c5319b | 2017-08-21 19:51:21 | bef49851e7c3b70edd43cc40fc84fe905e78d5ba | pep8speaks: Hello @mrbean-bremen! Thanks for updating the PR.
- In the file [`pydicom/filewriter.py`](https://github.com/pydicom/pydicom/blob/dd7516dd80edd1270b7b8fac567b5dfc9aa4e1e1/pydicom/filewriter.py), following are the PEP8 issues :
> [Line 417:80](https://github.com/pydicom/pydicom/blob/dd7516dd80edd1270b7b8fac567b5dfc9aa4e1e1/pydicom/filewriter.py#L417): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (81 > 79 characters)
> [Line 421:80](https://github.com/pydicom/pydicom/blob/dd7516dd80edd1270b7b8fac567b5dfc9aa4e1e1/pydicom/filewriter.py#L421): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (88 > 79 characters)
> [Line 423:80](https://github.com/pydicom/pydicom/blob/dd7516dd80edd1270b7b8fac567b5dfc9aa4e1e1/pydicom/filewriter.py#L423): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (88 > 79 characters)
darcymason: @glemaitre, I'm assuming no further comments and going ahead to merge; please add a comment if needed. | diff --git a/pydicom/filewriter.py b/pydicom/filewriter.py
index 4181cce16..efb596845 100644
--- a/pydicom/filewriter.py
+++ b/pydicom/filewriter.py
@@ -415,6 +415,16 @@ def write_data_element(fp, data_element, encoding=default_encoding):
if (hasattr(data_element, "is_undefined_length")
and data_element.is_undefined_length):
is_undefined_length = True
+ # valid pixel data with undefined length shall contain encapsulated
+ # data, e.g. sequence items - raise ValueError otherwise (see #238)
+ if data_element.tag == 0x7fe00010: # pixel data
+ val = data_element.value
+ if (fp.is_little_endian and not
+ val.startswith(b'\xfe\xff\x00\xe0') or
+ not fp.is_little_endian and
+ not val.startswith(b'\xff\xfe\xe0\x00')):
+ raise ValueError('Pixel Data with undefined length must '
+ 'start with an item tag')
location = fp.tell()
fp.seek(length_location)
if not fp.is_implicit_VR and VR not in extra_length_VRs:
| Malformed PixelData
I am trying to convert color spaces using:
``` python
arr = convert_ybr_to_rgb(ds.pixel_array)
ds.file_meta.TransferSyntaxUID = pydicom.uid.ExplicitVRLittleEndian
ds.is_little_endian = True
ds.is_implicit_VR = False
ds.PixelData = arr.tostring()
ds.PlanarConfiguration = 0
ds.PhotometricInterpretation = 'RGB'
```
However `dcmcjpeg` then barfs when it tries to open the saved file with:
```
W: DcmSequenceOfItems: Length of item in sequence PixelData (7fe0,0010) is odd
E: DcmSequenceOfItems: Parse error in sequence (7fe0,0010), found (292a,2a48) instead of sequence delimiter (fffe,e0dd)
F: Sequence Delimitation Item missing: reading file: /var/folders/nk/5v0p39pn4yg7c_3vtydljk000000gn/T/tmpLZfsSL.dcm
```
| pydicom/pydicom | diff --git a/pydicom/tests/test_filewriter.py b/pydicom/tests/test_filewriter.py
index 70567f134..4d7814ed0 100644
--- a/pydicom/tests/test_filewriter.py
+++ b/pydicom/tests/test_filewriter.py
@@ -41,7 +41,6 @@ except AttributeError:
except ImportError:
print("unittest2 is required for testing in python2.6")
-
rtplan_name = get_testdata_files("rtplan.dcm")[0]
rtdose_name = get_testdata_files("rtdose.dcm")[0]
ct_name = get_testdata_files("CT_small.dcm")[0]
@@ -263,8 +262,8 @@ class WriteDataElementTests(unittest.TestCase):
# Was issue 74
data_elem = DataElement(0x00280009, "AT", [])
expected = hex2bytes((
- " 28 00 09 00" # (0028,0009) Frame Increment Pointer
- " 00 00 00 00" # length 0
+ " 28 00 09 00" # (0028,0009) Frame Increment Pointer
+ " 00 00 00 00" # length 0
))
write_data_element(self.f1, data_elem)
got = self.f1.getvalue()
@@ -1731,6 +1730,74 @@ class TestWriteFileMetaInfoNonStandard(unittest.TestCase):
self.assertEqual(meta, ref_meta)
+class TestWriteUndefinedLengthPixelData(unittest.TestCase):
+ """Test write_data_element() for pixel data with undefined length."""
+
+ def setUp(self):
+ self.fp = DicomBytesIO()
+
+ def test_little_endian_correct_data(self):
+ """Pixel data starting with an item tag is written."""
+ self.fp.is_little_endian = True
+ self.fp.is_implicit_VR = False
+ pixel_data = DataElement(0x7fe00010, 'OB',
+ b'\xfe\xff\x00\xe0'
+ b'\x00\x01\x02\x03',
+ is_undefined_length=True)
+ write_data_element(self.fp, pixel_data)
+
+ expected = (b'\xe0\x7f\x10\x00' # tag
+ b'OB\x00\x00' # VR
+ b'\xff\xff\xff\xff' # length
+ b'\xfe\xff\x00\xe0\x00\x01\x02\x03' # contents
+ b'\xfe\xff\xdd\xe0\x00\x00\x00\x00') # SQ delimiter
+ self.fp.seek(0)
+ assert self.fp.read() == expected
+
+ def test_big_endian_correct_data(self):
+ """Pixel data starting with an item tag is written."""
+ self.fp.is_little_endian = False
+ self.fp.is_implicit_VR = False
+ pixel_data = DataElement(0x7fe00010, 'OB',
+ b'\xff\xfe\xe0\x00'
+ b'\x00\x01\x02\x03',
+ is_undefined_length=True)
+ write_data_element(self.fp, pixel_data)
+ expected = (b'\x7f\xe0\x00\x10' # tag
+ b'OB\x00\x00' # VR
+ b'\xff\xff\xff\xff' # length
+ b'\xff\xfe\xe0\x00\x00\x01\x02\x03' # contents
+ b'\xff\xfe\xe0\xdd\x00\x00\x00\x00') # SQ delimiter
+ self.fp.seek(0)
+ assert self.fp.read() == expected
+
+ def test_little_endian_incorrect_data(self):
+ """Writing pixel data not starting with an item tag raises."""
+ self.fp.is_little_endian = True
+ self.fp.is_implicit_VR = False
+ pixel_data = DataElement(0x7fe00010, 'OB',
+ b'\xff\xff\x00\xe0'
+ b'\x00\x01\x02\x03'
+ b'\xfe\xff\xdd\xe0',
+ is_undefined_length=True)
+ with pytest.raises(ValueError, match='Pixel Data .* must '
+ 'start with an item tag'):
+ write_data_element(self.fp, pixel_data)
+
+ def test_big_endian_incorrect_data(self):
+ """Writing pixel data not starting with an item tag raises."""
+ self.fp.is_little_endian = False
+ self.fp.is_implicit_VR = False
+ pixel_data = DataElement(0x7fe00010, 'OB',
+ b'\x00\x00\x00\x00'
+ b'\x00\x01\x02\x03'
+ b'\xff\xfe\xe0\xdd',
+ is_undefined_length=True)
+ with pytest.raises(ValueError, match='Pixel Data .+ must '
+ 'start with an item tag'):
+ write_data_element(self.fp, pixel_data)
+
+
if __name__ == "__main__":
# This is called if run alone, but not if loaded through run_tests.py
# If not run from the directory where the sample images are,
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 1
} | 0.9 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"numpy>=1.16.0",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
numpy==1.19.5
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
-e git+https://github.com/pydicom/pydicom.git@1bd33e3ceec19d45844676bdd25367fda4c5319b#egg=pydicom
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: pydicom
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- numpy==1.19.5
prefix: /opt/conda/envs/pydicom
| [
"pydicom/tests/test_filewriter.py::TestWriteUndefinedLengthPixelData::test_big_endian_incorrect_data",
"pydicom/tests/test_filewriter.py::TestWriteUndefinedLengthPixelData::test_little_endian_incorrect_data"
] | [] | [
"pydicom/tests/test_filewriter.py::WriteFileTests::testCT",
"pydicom/tests/test_filewriter.py::WriteFileTests::testJPEG2000",
"pydicom/tests/test_filewriter.py::WriteFileTests::testListItemWriteBack",
"pydicom/tests/test_filewriter.py::WriteFileTests::testMR",
"pydicom/tests/test_filewriter.py::WriteFileTests::testMultiPN",
"pydicom/tests/test_filewriter.py::WriteFileTests::testRTDose",
"pydicom/tests/test_filewriter.py::WriteFileTests::testRTPlan",
"pydicom/tests/test_filewriter.py::WriteFileTests::testUnicode",
"pydicom/tests/test_filewriter.py::WriteFileTests::test_write_double_filemeta",
"pydicom/tests/test_filewriter.py::WriteFileTests::test_write_no_ts",
"pydicom/tests/test_filewriter.py::WriteFileTests::testwrite_short_uid",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_empty_AT",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_DA",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_DT",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_OD_explicit_little",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_OD_implicit_little",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_OL_explicit_little",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_OL_implicit_little",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_TM",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_UC_explicit_little",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_UC_implicit_little",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_UR_explicit_little",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_UR_implicit_little",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_multi_DA",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_multi_DT",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_multi_TM",
"pydicom/tests/test_filewriter.py::TestCorrectAmbiguousVR::test_lut_descriptor",
"pydicom/tests/test_filewriter.py::TestCorrectAmbiguousVR::test_overlay",
"pydicom/tests/test_filewriter.py::TestCorrectAmbiguousVR::test_pixel_data",
"pydicom/tests/test_filewriter.py::TestCorrectAmbiguousVR::test_pixel_representation_vm_one",
"pydicom/tests/test_filewriter.py::TestCorrectAmbiguousVR::test_pixel_representation_vm_three",
"pydicom/tests/test_filewriter.py::TestCorrectAmbiguousVR::test_sequence",
"pydicom/tests/test_filewriter.py::TestCorrectAmbiguousVR::test_waveform_bits_allocated",
"pydicom/tests/test_filewriter.py::WriteAmbiguousVRTests::test_write_explicit_vr_big_endian",
"pydicom/tests/test_filewriter.py::WriteAmbiguousVRTests::test_write_explicit_vr_little_endian",
"pydicom/tests/test_filewriter.py::WriteAmbiguousVRTests::test_write_explicit_vr_raises",
"pydicom/tests/test_filewriter.py::ScratchWriteTests::testImpl_LE_deflen_write",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_preamble_default",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_preamble_custom",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_no_preamble",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_none_preamble",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_bad_preamble",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_prefix",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_prefix_none",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_ds_changed",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_transfer_syntax_added",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_transfer_syntax_not_added",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_transfer_syntax_raises",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_media_storage_sop_class_uid_added",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_write_no_file_meta",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_raise_no_file_meta",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_add_file_meta",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_standard",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_commandset_no_written",
"pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoToStandard::test_bad_elements",
"pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoToStandard::test_missing_elements",
"pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoToStandard::test_group_length",
"pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoToStandard::test_group_length_updated",
"pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoToStandard::test_version",
"pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoToStandard::test_implementation_version_name_length",
"pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoToStandard::test_implementation_class_uid_length",
"pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoToStandard::test_filelike_position",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_commandset",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_commandset_dataset",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_commandset_filemeta",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_commandset_filemeta_dataset",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_dataset",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_ds_unchanged",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_file_meta_unchanged",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_filemeta_dataset",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_no_preamble",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_preamble_commandset",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_preamble_commandset_dataset",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_preamble_commandset_filemeta",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_preamble_commandset_filemeta_dataset",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_preamble_custom",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_preamble_dataset",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_preamble_default",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_preamble_filemeta_dataset",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_read_write_identical",
"pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoNonStandard::test_bad_elements",
"pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoNonStandard::test_filelike_position",
"pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoNonStandard::test_group_length_updated",
"pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoNonStandard::test_meta_unchanged",
"pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoNonStandard::test_missing_elements",
"pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoNonStandard::test_transfer_syntax_not_added",
"pydicom/tests/test_filewriter.py::TestWriteUndefinedLengthPixelData::test_big_endian_correct_data",
"pydicom/tests/test_filewriter.py::TestWriteUndefinedLengthPixelData::test_little_endian_correct_data"
] | [] | MIT License | 1,609 | 292 | [
"pydicom/filewriter.py"
] |
Azure__msrest-for-python-43 | 11f19f936f2d2d912782c7280f02f01ed89baf47 | 2017-08-22 03:53:10 | 24deba7a7a9e335314058ec2d0b39a710f61be60 | diff --git a/msrest/serialization.py b/msrest/serialization.py
index 6eb8ec9..063f2e6 100644
--- a/msrest/serialization.py
+++ b/msrest/serialization.py
@@ -918,6 +918,9 @@ class Deserializer(object):
'[]': self.deserialize_iter,
'{}': self.deserialize_dict
}
+ self.deserialize_expected_types = {
+ 'duration': (isodate.Duration, datetime.timedelta)
+ }
self.dependencies = dict(classes) if classes else {}
self.key_extractors = [
rest_key_extractor
@@ -1080,6 +1083,8 @@ class Deserializer(object):
if data_type in self.basic_types.values():
return self.deserialize_basic(data, data_type)
if data_type in self.deserialize_type:
+ if isinstance(data, self.deserialize_expected_types.get(data_type, tuple())):
+ return data
data_val = self.deserialize_type[data_type](data)
return data_val
| Serialization issue if dict syntax and Duration used
```python
msrest.exceptions.SerializationError: Unable to build a model: Unable to deserialize response data. Data: 3 years, 6 months, 4 days, 12:30:05, duration, TypeError: Expecting a string isodate.duration.Duration(4, 45005, 0, years=3, months=6), DeserializationError: Unable to deserialize response data. Data: 3 years, 6 months, 4 days, 12:30:05, duration, TypeError: Expecting a string isodate.duration.Duration(4, 45005, 0, years=3, months=6)
```
Regression introduced in 0.4.12
| Azure/msrest-for-python | diff --git a/tests/test_serialization.py b/tests/test_serialization.py
index f70dcbd..787a086 100644
--- a/tests/test_serialization.py
+++ b/tests/test_serialization.py
@@ -138,7 +138,6 @@ class TestRuntimeSerialized(unittest.TestCase):
class TestObj(Model):
- _validation = {}
_attribute_map = {
'attr_a': {'key':'id', 'type':'str'},
'attr_b': {'key':'AttrB', 'type':'int'},
@@ -147,23 +146,30 @@ class TestRuntimeSerialized(unittest.TestCase):
'attr_e': {'key':'AttrE', 'type': '{float}'},
'attr_f': {'key':'AttrF', 'type': 'duration'},
'attr_g': {'key':'properties.AttrG', 'type':'str'},
- }
-
- def __init__(self):
+ }
- self.attr_a = None
- self.attr_b = None
- self.attr_c = None
- self.attr_d = None
- self.attr_e = None
- self.attr_f = None
- self.attr_g = None
+ def __init__(self,
+ attr_a=None,
+ attr_b=None,
+ attr_c=None,
+ attr_d=None,
+ attr_e=None,
+ attr_f=None,
+ attr_g=None):
+
+ self.attr_a = attr_a
+ self.attr_b = attr_b
+ self.attr_c = attr_c
+ self.attr_d = attr_d
+ self.attr_e = attr_e
+ self.attr_f = attr_f
+ self.attr_g = attr_g
def __str__(self):
return "Test_Object"
def setUp(self):
- self.s = Serializer()
+ self.s = Serializer({'TestObj': self.TestObj})
return super(TestRuntimeSerialized, self).setUp()
def test_serialize_direct_model(self):
@@ -496,6 +502,14 @@ class TestRuntimeSerialized(unittest.TestCase):
message = self.s._serialize(test_obj)
self.assertEquals("P1D", message["AttrF"])
+ test_obj = self.TestObj()
+ test_obj.attr_f = isodate.parse_duration("P3Y6M4DT12H30M5S")
+
+ message = self.s.body({
+ "attr_f": isodate.parse_duration("P3Y6M4DT12H30M5S")},
+ 'TestObj')
+ self.assertEquals("P3Y6M4DT12H30M5S", message["AttrF"])
+
def test_attr_list_simple(self):
"""
Test serializing an object with simple-typed list attributes
@@ -657,8 +671,8 @@ class TestRuntimeSerialized(unittest.TestCase):
g = self.s.body({"test":{"value":"data"}}, 'object')
self.assertEqual(g, {"test":{"value":"data"}})
- h = self.s.serialize_data({"test":self.TestObj()}, 'object')
- self.assertEqual(h, {"test":"Test_Object"})
+ h = self.s.serialize_data({"test":self.TestObj('id')}, 'object')
+ self.assertEqual(h, {"test":{'id': 'id'}})
i = self.s.serialize_data({"test":[1,2,3,4,5]}, 'object')
self.assertEqual(i, {"test":[1,2,3,4,5]})
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 1
} | 0.4 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt",
"dev_requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | certifi==2025.1.31
charset-normalizer==3.4.1
coverage==7.8.0
exceptiongroup==1.2.2
httpretty==1.1.4
idna==3.10
iniconfig==2.1.0
isodate==0.7.2
-e git+https://github.com/Azure/msrest-for-python.git@11f19f936f2d2d912782c7280f02f01ed89baf47#egg=msrest
oauthlib==3.2.2
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
pytest-cov==6.0.0
requests==2.32.3
requests-oauthlib==2.0.0
tomli==2.2.1
urllib3==2.3.0
| name: msrest-for-python
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- certifi==2025.1.31
- charset-normalizer==3.4.1
- coverage==7.8.0
- exceptiongroup==1.2.2
- httpretty==1.1.4
- idna==3.10
- iniconfig==2.1.0
- isodate==0.7.2
- oauthlib==3.2.2
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- pytest-cov==6.0.0
- requests==2.32.3
- requests-oauthlib==2.0.0
- tomli==2.2.1
- urllib3==2.3.0
prefix: /opt/conda/envs/msrest-for-python
| [
"tests/test_serialization.py::TestRuntimeSerialized::test_attr_duration"
] | [] | [
"tests/test_serialization.py::TestModelDeserialization::test_response",
"tests/test_serialization.py::TestRuntimeSerialized::test_attr_bool",
"tests/test_serialization.py::TestRuntimeSerialized::test_attr_dict_simple",
"tests/test_serialization.py::TestRuntimeSerialized::test_attr_enum",
"tests/test_serialization.py::TestRuntimeSerialized::test_attr_int",
"tests/test_serialization.py::TestRuntimeSerialized::test_attr_list_complex",
"tests/test_serialization.py::TestRuntimeSerialized::test_attr_list_simple",
"tests/test_serialization.py::TestRuntimeSerialized::test_attr_none",
"tests/test_serialization.py::TestRuntimeSerialized::test_attr_sequence",
"tests/test_serialization.py::TestRuntimeSerialized::test_attr_str",
"tests/test_serialization.py::TestRuntimeSerialized::test_empty_list",
"tests/test_serialization.py::TestRuntimeSerialized::test_key_type",
"tests/test_serialization.py::TestRuntimeSerialized::test_model_validate",
"tests/test_serialization.py::TestRuntimeSerialized::test_obj_serialize_none",
"tests/test_serialization.py::TestRuntimeSerialized::test_obj_with_malformed_map",
"tests/test_serialization.py::TestRuntimeSerialized::test_obj_with_mismatched_map",
"tests/test_serialization.py::TestRuntimeSerialized::test_obj_without_attr_map",
"tests/test_serialization.py::TestRuntimeSerialized::test_polymorphic_serialization",
"tests/test_serialization.py::TestRuntimeSerialized::test_serialize_datetime",
"tests/test_serialization.py::TestRuntimeSerialized::test_serialize_direct_model",
"tests/test_serialization.py::TestRuntimeSerialized::test_serialize_empty_iter",
"tests/test_serialization.py::TestRuntimeSerialized::test_serialize_json_obj",
"tests/test_serialization.py::TestRuntimeSerialized::test_serialize_object",
"tests/test_serialization.py::TestRuntimeSerialized::test_serialize_primitive_types",
"tests/test_serialization.py::TestRuntimeSerialized::test_validate",
"tests/test_serialization.py::TestRuntimeDeserialized::test_attr_bool",
"tests/test_serialization.py::TestRuntimeDeserialized::test_attr_int",
"tests/test_serialization.py::TestRuntimeDeserialized::test_attr_list_complex",
"tests/test_serialization.py::TestRuntimeDeserialized::test_attr_list_in_list",
"tests/test_serialization.py::TestRuntimeDeserialized::test_attr_list_simple",
"tests/test_serialization.py::TestRuntimeDeserialized::test_attr_none",
"tests/test_serialization.py::TestRuntimeDeserialized::test_attr_str",
"tests/test_serialization.py::TestRuntimeDeserialized::test_basic_deserialization",
"tests/test_serialization.py::TestRuntimeDeserialized::test_cls_method_deserialization",
"tests/test_serialization.py::TestRuntimeDeserialized::test_deserialize_datetime",
"tests/test_serialization.py::TestRuntimeDeserialized::test_deserialize_object",
"tests/test_serialization.py::TestRuntimeDeserialized::test_deserialize_storage",
"tests/test_serialization.py::TestRuntimeDeserialized::test_non_obj_deserialization",
"tests/test_serialization.py::TestRuntimeDeserialized::test_obj_with_malformed_map",
"tests/test_serialization.py::TestRuntimeDeserialized::test_obj_with_no_attr",
"tests/test_serialization.py::TestRuntimeDeserialized::test_personalize_deserialization",
"tests/test_serialization.py::TestRuntimeDeserialized::test_polymorphic_deserialization",
"tests/test_serialization.py::TestRuntimeDeserialized::test_polymorphic_deserialization_with_escape",
"tests/test_serialization.py::TestRuntimeDeserialized::test_robust_deserialization",
"tests/test_serialization.py::TestModelInstanceEquality::test_model_instance_equality"
] | [] | MIT License | 1,610 | 227 | [
"msrest/serialization.py"
] |
|
Azure__msrest-for-python-45 | 07cec915d60e29193935dfca17d5e8a7afd0a3d4 | 2017-08-23 16:10:17 | 24deba7a7a9e335314058ec2d0b39a710f61be60 | diff --git a/msrest/serialization.py b/msrest/serialization.py
index 063f2e6..a3d50cd 100644
--- a/msrest/serialization.py
+++ b/msrest/serialization.py
@@ -1200,6 +1200,12 @@ class Deserializer(object):
:param str data: response string to be deserialized.
:rtype: str or unicode
"""
+ # We might be here because we have an enum modeled as string,
+ # and we try to deserialize a partial dict with enum inside
+ if isinstance(data, Enum):
+ return data
+
+ # Consider this is real string
try:
if isinstance(data, unicode):
return data
| v0.4.12 breaks mixed dict with enum if model-as-string=true
This breaks:
``` python
async_security_rule = self.network_client.security_rules.create_or_update(
self.group_name,
security_group_name,
new_security_rule_name,
{
'access':azure.mgmt.network.models.SecurityRuleAccess.allow,
'description':'New Test security rule',
'destination_address_prefix':'*',
'destination_port_range':'123-3500',
'direction':azure.mgmt.network.models.SecurityRuleDirection.outbound,
'priority':400,
'protocol':azure.mgmt.network.models.SecurityRuleProtocol.tcp,
'source_address_prefix':'*',
'source_port_range':'655',
}
)
``` | Azure/msrest-for-python | diff --git a/tests/test_serialization.py b/tests/test_serialization.py
index 787a086..10fb82f 100644
--- a/tests/test_serialization.py
+++ b/tests/test_serialization.py
@@ -353,10 +353,10 @@ class TestRuntimeSerialized(unittest.TestCase):
class TestEnum(Enum):
val = "Value"
- t = test_obj
+ t = test_obj()
t.abc = TestEnum.val
- serialized = self.s._serialize(test_obj)
+ serialized = self.s._serialize(t)
expected = {
"ABC": "Value"
}
@@ -374,6 +374,31 @@ class TestRuntimeSerialized(unittest.TestCase):
with self.assertRaises(SerializationError):
serializer._serialize(t)
+ serializer = Serializer({
+ 'TestEnumObj': test_obj,
+ 'TestEnum': TestEnum
+ })
+ serialized = serializer.body({
+ 'abc': TestEnum.val
+ }, 'TestEnumObj')
+ expected = {
+ 'ABC': 'Value'
+ }
+ self.assertEqual(expected, serialized)
+
+ # model-as-string=True
+ test_obj._attribute_map = {
+ "abc":{"key":"ABC", "type":"str"}
+ }
+ serialized = serializer.body({
+ 'abc': TestEnum.val
+ }, 'TestEnumObj')
+ expected = {
+ 'ABC': 'Value'
+ }
+ self.assertEqual(expected, serialized)
+
+
def test_attr_none(self):
"""
Test serializing an object with None attributes.
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | 0.4 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt",
"dev_requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | certifi==2025.1.31
charset-normalizer==3.4.1
coverage==7.8.0
exceptiongroup==1.2.2
httpretty==1.1.4
idna==3.10
iniconfig==2.1.0
isodate==0.7.2
-e git+https://github.com/Azure/msrest-for-python.git@07cec915d60e29193935dfca17d5e8a7afd0a3d4#egg=msrest
oauthlib==3.2.2
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
pytest-cov==6.0.0
requests==2.32.3
requests-oauthlib==2.0.0
tomli==2.2.1
urllib3==2.3.0
| name: msrest-for-python
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- certifi==2025.1.31
- charset-normalizer==3.4.1
- coverage==7.8.0
- exceptiongroup==1.2.2
- httpretty==1.1.4
- idna==3.10
- iniconfig==2.1.0
- isodate==0.7.2
- oauthlib==3.2.2
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- pytest-cov==6.0.0
- requests==2.32.3
- requests-oauthlib==2.0.0
- tomli==2.2.1
- urllib3==2.3.0
prefix: /opt/conda/envs/msrest-for-python
| [
"tests/test_serialization.py::TestRuntimeSerialized::test_attr_enum"
] | [] | [
"tests/test_serialization.py::TestModelDeserialization::test_response",
"tests/test_serialization.py::TestRuntimeSerialized::test_attr_bool",
"tests/test_serialization.py::TestRuntimeSerialized::test_attr_dict_simple",
"tests/test_serialization.py::TestRuntimeSerialized::test_attr_duration",
"tests/test_serialization.py::TestRuntimeSerialized::test_attr_int",
"tests/test_serialization.py::TestRuntimeSerialized::test_attr_list_complex",
"tests/test_serialization.py::TestRuntimeSerialized::test_attr_list_simple",
"tests/test_serialization.py::TestRuntimeSerialized::test_attr_none",
"tests/test_serialization.py::TestRuntimeSerialized::test_attr_sequence",
"tests/test_serialization.py::TestRuntimeSerialized::test_attr_str",
"tests/test_serialization.py::TestRuntimeSerialized::test_empty_list",
"tests/test_serialization.py::TestRuntimeSerialized::test_key_type",
"tests/test_serialization.py::TestRuntimeSerialized::test_model_validate",
"tests/test_serialization.py::TestRuntimeSerialized::test_obj_serialize_none",
"tests/test_serialization.py::TestRuntimeSerialized::test_obj_with_malformed_map",
"tests/test_serialization.py::TestRuntimeSerialized::test_obj_with_mismatched_map",
"tests/test_serialization.py::TestRuntimeSerialized::test_obj_without_attr_map",
"tests/test_serialization.py::TestRuntimeSerialized::test_polymorphic_serialization",
"tests/test_serialization.py::TestRuntimeSerialized::test_serialize_datetime",
"tests/test_serialization.py::TestRuntimeSerialized::test_serialize_direct_model",
"tests/test_serialization.py::TestRuntimeSerialized::test_serialize_empty_iter",
"tests/test_serialization.py::TestRuntimeSerialized::test_serialize_json_obj",
"tests/test_serialization.py::TestRuntimeSerialized::test_serialize_object",
"tests/test_serialization.py::TestRuntimeSerialized::test_serialize_primitive_types",
"tests/test_serialization.py::TestRuntimeSerialized::test_validate",
"tests/test_serialization.py::TestRuntimeDeserialized::test_attr_bool",
"tests/test_serialization.py::TestRuntimeDeserialized::test_attr_int",
"tests/test_serialization.py::TestRuntimeDeserialized::test_attr_list_complex",
"tests/test_serialization.py::TestRuntimeDeserialized::test_attr_list_in_list",
"tests/test_serialization.py::TestRuntimeDeserialized::test_attr_list_simple",
"tests/test_serialization.py::TestRuntimeDeserialized::test_attr_none",
"tests/test_serialization.py::TestRuntimeDeserialized::test_attr_str",
"tests/test_serialization.py::TestRuntimeDeserialized::test_basic_deserialization",
"tests/test_serialization.py::TestRuntimeDeserialized::test_cls_method_deserialization",
"tests/test_serialization.py::TestRuntimeDeserialized::test_deserialize_datetime",
"tests/test_serialization.py::TestRuntimeDeserialized::test_deserialize_object",
"tests/test_serialization.py::TestRuntimeDeserialized::test_deserialize_storage",
"tests/test_serialization.py::TestRuntimeDeserialized::test_non_obj_deserialization",
"tests/test_serialization.py::TestRuntimeDeserialized::test_obj_with_malformed_map",
"tests/test_serialization.py::TestRuntimeDeserialized::test_obj_with_no_attr",
"tests/test_serialization.py::TestRuntimeDeserialized::test_personalize_deserialization",
"tests/test_serialization.py::TestRuntimeDeserialized::test_polymorphic_deserialization",
"tests/test_serialization.py::TestRuntimeDeserialized::test_polymorphic_deserialization_with_escape",
"tests/test_serialization.py::TestRuntimeDeserialized::test_robust_deserialization",
"tests/test_serialization.py::TestModelInstanceEquality::test_model_instance_equality"
] | [] | MIT License | 1,614 | 169 | [
"msrest/serialization.py"
] |
|
pre-commit__pre-commit-592 | 7139a47c1ca968a2699e467279677fa77ad68aae | 2017-08-23 17:24:54 | 7139a47c1ca968a2699e467279677fa77ad68aae | diff --git a/pre_commit/commands/run.py b/pre_commit/commands/run.py
index c18f2aa..55d2b12 100644
--- a/pre_commit/commands/run.py
+++ b/pre_commit/commands/run.py
@@ -217,7 +217,7 @@ def _has_unstaged_config(runner):
def run(runner, args, environ=os.environ):
- no_stash = args.no_stash or args.all_files or bool(args.files)
+ no_stash = args.all_files or bool(args.files)
# Check if we have unresolved merge conflict files and fail fast.
if _has_unmerged_paths(runner):
@@ -227,20 +227,11 @@ def run(runner, args, environ=os.environ):
logger.error('Specify both --origin and --source.')
return 1
if _has_unstaged_config(runner) and not no_stash:
- if args.allow_unstaged_config:
- logger.warn(
- 'You have an unstaged config file and have specified the '
- '--allow-unstaged-config option.\n'
- 'Note that your config will be stashed before the config is '
- 'parsed unless --no-stash is specified.',
- )
- else:
- logger.error(
- 'Your .pre-commit-config.yaml is unstaged.\n'
- '`git add .pre-commit-config.yaml` to fix this.\n'
- 'Run pre-commit with --allow-unstaged-config to silence this.',
- )
- return 1
+ logger.error(
+ 'Your .pre-commit-config.yaml is unstaged.\n'
+ '`git add .pre-commit-config.yaml` to fix this.\n',
+ )
+ return 1
# Expose origin / source as environment variables for hooks to consume
if args.origin and args.source:
diff --git a/pre_commit/main.py b/pre_commit/main.py
index 3a2fee1..0b00a86 100644
--- a/pre_commit/main.py
+++ b/pre_commit/main.py
@@ -135,10 +135,6 @@ def main(argv=None):
_add_color_option(run_parser)
_add_config_option(run_parser)
run_parser.add_argument('hook', nargs='?', help='A single hook-id to run')
- run_parser.add_argument(
- '--no-stash', default=False, action='store_true',
- help='Use this option to prevent auto stashing of unstaged files.',
- )
run_parser.add_argument(
'--verbose', '-v', action='store_true', default=False,
)
@@ -154,13 +150,6 @@ def main(argv=None):
'--commit-msg-filename',
help='Filename to check when running during `commit-msg`',
)
- run_parser.add_argument(
- '--allow-unstaged-config', default=False, action='store_true',
- help=(
- 'Allow an unstaged config to be present. Note that this will '
- 'be stashed before parsing unless --no-stash is specified.'
- ),
- )
run_parser.add_argument(
'--hook-stage', choices=('commit', 'push', 'commit-msg'),
default='commit',
@@ -173,7 +162,7 @@ def main(argv=None):
run_mutex_group = run_parser.add_mutually_exclusive_group(required=False)
run_mutex_group.add_argument(
'--all-files', '-a', action='store_true', default=False,
- help='Run on all the files in the repo. Implies --no-stash.',
+ help='Run on all the files in the repo.',
)
run_mutex_group.add_argument(
'--files', nargs='*', default=[],
| Deprecate and remove some (useless?) options
I find the following don't really have any good use cases (and don't come up in normal day-to-day) and are undocumented beyond `--help`. **I'm proposing removing these options**:
## `pre-commit run --no-stash`
This disables the auto-stashing of files when running -- though this is already the case for `pre-commit run --all-files` and `pre-commit run --files ...`.
The behaviour of `--no-stash` (without using `--no-stash`) can be achieved via `git diff --name-only | xargs pre-commit run --files`
It was added [along with the avoiding behaviour](https://github.com/pre-commit/pre-commit/pull/80) in the same pull request. I want to say this was my first idea for "fixing" the [original problem](https://github.com/pre-commit/pre-commit/issues/68) and then I forgot to undo it.
## `pre-commit run --allow-unstaged-config`
This (unfortunately) collides with `pre-commit run --all` (prefix match of `pre-commit run --all-files`) so I've wanted to get rid of it anyway.
This allows one to run with an unstaged configuration, which then (in most cases) gets the changes swiped out from under you and causes confusing situations where the hooks that are run aren't what was on disk at the time of running. The warning that's printed when doing this also explains this.
This was [originally my idea](https://github.com/pre-commit/pre-commit/issues/157#issuecomment-99080756) but now I think we can just do without the option at all -- requiring the pre-commit configuration to be staged when running pre-commit. | pre-commit/pre-commit | diff --git a/tests/commands/run_test.py b/tests/commands/run_test.py
index c360fde..924d097 100644
--- a/tests/commands/run_test.py
+++ b/tests/commands/run_test.py
@@ -54,10 +54,8 @@ def _get_opts(
color=False,
verbose=False,
hook=None,
- no_stash=False,
origin='',
source='',
- allow_unstaged_config=False,
hook_stage='commit',
show_diff_on_failure=False,
commit_msg_filename='',
@@ -70,10 +68,8 @@ def _get_opts(
color=color,
verbose=verbose,
hook=hook,
- no_stash=no_stash,
origin=origin,
source=source,
- allow_unstaged_config=allow_unstaged_config,
hook_stage=hook_stage,
show_diff_on_failure=show_diff_on_failure,
commit_msg_filename=commit_msg_filename,
@@ -332,38 +328,6 @@ def test_origin_source_error_msg(
assert warning_msg not in printed
[email protected](
- ('no_stash', 'all_files', 'expect_stash'),
- (
- (True, True, False),
- (True, False, False),
- (False, True, False),
- (False, False, True),
- ),
-)
-def test_no_stash(
- cap_out,
- repo_with_passing_hook,
- no_stash,
- all_files,
- expect_stash,
- mock_out_store_directory,
-):
- stage_a_file()
- # Make unstaged changes
- with open('foo.py', 'w') as foo_file:
- foo_file.write('import os\n')
-
- args = _get_opts(no_stash=no_stash, all_files=all_files)
- ret, printed = _do_run(cap_out, repo_with_passing_hook, args)
- assert ret == 0
- warning_msg = b'[WARNING] Unstaged files detected.'
- if expect_stash:
- assert warning_msg in printed
- else:
- assert warning_msg not in printed
-
-
@pytest.mark.parametrize(('output', 'expected'), (('some', True), ('', False)))
def test_has_unmerged_paths(output, expected):
mock_runner = mock.Mock()
@@ -715,37 +679,19 @@ def modified_config_repo(repo_with_passing_hook):
yield repo_with_passing_hook
-def test_allow_unstaged_config_option(
+def test_error_with_unstaged_config(
cap_out, modified_config_repo, mock_out_store_directory,
):
- args = _get_opts(allow_unstaged_config=True)
- ret, printed = _do_run(cap_out, modified_config_repo, args)
- expected = (
- b'You have an unstaged config file and have specified the '
- b'--allow-unstaged-config option.'
- )
- assert expected in printed
- assert ret == 0
-
-
-def test_no_allow_unstaged_config_option(
- cap_out, modified_config_repo, mock_out_store_directory,
-):
- args = _get_opts(allow_unstaged_config=False)
+ args = _get_opts()
ret, printed = _do_run(cap_out, modified_config_repo, args)
assert b'Your .pre-commit-config.yaml is unstaged.' in printed
assert ret == 1
@pytest.mark.parametrize(
- 'opts',
- (
- {'allow_unstaged_config': False, 'no_stash': True},
- {'all_files': True},
- {'files': [C.CONFIG_FILE]},
- ),
+ 'opts', ({'all_files': True}, {'files': [C.CONFIG_FILE]}),
)
-def test_unstaged_message_suppressed(
+def test_no_unstaged_error_with_all_files_or_files(
cap_out, modified_config_repo, mock_out_store_directory, opts,
):
args = _get_opts(**opts)
diff --git a/tests/git_test.py b/tests/git_test.py
index 4ffccee..0500a42 100644
--- a/tests/git_test.py
+++ b/tests/git_test.py
@@ -137,8 +137,7 @@ def test_get_conflicted_files_in_submodule(in_conflicting_submodule):
def test_get_conflicted_files_unstaged_files(in_merge_conflict):
- # If they for whatever reason did pre-commit run --no-stash during a
- # conflict
+ """This case no longer occurs, but it is a useful test nonetheless"""
resolve_conflict()
# Make unstaged file.
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 1
},
"num_modified_files": 2
} | 0.16 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements-dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | aspy.yaml==1.3.0
cached-property==2.0.1
coverage==7.8.0
distlib==0.3.9
exceptiongroup==1.2.2
filelock==3.18.0
flake8==7.2.0
identify==2.6.9
iniconfig==2.1.0
mccabe==0.7.0
mock==5.2.0
nodeenv==1.9.1
packaging==24.2
platformdirs==4.3.7
pluggy==1.5.0
-e git+https://github.com/pre-commit/pre-commit.git@7139a47c1ca968a2699e467279677fa77ad68aae#egg=pre_commit
pycodestyle==2.13.0
pyflakes==3.3.1
pytest==8.3.5
pytest-env==1.1.5
PyYAML==6.0.2
six==1.17.0
tomli==2.2.1
virtualenv==20.29.3
| name: pre-commit
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- aspy-yaml==1.3.0
- cached-property==2.0.1
- coverage==7.8.0
- distlib==0.3.9
- exceptiongroup==1.2.2
- filelock==3.18.0
- flake8==7.2.0
- identify==2.6.9
- iniconfig==2.1.0
- mccabe==0.7.0
- mock==5.2.0
- nodeenv==1.9.1
- packaging==24.2
- platformdirs==4.3.7
- pluggy==1.5.0
- pycodestyle==2.13.0
- pyflakes==3.3.1
- pytest==8.3.5
- pytest-env==1.1.5
- pyyaml==6.0.2
- six==1.17.0
- tomli==2.2.1
- virtualenv==20.29.3
prefix: /opt/conda/envs/pre-commit
| [
"tests/commands/run_test.py::test_run_all_hooks_failing",
"tests/commands/run_test.py::test_hook_that_modifies_but_returns_zero",
"tests/commands/run_test.py::test_types_hook_repository",
"tests/commands/run_test.py::test_exclude_types_hook_repository",
"tests/commands/run_test.py::test_show_diff_on_failure",
"tests/commands/run_test.py::test_run[options0-outputs0-0-True]",
"tests/commands/run_test.py::test_run[options1-outputs1-0-True]",
"tests/commands/run_test.py::test_run[options2-outputs2-0-True]",
"tests/commands/run_test.py::test_run[options3-outputs3-1-True]",
"tests/commands/run_test.py::test_run[options4-outputs4-0-True]",
"tests/commands/run_test.py::test_run[options5-outputs5-0-True]",
"tests/commands/run_test.py::test_run[options6-outputs6-0-False]",
"tests/commands/run_test.py::test_run_output_logfile",
"tests/commands/run_test.py::test_always_run",
"tests/commands/run_test.py::test_always_run_alt_config",
"tests/commands/run_test.py::test_origin_source_error_msg[master-master-False]",
"tests/commands/run_test.py::test_origin_source_error_msg[master--True]",
"tests/commands/run_test.py::test_origin_source_error_msg[-master-True]",
"tests/commands/run_test.py::test_merge_conflict",
"tests/commands/run_test.py::test_merge_conflict_modified",
"tests/commands/run_test.py::test_merge_conflict_resolved",
"tests/commands/run_test.py::test_skip_hook",
"tests/commands/run_test.py::test_hook_id_not_in_non_verbose_output",
"tests/commands/run_test.py::test_hook_id_in_verbose_output",
"tests/commands/run_test.py::test_multiple_hooks_same_id",
"tests/commands/run_test.py::test_push_hook",
"tests/commands/run_test.py::test_commit_msg_hook",
"tests/commands/run_test.py::test_local_hook_passes",
"tests/commands/run_test.py::test_local_hook_fails",
"tests/commands/run_test.py::test_error_with_unstaged_config",
"tests/commands/run_test.py::test_no_unstaged_error_with_all_files_or_files[opts0]",
"tests/commands/run_test.py::test_no_unstaged_error_with_all_files_or_files[opts1]",
"tests/commands/run_test.py::test_pass_filenames[True-hook_args0-foo.py]",
"tests/commands/run_test.py::test_pass_filenames[False-hook_args1-]",
"tests/commands/run_test.py::test_pass_filenames[True-hook_args2-some",
"tests/commands/run_test.py::test_pass_filenames[False-hook_args3-some"
] | [
"tests/commands/run_test.py::test_arbitrary_bytes_hook",
"tests/commands/run_test.py::test_hook_install_failure"
] | [
"tests/commands/run_test.py::test_has_unmerged_paths[some-True]",
"tests/commands/run_test.py::test_has_unmerged_paths[-False]",
"tests/commands/run_test.py::test_compute_cols[hooks0-True-80]",
"tests/commands/run_test.py::test_compute_cols[hooks1-False-81]",
"tests/commands/run_test.py::test_compute_cols[hooks2-True-85]",
"tests/commands/run_test.py::test_compute_cols[hooks3-False-82]",
"tests/commands/run_test.py::test_get_skips[environ0-expected_output0]",
"tests/commands/run_test.py::test_get_skips[environ1-expected_output1]",
"tests/commands/run_test.py::test_get_skips[environ2-expected_output2]",
"tests/commands/run_test.py::test_get_skips[environ3-expected_output3]",
"tests/commands/run_test.py::test_get_skips[environ4-expected_output4]",
"tests/commands/run_test.py::test_get_skips[environ5-expected_output5]",
"tests/commands/run_test.py::test_get_skips[environ6-expected_output6]",
"tests/commands/run_test.py::test_non_ascii_hook_id",
"tests/commands/run_test.py::test_stdout_write_bug_py26",
"tests/commands/run_test.py::test_get_changed_files",
"tests/commands/run_test.py::test_lots_of_files",
"tests/commands/run_test.py::test_files_running_subdir",
"tests/git_test.py::test_get_root_at_root",
"tests/git_test.py::test_get_root_deeper",
"tests/git_test.py::test_get_root_not_git_dir",
"tests/git_test.py::test_get_staged_files_deleted",
"tests/git_test.py::test_is_not_in_merge_conflict",
"tests/git_test.py::test_is_in_merge_conflict",
"tests/git_test.py::test_cherry_pick_conflict",
"tests/git_test.py::test_get_files_matching_base",
"tests/git_test.py::test_matches_broken_symlink",
"tests/git_test.py::test_get_files_matching_total_match",
"tests/git_test.py::test_does_search_instead_of_match",
"tests/git_test.py::test_does_not_include_deleted_fileS",
"tests/git_test.py::test_exclude_removes_files",
"tests/git_test.py::test_get_conflicted_files",
"tests/git_test.py::test_get_conflicted_files_unstaged_files",
"tests/git_test.py::test_parse_merge_msg_for_conflicts[Merge"
] | [] | MIT License | 1,615 | 844 | [
"pre_commit/commands/run.py",
"pre_commit/main.py"
] |
|
rsheftel__pandas_market_calendars-11 | 72223f2615375c1ec321eb22abdaf9fda5bea1e5 | 2017-08-23 23:58:58 | 72223f2615375c1ec321eb22abdaf9fda5bea1e5 | diff --git a/pandas_market_calendars/calendar_utils.py b/pandas_market_calendars/calendar_utils.py
index 55d1c3d..62f51eb 100644
--- a/pandas_market_calendars/calendar_utils.py
+++ b/pandas_market_calendars/calendar_utils.py
@@ -57,9 +57,9 @@ def merge_schedules(schedules, how='outer'):
:param how: outer or inner
:return: schedule DataFrame
"""
-
- result = schedules.pop(0)
- for schedule in schedules:
+
+ result = schedules[0]
+ for schedule in schedules[1:]:
result = result.merge(schedule, how=how, right_index=True, left_index=True)
if how == 'outer':
result['market_open'] = result.apply(lambda x: min(x.market_open_x, x.market_open_y), axis=1)
@@ -69,24 +69,25 @@ def merge_schedules(schedules, how='outer'):
result['market_close'] = result.apply(lambda x: min(x.market_close_x, x.market_close_y), axis=1)
else:
raise ValueError('how argument must be "inner" or "outer"')
- return result[['market_open', 'market_close']]
+ result = result[['market_open', 'market_close']]
+ return result
def date_range(schedule, frequency, closed='right', force_close=True, **kwargs):
"""
- Given a schedule will return a DatetimeIndex will all of the valid datetime at the frequency given.
+ Given a schedule will return a DatetimeIndex will all of the valid datetime at the frequency given.
The schedule values are assumed to be in UTC.
:param schedule: schedule DataFrame
:param frequency: frequency in standard string
:param closed: same meaning as pandas date_range. 'right' will exclude the first value and should be used when the
results should only include the close for each bar.
- :param force_close: if True then the close of the day will be included even if it does not fall on an even
+ :param force_close: if True then the close of the day will be included even if it does not fall on an even
frequency. If False then the market close for the day may not be included in the results
:param kwargs: arguments that will be passed to the pandas date_time
:return: DatetimeIndex
"""
-
+
kwargs['closed'] = closed
ranges = list()
for row in schedule.itertuples():
| Issues with merge_schedules when list is larger than 2
Firstly thanks for the great project, it's very helpful. I am having issues with `merge_schedules` when the input list is greater than two. The issue seems to be that multiple calls to `pd.merge` happen causing repeated columns, e.g. `market_open_x`, which then fails when the `lambda` function is applied. Here is an illustrative example
```python
import pandas_market_calendars as mcal
start_date = "20170103"
end_date = "20170104"
cme = mcal.get_calendar("CME")
nyse = mcal.get_calendar("NYSE")
ice = mcal.get_calendar("ICE")
s1 = cme.schedule(start_date, end_date)
s2 = nyse.schedule(start_date, end_date)
s3 = ice.schedule(start_date, end_date)
schedules = [s1, s2, s3]
mcal.merge_schedules(schedules, how='inner')
ValueError: ('Can only compare identically-labeled Series objects', 'occurred at index 2017-01-03 00:00:00')
```
As described above, here is an illustration of the internal code causing the
issue
```python
how = "inner"
result = s1
result = result.merge(s2, how=how, right_index=True, left_index=True)
result['market_open'] = result.apply(lambda x: max(x.market_open_x, x.market_open_y), axis=1)
result['market_close'] = result.apply(lambda x: min(x.market_close_x, x.market_close_y), axis=1)
result = result.merge(s3, how=how, right_index=True, left_index=True)
print(result)
```
```
market_open_x market_close_x \
2017-01-03 2017-01-02 23:01:00+00:00 2017-01-03 23:00:00+00:00
2017-01-04 2017-01-03 23:01:00+00:00 2017-01-04 23:00:00+00:00
market_open_y market_close_y \
2017-01-03 2017-01-03 14:30:00+00:00 2017-01-03 21:00:00+00:00
2017-01-04 2017-01-04 14:30:00+00:00 2017-01-04 21:00:00+00:00
market_open_x market_close_x \
2017-01-03 2017-01-03 14:30:00+00:00 2017-01-03 21:00:00+00:00
2017-01-04 2017-01-04 14:30:00+00:00 2017-01-04 21:00:00+00:00
market_open_y market_close_y
2017-01-03 2017-01-03 01:01:00+00:00 2017-01-03 23:00:00+00:00
2017-01-04 2017-01-04 01:01:00+00:00 2017-01-04 23:00:00+00:00
```
In addition, the use of `pop` on the input list has the side effect of changing
the input, I believe this is an unintended side effect? | rsheftel/pandas_market_calendars | diff --git a/tests/test_utils.py b/tests/test_utils.py
index 743f84e..05a4a98 100644
--- a/tests/test_utils.py
+++ b/tests/test_utils.py
@@ -200,5 +200,9 @@ def test_merge_schedules():
actual = mcal.merge_schedules([sch1, sch2], how='inner')
assert_frame_equal(actual, expected)
+ # joining more than two calendars works correctly
+ actual = mcal.merge_schedules([sch1, sch1, sch1], how='inner')
+ assert_frame_equal(actual, sch1)
+
with pytest.raises(ValueError):
mcal.merge_schedules([sch1, sch2], how='left')
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 1
},
"num_modified_files": 1
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist"
],
"pre_install": null,
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
coverage==6.2
execnet==1.9.0
importlib-metadata==4.8.3
iniconfig==1.1.1
numpy==1.19.5
packaging==21.3
pandas==1.1.5
-e git+https://github.com/rsheftel/pandas_market_calendars.git@72223f2615375c1ec321eb22abdaf9fda5bea1e5#egg=pandas_market_calendars
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
pytest-cov==4.0.0
pytest-xdist==3.0.2
python-dateutil==2.9.0.post0
pytz==2025.2
six==1.17.0
tomli==1.2.3
toolz==0.12.0
typing_extensions==4.1.1
zipp==3.6.0
| name: pandas_market_calendars
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- coverage==6.2
- execnet==1.9.0
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- numpy==1.19.5
- packaging==21.3
- pandas==1.1.5
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-cov==4.0.0
- pytest-xdist==3.0.2
- python-dateutil==2.9.0.post0
- pytz==2025.2
- six==1.17.0
- tomli==1.2.3
- toolz==0.12.0
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/pandas_market_calendars
| [
"tests/test_utils.py::test_merge_schedules"
] | [] | [
"tests/test_utils.py::test_get_calendar",
"tests/test_utils.py::test_date_range_daily",
"tests/test_utils.py::test_date_range_hour",
"tests/test_utils.py::test_date_range_minute"
] | [] | MIT License | 1,616 | 560 | [
"pandas_market_calendars/calendar_utils.py"
] |
|
jupyter__nbgrader-873 | 9822e38532e0c5a31a26316a16d539d51324c424 | 2017-08-24 19:18:13 | 5bc6f37c39c8b10b8f60440b2e6d9487e63ef3f1 | jhamrick: Turns out this fix is not actually correct, though. That's what I get for submitting a PR without running all the tests locally... | diff --git a/nbgrader/api.py b/nbgrader/api.py
index cb9b552b..6c79aabc 100644
--- a/nbgrader/api.py
+++ b/nbgrader/api.py
@@ -2318,7 +2318,8 @@ class Gradebook(object):
A list of dictionaries, one per student
"""
- if len(self.assignments) > 0:
+ total_score, = self.db.query(func.sum(Assignment.max_score)).one()
+ if len(self.assignments) > 0 and total_score > 0:
# subquery the scores
scores = self.db.query(
Student.id,
@@ -2332,7 +2333,7 @@ class Gradebook(object):
students = self.db.query(
Student.id, Student.first_name, Student.last_name,
Student.email, _scores,
- func.sum(GradeCell.max_score)
+ func.sum(Assignment.max_score)
).outerjoin(scores, Student.id == scores.c.id)\
.group_by(
Student.id, Student.first_name, Student.last_name,
| Error generating assignment and managing students
Firstly, I apologize if this is not the proper place to report issues like this. I am exploring nbgrader, and have had some trouble getting it to work.
I have installed nbgrader on a local Jupyterhub installation and have been working through the example notebooks.
I had to create a `~/.jupyter/nbgrader_config.py` file that has the following contents:
```
c = get_config()
c.CourseDirectory.root = '~/Jupyter/ChEn6703_test_nbgrader'
```
(note that I used the full path above, but replaced the prefix with `~` for security reasons)
There are a few strange things going on though:
## Problems with student entries
1. When I go to the `Manage Students` section of nbgrader, it doesn't show any students.
1. When I do `nbgrader db student list --log-level='DEBUG'` I get something which is inconsistent with the empty list in the `Manage Students` dialog.
```
[DbStudentListApp | DEBUG] Searching ['~/Jupyter/ChEn6703_test_nbgrader', '~/.jupyter', '/usr/etc/jupyter', '/usr/local/etc/jupyter', '/etc/jupyter'] for config files
[DbStudentListApp | DEBUG] Looking for jupyter_config in /etc/jupyter
[DbStudentListApp | DEBUG] Looking for jupyter_config in /usr/local/etc/jupyter
[DbStudentListApp | DEBUG] Looking for jupyter_config in /usr/etc/jupyter
[DbStudentListApp | DEBUG] Looking for jupyter_config in ~/.jupyter
[DbStudentListApp | DEBUG] Looking for jupyter_config in ~/Jupyter/ChEn6703_test_nbgrader
[DbStudentListApp | DEBUG] Looking for nbgrader_config in /etc/jupyter
[DbStudentListApp | DEBUG] Looking for nbgrader_config in /usr/local/etc/jupyter
[DbStudentListApp | DEBUG] Looking for nbgrader_config in /usr/etc/jupyter
[DbStudentListApp | DEBUG] Looking for nbgrader_config in ~/.jupyter
[DbStudentListApp | DEBUG] Loaded config file: ~/.jupyter/nbgrader_config.py
[DbStudentListApp | DEBUG] Looking for nbgrader_config in ~/Jupyter/ChEn6703_test_nbgrader
[DbStudentListApp | DEBUG] Loaded config file: ~/Jupyter/ChEn6703_test_nbgrader/nbgrader_conf
ig.py
[DbStudentListApp | DEBUG] Looking for nbgrader_config in ~/Jupyter/ChEn6703_test_nbgrader
[DbStudentListApp | DEBUG] Loaded config file: ~/Jupyter/ChEn6703_test_nbgrader/nbgrader_conf
ig.py
There are 1 students in the database:
1 (Flinstone, Fred) -- None
```
3. If I manually enter a student in the `Manage Students` dialog, the student shows up and then disappears.
## Problems when generating the assignment
When I go to `Manage Assignments` and click on the `Generate` icon for the example `ps1` assignment, I get:
```
[INFO] Copying ~/Jupyter/ChEn6703_test_nbgrader/source/./ps1/jupyter.png -> ~/Jupyter/ChEn6703_test_nbgrader/release/./ps1/jupyter.png
[INFO] Updating/creating assignment 'ps1': {}
[INFO] Converting notebook ~/Jupyter/ChEn6703_test_nbgrader/source/./ps1/problem1.ipynb
[ERROR] There was an error processing assignment: ~/Jupyter/ChEn6703_test_nbgrader/source/./ps1
[ERROR] Traceback (most recent call last):
File "/usr/local/lib/python3.5/dist-packages/nbgrader/converters/base.py", line 288, in convert_notebooks
self.convert_single_notebook(notebook_filename)
File "/usr/local/lib/python3.5/dist-packages/nbgrader/converters/base.py", line 244, in convert_single_notebook
output, resources = self.exporter.from_filename(notebook_filename, resources=resources)
File "/usr/local/lib/python3.5/dist-packages/nbconvert/exporters/exporter.py", line 172, in from_filename
return self.from_file(f, resources=resources, **kw)
File "/usr/local/lib/python3.5/dist-packages/nbconvert/exporters/exporter.py", line 190, in from_file
return self.from_notebook_node(nbformat.read(file_stream, as_version=4), resources=resources, **kw)
File "/usr/local/lib/python3.5/dist-packages/nbconvert/exporters/notebook.py", line 31, in from_notebook_node
nb_copy, resources = super(NotebookExporter, self).from_notebook_node(nb, resources, **kw)
File "/usr/local/lib/python3.5/dist-packages/nbconvert/exporters/exporter.py", line 132, in from_notebook_node
nb_copy, resources = self._preprocess(nb_copy, resources)
File "/usr/local/lib/python3.5/dist-packages/nbconvert/exporters/exporter.py", line 309, in _preprocess
nbc, resc = preprocessor(nbc, resc)
File "/usr/local/lib/python3.5/dist-packages/nbconvert/preprocessors/base.py", line 47, in __call__
return self.preprocess(nb,resources)
File "/usr/local/lib/python3.5/dist-packages/nbgrader/preprocessors/headerfooter.py", line 23, in preprocess
with open(self.header, 'r') as fh:
FileNotFoundError: [Errno 2] No such file or directory: 'source/header.ipynb'
[WARNING] Removing failed assignment: ~/Jupyter/ChEn6703_test_nbgrader/release/ps1
[ERROR] There was an error processing assignment 'ps1' for student '.'
[ERROR] Please see the the above traceback for details on the specific errors on the above failures.
Traceback
```
I have tried regenerating the entire class and this issue persists.
If I try the command line approach: `nbgrader assign source/ps1/` I get:
```
[AssignApp | ERROR] No notebooks were matched by ~/Jupyter/ChEn6703_test_nbgrader/source/./s
```
Note the `source/./s` which seems problematic.
Any ideas on this? | jupyter/nbgrader | diff --git a/nbgrader/tests/api/test_gradebook.py b/nbgrader/tests/api/test_gradebook.py
index 361a4def..69c181d1 100644
--- a/nbgrader/tests/api/test_gradebook.py
+++ b/nbgrader/tests/api/test_gradebook.py
@@ -728,6 +728,12 @@ def test_student_dicts(assignment):
assert a == b
+def test_student_dicts_zero_points(gradebook):
+ gradebook.add_assignment("ps1")
+ s = gradebook.add_student("1234")
+ assert gradebook.student_dicts() == [s.to_dict()]
+
+
def test_notebook_submission_dicts(assignment):
assignment.add_student('hacker123')
assignment.add_student('bitdiddle')
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_media"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 3
},
"num_modified_files": 1
} | 0.5 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest pytest-cov pytest-rerunfailures coverage selenium invoke sphinx codecov cov-core",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"dev-requirements.txt",
"dev-requirements-windows.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
alembic==1.7.7
anyio==3.6.2
argon2-cffi==21.3.0
argon2-cffi-bindings==21.2.0
async-generator==1.10
attrs==22.2.0
Babel==2.11.0
backcall==0.2.0
bleach==4.1.0
certifi==2021.5.30
cffi==1.15.1
charset-normalizer==2.0.12
codecov==2.1.13
comm==0.1.4
contextvars==2.4
cov-core==1.15.0
coverage==6.2
dataclasses==0.8
decorator==5.1.1
defusedxml==0.7.1
docutils==0.18.1
entrypoints==0.4
greenlet==2.0.2
idna==3.10
imagesize==1.4.1
immutables==0.19
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig==1.1.1
invoke==2.2.0
ipykernel==5.5.6
ipython==7.16.3
ipython-genutils==0.2.0
ipywidgets==7.8.5
jedi==0.17.2
Jinja2==3.0.3
json5==0.9.16
jsonschema==3.2.0
jupyter==1.1.1
jupyter-client==7.1.2
jupyter-console==6.4.3
jupyter-core==4.9.2
jupyter-server==1.13.1
jupyterlab==3.2.9
jupyterlab-pygments==0.1.2
jupyterlab-server==2.10.3
jupyterlab_widgets==1.1.11
Mako==1.1.6
MarkupSafe==2.0.1
mistune==0.8.4
nbclassic==0.3.5
nbclient==0.5.9
nbconvert==6.0.7
nbformat==5.1.3
-e git+https://github.com/jupyter/nbgrader.git@9822e38532e0c5a31a26316a16d539d51324c424#egg=nbgrader
nest-asyncio==1.6.0
notebook==6.4.10
packaging==21.3
pandocfilters==1.5.1
parso==0.7.1
pexpect==4.9.0
pickleshare==0.7.5
pluggy==1.0.0
prometheus-client==0.17.1
prompt-toolkit==3.0.36
ptyprocess==0.7.0
py==1.11.0
pycparser==2.21
pyenchant==3.2.2
Pygments==2.14.0
pyparsing==3.1.4
pyrsistent==0.18.0
pytest==7.0.1
pytest-cov==4.0.0
pytest-rerunfailures==10.3
python-dateutil==2.9.0.post0
pytz==2025.2
pyzmq==25.1.2
requests==2.27.1
selenium==3.141.0
Send2Trash==1.8.3
six==1.17.0
sniffio==1.2.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinx-rtd-theme==2.0.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jquery==4.1
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
sphinxcontrib-spelling==7.7.0
SQLAlchemy==1.4.54
terminado==0.12.1
testpath==0.6.0
tomli==1.2.3
tornado==6.1
traitlets==4.3.3
typing_extensions==4.1.1
urllib3==1.26.20
wcwidth==0.2.13
webencodings==0.5.1
websocket-client==1.3.1
widgetsnbextension==3.6.10
zipp==3.6.0
| name: nbgrader
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- alembic==1.7.7
- anyio==3.6.2
- argon2-cffi==21.3.0
- argon2-cffi-bindings==21.2.0
- async-generator==1.10
- attrs==22.2.0
- babel==2.11.0
- backcall==0.2.0
- bleach==4.1.0
- cffi==1.15.1
- charset-normalizer==2.0.12
- codecov==2.1.13
- comm==0.1.4
- contextvars==2.4
- cov-core==1.15.0
- coverage==6.2
- dataclasses==0.8
- decorator==5.1.1
- defusedxml==0.7.1
- docutils==0.18.1
- entrypoints==0.4
- greenlet==2.0.2
- idna==3.10
- imagesize==1.4.1
- immutables==0.19
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- iniconfig==1.1.1
- invoke==2.2.0
- ipykernel==5.5.6
- ipython==7.16.3
- ipython-genutils==0.2.0
- ipywidgets==7.8.5
- jedi==0.17.2
- jinja2==3.0.3
- json5==0.9.16
- jsonschema==3.2.0
- jupyter==1.1.1
- jupyter-client==7.1.2
- jupyter-console==6.4.3
- jupyter-core==4.9.2
- jupyter-server==1.13.1
- jupyterlab==3.2.9
- jupyterlab-pygments==0.1.2
- jupyterlab-server==2.10.3
- jupyterlab-widgets==1.1.11
- mako==1.1.6
- markupsafe==2.0.1
- mistune==0.8.4
- nbclassic==0.3.5
- nbclient==0.5.9
- nbconvert==6.0.7
- nbformat==5.1.3
- nest-asyncio==1.6.0
- notebook==6.4.10
- packaging==21.3
- pandocfilters==1.5.1
- parso==0.7.1
- pexpect==4.9.0
- pickleshare==0.7.5
- pluggy==1.0.0
- prometheus-client==0.17.1
- prompt-toolkit==3.0.36
- ptyprocess==0.7.0
- py==1.11.0
- pycparser==2.21
- pyenchant==3.2.2
- pygments==2.14.0
- pyparsing==3.1.4
- pyrsistent==0.18.0
- pytest==7.0.1
- pytest-cov==4.0.0
- pytest-rerunfailures==10.3
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyzmq==25.1.2
- requests==2.27.1
- selenium==3.141.0
- send2trash==1.8.3
- six==1.17.0
- sniffio==1.2.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinx-rtd-theme==2.0.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jquery==4.1
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- sphinxcontrib-spelling==7.7.0
- sqlalchemy==1.4.54
- terminado==0.12.1
- testpath==0.6.0
- tomli==1.2.3
- tornado==6.1
- traitlets==4.3.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- wcwidth==0.2.13
- webencodings==0.5.1
- websocket-client==1.3.1
- widgetsnbextension==3.6.10
- zipp==3.6.0
prefix: /opt/conda/envs/nbgrader
| [
"nbgrader/tests/api/test_gradebook.py::test_student_dicts_zero_points"
] | [
"nbgrader/tests/api/test_gradebook.py::test_create_invalid_grade_cell",
"nbgrader/tests/api/test_gradebook.py::test_create_invalid_source_cell",
"nbgrader/tests/api/test_gradebook.py::test_notebook_submission_dicts",
"nbgrader/tests/api/test_gradebook.py::test_submission_dicts"
] | [
"nbgrader/tests/api/test_gradebook.py::test_init",
"nbgrader/tests/api/test_gradebook.py::test_add_student",
"nbgrader/tests/api/test_gradebook.py::test_add_duplicate_student",
"nbgrader/tests/api/test_gradebook.py::test_find_student",
"nbgrader/tests/api/test_gradebook.py::test_find_nonexistant_student",
"nbgrader/tests/api/test_gradebook.py::test_remove_student",
"nbgrader/tests/api/test_gradebook.py::test_update_or_create_student",
"nbgrader/tests/api/test_gradebook.py::test_add_assignment",
"nbgrader/tests/api/test_gradebook.py::test_add_duplicate_assignment",
"nbgrader/tests/api/test_gradebook.py::test_find_assignment",
"nbgrader/tests/api/test_gradebook.py::test_find_nonexistant_assignment",
"nbgrader/tests/api/test_gradebook.py::test_remove_assignment",
"nbgrader/tests/api/test_gradebook.py::test_update_or_create_assignment",
"nbgrader/tests/api/test_gradebook.py::test_add_notebook",
"nbgrader/tests/api/test_gradebook.py::test_add_duplicate_notebook",
"nbgrader/tests/api/test_gradebook.py::test_find_notebook",
"nbgrader/tests/api/test_gradebook.py::test_find_nonexistant_notebook",
"nbgrader/tests/api/test_gradebook.py::test_update_or_create_notebook",
"nbgrader/tests/api/test_gradebook.py::test_remove_notebook",
"nbgrader/tests/api/test_gradebook.py::test_add_grade_cell",
"nbgrader/tests/api/test_gradebook.py::test_add_grade_cell_with_args",
"nbgrader/tests/api/test_gradebook.py::test_add_duplicate_grade_cell",
"nbgrader/tests/api/test_gradebook.py::test_find_grade_cell",
"nbgrader/tests/api/test_gradebook.py::test_find_nonexistant_grade_cell",
"nbgrader/tests/api/test_gradebook.py::test_update_or_create_grade_cell",
"nbgrader/tests/api/test_gradebook.py::test_add_solution_cell",
"nbgrader/tests/api/test_gradebook.py::test_add_duplicate_solution_cell",
"nbgrader/tests/api/test_gradebook.py::test_find_solution_cell",
"nbgrader/tests/api/test_gradebook.py::test_find_nonexistant_solution_cell",
"nbgrader/tests/api/test_gradebook.py::test_update_or_create_solution_cell",
"nbgrader/tests/api/test_gradebook.py::test_add_source_cell",
"nbgrader/tests/api/test_gradebook.py::test_add_source_cell_with_args",
"nbgrader/tests/api/test_gradebook.py::test_add_duplicate_source_cell",
"nbgrader/tests/api/test_gradebook.py::test_find_source_cell",
"nbgrader/tests/api/test_gradebook.py::test_find_nonexistant_source_cell",
"nbgrader/tests/api/test_gradebook.py::test_update_or_create_source_cell",
"nbgrader/tests/api/test_gradebook.py::test_add_submission",
"nbgrader/tests/api/test_gradebook.py::test_add_duplicate_submission",
"nbgrader/tests/api/test_gradebook.py::test_remove_submission",
"nbgrader/tests/api/test_gradebook.py::test_update_or_create_submission",
"nbgrader/tests/api/test_gradebook.py::test_find_submission_notebook",
"nbgrader/tests/api/test_gradebook.py::test_find_submission_notebook_by_id",
"nbgrader/tests/api/test_gradebook.py::test_remove_submission_notebook",
"nbgrader/tests/api/test_gradebook.py::test_find_grade",
"nbgrader/tests/api/test_gradebook.py::test_find_grade_by_id",
"nbgrader/tests/api/test_gradebook.py::test_find_comment",
"nbgrader/tests/api/test_gradebook.py::test_find_comment_by_id",
"nbgrader/tests/api/test_gradebook.py::test_average_assignment_score",
"nbgrader/tests/api/test_gradebook.py::test_average_notebook_score",
"nbgrader/tests/api/test_gradebook.py::test_student_dicts"
] | [] | BSD 3-Clause "New" or "Revised" License | 1,619 | 256 | [
"nbgrader/api.py"
] |
OpenMined__PySyft-188 | a81fde47c4593a787f9bb61dd655af58497bef4a | 2017-08-26 08:14:00 | a81fde47c4593a787f9bb61dd655af58497bef4a | diff --git a/syft/tensor.py b/syft/tensor.py
index f9eff20a64..ee9cf8715d 100644
--- a/syft/tensor.py
+++ b/syft/tensor.py
@@ -518,3 +518,41 @@ class TensorBase(object):
if self.encrypted:
return NotImplemented
self.data = 1 / np.array(self.data)
+
+ def log(self):
+ """performs elementwise logarithm operation
+ and returns a new Tensor"""
+ if self.encrypted:
+ return NotImplemented
+ out = np.log(self.data)
+ return TensorBase(out)
+
+ def log_(self):
+ """performs elementwise logarithm operation inplace"""
+ if self.encrypted:
+ return NotImplemented
+ self.data = np.log(self.data)
+ return self
+
+ def log1p(self):
+ """performs elementwise log(1+x) operation
+ and returns new tensor"""
+ if self.encrypted:
+ return NotImplemented
+ out = np.log1p(self.data)
+ return TensorBase(out)
+
+ def log1p_(self):
+ """performs elementwise log(1+x) operation inplace"""
+ if self.encrypted:
+ return NotImplemented
+ self.data = np.log1p(self.data)
+ return self
+
+ def log_normal_(self, mean=0, stdev=1.0):
+ """Fills give tensor with samples from a lognormal distribution
+ with given mean and stdev"""
+ if self.encrypted:
+ return NotImplemented
+ self.data = np.random.lognormal(mean, stdev, self.shape())
+ return self
| Implement Default log Functionality for Base Tensor Type
**User Story A:** As a Data Scientist using Syft's Base Tensor type, we want to implement a default method for computing operations on a Tensor of arbitrary type. For this ticket to be complete, log(), log1p(), and log_normal() should return a new tensor and log_(), log1p_(), and log_normal_() should perform the operation inline. For a reference on the operation this performs check out [PyTorch](http://pytorch.org/docs/master/tensors.html)'s documentation.
**Acceptance Criteria:**
- If the Base Tensor type's attribute "encrypted" is set to True, it should return a NotImplemented error.
- a unit test demonstrating the correct operation on the Base Tensor type implemented over int and float Tensors.
- inline documentation in the python code. For inspiration on inline documentation, please check out [PyTorch](http://pytorch.org/docs/master/tensors.html)'s documentation for this operator. | OpenMined/PySyft | diff --git a/tests/test_tensor.py b/tests/test_tensor.py
index 1ae53eb8ce..d73c443827 100644
--- a/tests/test_tensor.py
+++ b/tests/test_tensor.py
@@ -2,6 +2,7 @@ from syft import TensorBase
import syft
import unittest
import numpy as np
+import math
# Here's our "unit tests".
@@ -374,3 +375,21 @@ class reciprocalTests(unittest.TestCase):
t1 = TensorBase(np.array([2, 3, 4]))
t1.reciprocal_()
self.assertTrue(np.allclose(t1.data, [0.5, 0.33333333, 0.25]))
+
+
+class logTests(unittest.TestCase):
+ def testLog(self):
+ t1 = TensorBase(np.array([math.exp(1), math.exp(2), math.exp(3)]))
+ self.assertTrue(np.array_equal((t1.log()).data, [1., 2., 3.]))
+
+ def testLog_(self):
+ t1 = TensorBase(np.array([math.exp(1), math.exp(2), math.exp(3)]))
+ self.assertTrue(np.array_equal((t1.log_()).data, [1., 2., 3.]))
+
+ def testLog1p(self):
+ t1 = TensorBase(np.array([1, 2, 3]))
+ self.assertTrue(np.allclose((t1.log1p()).data, [0.69314718, 1.09861229, 1.38629436]))
+
+ def testLog1p_(self):
+ t1 = TensorBase(np.array([1, 2, 3]))
+ self.assertTrue(np.allclose((t1.log1p_()).data, [0.69314718, 1.09861229, 1.38629436]))
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": null,
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | args==0.1.0
clint==0.5.1
exceptiongroup==1.2.2
flake8==7.2.0
iniconfig==2.1.0
line_profiler==4.2.0
mccabe==0.7.0
numpy==1.26.4
packaging==24.2
phe==1.5.0
pluggy==1.5.0
pycodestyle==2.13.0
pyflakes==3.3.1
pyRserve==1.0.4
pytest==8.3.5
pytest-flake8==1.3.0
-e git+https://github.com/OpenMined/PySyft.git@a81fde47c4593a787f9bb61dd655af58497bef4a#egg=syft
tomli==2.2.1
| name: PySyft
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- args==0.1.0
- clint==0.5.1
- exceptiongroup==1.2.2
- flake8==7.2.0
- iniconfig==2.1.0
- line-profiler==4.2.0
- mccabe==0.7.0
- numpy==1.26.4
- packaging==24.2
- phe==1.5.0
- pluggy==1.5.0
- pycodestyle==2.13.0
- pyflakes==3.3.1
- pyrserve==1.0.4
- pytest==8.3.5
- pytest-flake8==1.3.0
- tomli==2.2.1
prefix: /opt/conda/envs/PySyft
| [
"tests/test_tensor.py::logTests::testLog",
"tests/test_tensor.py::logTests::testLog1p",
"tests/test_tensor.py::logTests::testLog1p_",
"tests/test_tensor.py::logTests::testLog_"
] | [] | [
"tests/test_tensor.py::DimTests::testDimOne",
"tests/test_tensor.py::AddTests::testInplace",
"tests/test_tensor.py::AddTests::testScalar",
"tests/test_tensor.py::AddTests::testSimple",
"tests/test_tensor.py::CeilTests::testCeil_",
"tests/test_tensor.py::FloorTests::testFloor_",
"tests/test_tensor.py::SubTests::testInplace",
"tests/test_tensor.py::SubTests::testScalar",
"tests/test_tensor.py::SubTests::testSimple",
"tests/test_tensor.py::MultTests::testInplace",
"tests/test_tensor.py::MultTests::testScalar",
"tests/test_tensor.py::MultTests::testSimple",
"tests/test_tensor.py::DivTests::testInplace",
"tests/test_tensor.py::DivTests::testScalar",
"tests/test_tensor.py::DivTests::testSimple",
"tests/test_tensor.py::AbsTests::testabs",
"tests/test_tensor.py::AbsTests::testabs_",
"tests/test_tensor.py::ShapeTests::testShape",
"tests/test_tensor.py::SqrtTests::testSqrt",
"tests/test_tensor.py::SqrtTests::testSqrt_",
"tests/test_tensor.py::SumTests::testDimIsNotNoneInt",
"tests/test_tensor.py::SumTests::testDimNoneInt",
"tests/test_tensor.py::EqualTests::testEqOp",
"tests/test_tensor.py::EqualTests::testEqual",
"tests/test_tensor.py::EqualTests::testIneqOp",
"tests/test_tensor.py::EqualTests::testNotEqual",
"tests/test_tensor.py::IndexTests::testIndexing",
"tests/test_tensor.py::sigmoidTests::testSigmoid",
"tests/test_tensor.py::addmm::testaddmm1d",
"tests/test_tensor.py::addmm::testaddmm2d",
"tests/test_tensor.py::addmm::testaddmm_1d",
"tests/test_tensor.py::addmm::testaddmm_2d",
"tests/test_tensor.py::addcmulTests::testaddcmul1d",
"tests/test_tensor.py::addcmulTests::testaddcmul2d",
"tests/test_tensor.py::addcmulTests::testaddcmul_1d",
"tests/test_tensor.py::addcmulTests::testaddcmul_2d",
"tests/test_tensor.py::addcdivTests::testaddcdiv1d",
"tests/test_tensor.py::addcdivTests::testaddcdiv2d",
"tests/test_tensor.py::addcdivTests::testaddcdiv_1d",
"tests/test_tensor.py::addcdivTests::testaddcdiv_2d",
"tests/test_tensor.py::addmvTests::testaddmv",
"tests/test_tensor.py::addmvTests::testaddmv_",
"tests/test_tensor.py::addbmmTests::testaddbmm",
"tests/test_tensor.py::addbmmTests::testaddbmm_",
"tests/test_tensor.py::baddbmmTests::testbaddbmm",
"tests/test_tensor.py::baddbmmTests::testbaddbmm_",
"tests/test_tensor.py::unsqueezeTests::testUnsqueeze",
"tests/test_tensor.py::unsqueezeTests::testUnsqueeze_",
"tests/test_tensor.py::expTests::testexp",
"tests/test_tensor.py::expTests::testexp_",
"tests/test_tensor.py::fracTests::testfrac",
"tests/test_tensor.py::fracTests::testfrac_",
"tests/test_tensor.py::rsqrtTests::testrsqrt",
"tests/test_tensor.py::rsqrtTests::testrsqrt_",
"tests/test_tensor.py::numpyTests::testnumpy",
"tests/test_tensor.py::reciprocalTests::testreciprocal",
"tests/test_tensor.py::reciprocalTests::testrsqrt_"
] | [] | Apache License 2.0 | 1,625 | 397 | [
"syft/tensor.py"
] |
|
ucfopen__canvasapi-65 | 9fd5a6a5dfcbbaae52b9d42752ffee163852b1f9 | 2017-08-31 15:15:45 | f2faa1835e104aae764a1fc7638c284d2888639f | diff --git a/canvasapi/requester.py b/canvasapi/requester.py
index 2388493..0c09669 100644
--- a/canvasapi/requester.py
+++ b/canvasapi/requester.py
@@ -1,4 +1,5 @@
from __future__ import absolute_import, division, print_function, unicode_literals
+from datetime import datetime
import requests
@@ -58,6 +59,20 @@ class Requester(object):
auth_header = {'Authorization': 'Bearer %s' % (self.access_token)}
headers.update(auth_header)
+ # Convert kwargs into list of 2-tuples and combine with _kwargs.
+ _kwargs = [] if _kwargs is None else _kwargs
+ for kw, arg, in kwargs.items():
+ _kwargs.append((kw, arg))
+
+ # Do any final argument processing before sending to request method.
+ for i, kwarg in enumerate(_kwargs):
+ kw, arg = kwarg
+
+ # Convert any datetime objects into ISO 8601 formatted strings.
+ if isinstance(arg, datetime):
+ _kwargs[i] = (kw, arg.isoformat())
+
+ # Determine the appropriate request method.
if method == 'GET':
req_method = self._get_request
elif method == 'POST':
@@ -67,13 +82,10 @@ class Requester(object):
elif method == 'PUT':
req_method = self._put_request
- # Convert kwargs into list of 2-tuples and combine with _kwargs.
- _kwargs = [] if _kwargs is None else _kwargs
- for kw, arg, in kwargs.items():
- _kwargs.append((kw, arg))
-
+ # Call the request method
response = req_method(full_url, headers, _kwargs)
+ # Raise for status codes
if response.status_code == 400:
raise BadRequest(response.json())
elif response.status_code == 401:
| Canvas.list_calendar_events should accept datetime instances
The `Canvas.list_calendar_events` method feeds its parameters down to a `GET /api/v1/calendar_events` request. [That request](https://canvas.instructure.com/doc/api/calendar_events.html#method.calendar_events_api.index) accepts `start_date` and `end_date` parameters, which “should be formatted as: yyyy-mm-dd or ISO 8601 YYYY-MM-DDTHH:MM:SSZ.” When using these parameters from Python, it would be convenient to provide values as Python `datetime` instances. For example, to list events between a course’ start and end dates:
```python
from datetime import datetime, timedelta
soon = datetime.today() + timedelta(weeks=2)
events = canvas.list_calendar_events(end_date=soon)
```
This seems like an obvious way to make the call, but iterating over the result fails using version 0.6.0 of `canvasapi`:
```
canvasapi.exceptions.BadRequest: {'end_date': 'Invalid date or invalid datetime for end_date'}'}
```
The call works if I convert the date into a suitably-formatted (ISO 8601) string:
```python
from datetime import datetime, timedelta
soon = datetime.today() + timedelta(weeks=2)
soon_iso = soon.isoformat()
events = canvas.list_calendar_events(end_date=soon_iso)
```
It would be very convenient if `Canvas.list_calendar_events` could accept parameters in the standard `datetime` a Python programmer would naturally expect to use, and would handle the ISO 8601 conversion internally.
Of course, dates and times appear in many other places in the Canvas API. I am reporting this against `Canvas.list_calendar_events` because that is where it is affecting me right now. But perhaps the best fix would be at a lower level within `canvasapi` so that *every* `datetime` instance is converted using `.isoformat` whenever given as the value of any request parameter. Is there a single, centralized place where that could be done? | ucfopen/canvasapi | diff --git a/tests/test_requester.py b/tests/test_requester.py
index 0fabb9f..d65f2d4 100644
--- a/tests/test_requester.py
+++ b/tests/test_requester.py
@@ -1,7 +1,10 @@
from __future__ import absolute_import, division, print_function, unicode_literals
+from datetime import datetime
import unittest
+import requests
import requests_mock
+from six.moves.urllib.parse import quote
from canvasapi import Canvas
from canvasapi.exceptions import (
@@ -26,12 +29,42 @@ class TestRequester(unittest.TestCase):
response = self.requester.request('GET', 'fake_get_request')
self.assertEqual(response.status_code, 200)
+ def test_request_get_datetime(self, m):
+ date = datetime.today()
+
+ def custom_matcher(request):
+ match_query = 'date={}'.format(quote(date.isoformat()).lower())
+ if request.query == match_query:
+ resp = requests.Response()
+ resp.status_code = 200
+ return resp
+
+ m.add_matcher(custom_matcher)
+
+ response = self.requester.request('GET', 'test', date=date)
+ self.assertEqual(response.status_code, 200)
+
def test_request_post(self, m):
register_uris({'requests': ['post']}, m)
response = self.requester.request('POST', 'fake_post_request')
self.assertEqual(response.status_code, 200)
+ def test_request_post_datetime(self, m):
+ date = datetime.today()
+
+ def custom_matcher(request):
+ match_text = 'date={}'.format(quote(date.isoformat()))
+ if request.text == match_text:
+ resp = requests.Response()
+ resp.status_code = 200
+ return resp
+
+ m.add_matcher(custom_matcher)
+
+ response = self.requester.request('POST', 'test', date=date)
+ self.assertEqual(response.status_code, 200)
+
def test_request_delete(self, m):
register_uris({'requests': ['delete']}, m)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 1
} | 0.6 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"coverage",
"flake8",
"pyflakes",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements.txt",
"dev_requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
attrs==22.2.0
Babel==2.11.0
-e git+https://github.com/ucfopen/canvasapi.git@9fd5a6a5dfcbbaae52b9d42752ffee163852b1f9#egg=canvasapi
certifi==2021.5.30
charset-normalizer==2.0.12
coverage==6.2
docutils==0.18.1
flake8==5.0.4
idna==3.10
imagesize==1.4.1
importlib-metadata==4.2.0
iniconfig==1.1.1
Jinja2==3.0.3
MarkupSafe==2.0.1
mccabe==0.7.0
packaging==21.3
pluggy==1.0.0
py==1.11.0
pycodestyle==2.9.1
pyflakes==2.5.0
Pygments==2.14.0
pyparsing==3.1.4
pytest==7.0.1
pytz==2025.2
requests==2.27.1
requests-mock==1.12.1
six==1.17.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinx-rtd-theme==2.0.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jquery==4.1
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
zipp==3.6.0
| name: canvasapi
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- attrs==22.2.0
- babel==2.11.0
- charset-normalizer==2.0.12
- coverage==6.2
- docutils==0.18.1
- flake8==5.0.4
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.2.0
- iniconfig==1.1.1
- jinja2==3.0.3
- markupsafe==2.0.1
- mccabe==0.7.0
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pycodestyle==2.9.1
- pyflakes==2.5.0
- pygments==2.14.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytz==2025.2
- requests==2.27.1
- requests-mock==1.12.1
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinx-rtd-theme==2.0.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jquery==4.1
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- zipp==3.6.0
prefix: /opt/conda/envs/canvasapi
| [
"tests/test_requester.py::TestRequester::test_request_get_datetime",
"tests/test_requester.py::TestRequester::test_request_post_datetime"
] | [] | [
"tests/test_requester.py::TestRequester::test_request_400",
"tests/test_requester.py::TestRequester::test_request_401_InvalidAccessToken",
"tests/test_requester.py::TestRequester::test_request_401_Unauthorized",
"tests/test_requester.py::TestRequester::test_request_404",
"tests/test_requester.py::TestRequester::test_request_500",
"tests/test_requester.py::TestRequester::test_request_delete",
"tests/test_requester.py::TestRequester::test_request_get",
"tests/test_requester.py::TestRequester::test_request_post",
"tests/test_requester.py::TestRequester::test_request_put"
] | [] | MIT License | 1,639 | 450 | [
"canvasapi/requester.py"
] |
|
jupyter__nbgrader-888 | 6a5a895fb1de74af4de84f55288421833a655976 | 2017-09-01 12:04:34 | 5bc6f37c39c8b10b8f60440b2e6d9487e63ef3f1 | diff --git a/nbgrader/preprocessors/overwritekernelspec.py b/nbgrader/preprocessors/overwritekernelspec.py
index cff743e3..f8995051 100644
--- a/nbgrader/preprocessors/overwritekernelspec.py
+++ b/nbgrader/preprocessors/overwritekernelspec.py
@@ -14,9 +14,17 @@ class OverwriteKernelspec(NbGraderPreprocessor):
db_url = resources['nbgrader']['db_url']
with Gradebook(db_url) as gb:
- kernelspec = gb.find_notebook(notebook_id, assignment_id).kernelspec
+ kernelspec = json.loads(
+ gb.find_notebook(notebook_id, assignment_id).kernelspec)
+ self.log.debug("Source notebook kernelspec: {}".format(kernelspec))
+ self.log.debug(
+ "Submitted notebook kernelspec: {}"
+ "".format(nb.metadata.get('kernelspec', None))
+ )
if kernelspec:
self.log.debug(
- "Overwriting notebook kernelspec with: {}".format(kernelspec))
- nb.metadata['kernelspec'] = json.loads(kernelspec)
+ "Overwriting submitted notebook kernelspec: {}"
+ "".format(kernelspec)
+ )
+ nb.metadata['kernelspec'] = kernelspec
return nb, resources
| Tests failing due to kernelspec validation on autograding
See #880 and https://travis-ci.org/jupyter/nbgrader/builds/270643170 | jupyter/nbgrader | diff --git a/nbgrader/tests/apps/test_nbgrader_autograde.py b/nbgrader/tests/apps/test_nbgrader_autograde.py
index 8d8576a9..eba18c8d 100644
--- a/nbgrader/tests/apps/test_nbgrader_autograde.py
+++ b/nbgrader/tests/apps/test_nbgrader_autograde.py
@@ -658,21 +658,49 @@ class TestNbGraderAutograde(BaseTestApp):
assert not os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "p1.ipynb"))
assert not os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "p2.ipynb"))
- def test_handle_failure_missing_kernelspec(self, course_dir):
+ def test_missing_source_kernelspec(self, course_dir):
with open("nbgrader_config.py", "a") as fh:
fh.write("""c.CourseDirectory.db_assignments = [dict(name='ps1', duedate='2015-02-02 14:58:23.948203 PST')]\n""")
fh.write("""c.CourseDirectory.db_students = [dict(id="foo"), dict(id="bar")]\n""")
fh.write("""c.ClearSolutions.code_stub = {'python': '## Answer', 'blah': '## Answer'}""")
- self._empty_notebook(join(course_dir, "source", "ps1", "p1.ipynb"), kernel="blah")
+ self._empty_notebook(join(course_dir, "source", "ps1", "p1.ipynb"))
run_nbgrader(["assign", "ps1"])
- self._empty_notebook(join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"), kernel="blah")
- self._empty_notebook(join(course_dir, "submitted", "bar", "ps1", "p1.ipynb"), kernel="python")
+ self._empty_notebook(join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"), kernel="python")
+ run_nbgrader(["autograde", "ps1"])
+ assert os.path.exists(join(course_dir, "autograded", "foo", "ps1"))
+ assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "p1.ipynb"))
+
+ self._empty_notebook(join(course_dir, "submitted", "bar", "ps1", "p1.ipynb"), kernel="blarg")
run_nbgrader(["autograde", "ps1"], retcode=1)
+ assert not os.path.exists(join(course_dir, "autograded", "bar", "ps1"))
+
+ def test_incorrect_source_kernelspec(self, course_dir):
+ with open("nbgrader_config.py", "a") as fh:
+ fh.write("""c.CourseDirectory.db_assignments = [dict(name='ps1', duedate='2015-02-02 14:58:23.948203 PST')]\n""")
+ fh.write("""c.CourseDirectory.db_students = [dict(id="foo"), dict(id="bar")]\n""")
+ fh.write("""c.ClearSolutions.code_stub = {'python': '## Answer', 'blah': '## Answer'}""")
+
+ self._empty_notebook(join(course_dir, "source", "ps1", "p1.ipynb"), kernel="blah")
+ run_nbgrader(["assign", "ps1"])
+ self._empty_notebook(join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"), kernel="python")
+ run_nbgrader(["autograde", "ps1"], retcode=1)
assert not os.path.exists(join(course_dir, "autograded", "foo", "ps1"))
- assert not os.path.exists(join(course_dir, "autograded", "bar", "ps1"))
+
+ def test_incorrect_submitted_kernelspec(self, db, course_dir):
+ with open("nbgrader_config.py", "a") as fh:
+ fh.write("""c.CourseDirectory.db_assignments = [dict(name='ps1', duedate='2015-02-02 14:58:23.948203 PST')]\n""")
+ fh.write("""c.CourseDirectory.db_students = [dict(id="foo"), dict(id="bar")]""")
+
+ self._empty_notebook(join(course_dir, "source", "ps1", "p1.ipynb"), kernel="python")
+ run_nbgrader(["assign", "ps1"])
+
+ self._empty_notebook(join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"), kernel="blah")
+ run_nbgrader(["autograde", "ps1"])
+ assert os.path.exists(join(course_dir, "autograded", "foo", "ps1"))
+ assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "p1.ipynb"))
def test_no_execute(self, course_dir):
with open("nbgrader_config.py", "a") as fh:
@@ -771,23 +799,6 @@ class TestNbGraderAutograde(BaseTestApp):
assert not os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "p1.ipynb"))
- def test_overwrite_kernelspec(self, db, course_dir):
- with open("nbgrader_config.py", "a") as fh:
- fh.write("""c.CourseDirectory.db_assignments = [dict(name='ps1', duedate='2015-02-02 14:58:23.948203 PST')]\n""")
- fh.write("""c.CourseDirectory.db_students = [dict(id="foo"), dict(id="bar")]""")
-
- self._empty_notebook(join(course_dir, "source", "ps1", "p1.ipynb"))
- run_nbgrader(["assign", "ps1"])
-
- self._empty_notebook(join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
- self._empty_notebook(join(course_dir, "submitted", "bar", "ps1", "p1.ipynb"), kernel="blah")
- run_nbgrader(["autograde", "ps1"])
-
- assert os.path.exists(join(course_dir, "autograded", "foo", "ps1"))
- assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "p1.ipynb"))
- assert os.path.exists(join(course_dir, "autograded", "bar", "ps1"))
- assert os.path.isfile(join(course_dir, "autograded", "bar", "ps1", "p1.ipynb"))
-
def test_missing_files(self, db, course_dir):
with open("nbgrader_config.py", "a") as fh:
fh.write("""c.CourseDirectory.db_assignments = [dict(name='ps1', duedate='2015-02-02 14:58:23.948203 PST')]\n""")
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 3,
"test_score": 2
},
"num_modified_files": 1
} | 0.5 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pyenchant",
"sphinxcontrib-spelling",
"sphinx_rtd_theme",
"nbval",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.5",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
alembic==1.7.7
anyio==3.6.2
argon2-cffi==21.3.0
argon2-cffi-bindings==21.2.0
async-generator==1.10
attrs==22.2.0
Babel==2.11.0
backcall==0.2.0
bleach==4.1.0
certifi==2021.5.30
cffi==1.15.1
charset-normalizer==2.0.12
comm==0.1.4
contextvars==2.4
coverage==6.2
dataclasses==0.8
decorator==5.1.1
defusedxml==0.7.1
docutils==0.18.1
entrypoints==0.4
greenlet==2.0.2
idna==3.10
imagesize==1.4.1
immutables==0.19
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig==1.1.1
ipykernel==5.5.6
ipython==7.16.3
ipython-genutils==0.2.0
ipywidgets==7.8.5
jedi==0.17.2
Jinja2==3.0.3
json5==0.9.16
jsonschema==3.2.0
jupyter==1.1.1
jupyter-client==7.1.2
jupyter-console==6.4.3
jupyter-core==4.9.2
jupyter-server==1.13.1
jupyterlab==3.2.9
jupyterlab-pygments==0.1.2
jupyterlab-server==2.10.3
jupyterlab_widgets==1.1.11
Mako==1.1.6
MarkupSafe==2.0.1
mistune==0.8.4
nbclassic==0.3.5
nbclient==0.5.9
nbconvert==6.0.7
nbformat==5.1.3
-e git+https://github.com/jupyter/nbgrader.git@6a5a895fb1de74af4de84f55288421833a655976#egg=nbgrader
nbval==0.10.0
nest-asyncio==1.6.0
notebook==6.4.10
packaging==21.3
pandocfilters==1.5.1
parso==0.7.1
pexpect==4.9.0
pickleshare==0.7.5
pluggy==1.0.0
prometheus-client==0.17.1
prompt-toolkit==3.0.36
ptyprocess==0.7.0
py==1.11.0
pycparser==2.21
pyenchant==3.2.2
Pygments==2.14.0
pyparsing==3.1.4
pyrsistent==0.18.0
pytest==7.0.1
python-dateutil==2.9.0.post0
pytz==2025.2
pyzmq==25.1.2
requests==2.27.1
Send2Trash==1.8.3
six==1.17.0
sniffio==1.2.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinx-rtd-theme==2.0.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jquery==4.1
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
sphinxcontrib-spelling==7.7.0
SQLAlchemy==1.4.54
terminado==0.12.1
testpath==0.6.0
tomli==1.2.3
tornado==6.1
traitlets==4.3.3
typing_extensions==4.1.1
urllib3==1.26.20
wcwidth==0.2.13
webencodings==0.5.1
websocket-client==1.3.1
widgetsnbextension==3.6.10
zipp==3.6.0
| name: nbgrader
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- alembic==1.7.7
- anyio==3.6.2
- argon2-cffi==21.3.0
- argon2-cffi-bindings==21.2.0
- async-generator==1.10
- attrs==22.2.0
- babel==2.11.0
- backcall==0.2.0
- bleach==4.1.0
- cffi==1.15.1
- charset-normalizer==2.0.12
- comm==0.1.4
- contextvars==2.4
- coverage==6.2
- dataclasses==0.8
- decorator==5.1.1
- defusedxml==0.7.1
- docutils==0.18.1
- entrypoints==0.4
- greenlet==2.0.2
- idna==3.10
- imagesize==1.4.1
- immutables==0.19
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- iniconfig==1.1.1
- ipykernel==5.5.6
- ipython==7.16.3
- ipython-genutils==0.2.0
- ipywidgets==7.8.5
- jedi==0.17.2
- jinja2==3.0.3
- json5==0.9.16
- jsonschema==3.2.0
- jupyter==1.1.1
- jupyter-client==7.1.2
- jupyter-console==6.4.3
- jupyter-core==4.9.2
- jupyter-server==1.13.1
- jupyterlab==3.2.9
- jupyterlab-pygments==0.1.2
- jupyterlab-server==2.10.3
- jupyterlab-widgets==1.1.11
- mako==1.1.6
- markupsafe==2.0.1
- mistune==0.8.4
- nbclassic==0.3.5
- nbclient==0.5.9
- nbconvert==6.0.7
- nbformat==5.1.3
- nbval==0.10.0
- nest-asyncio==1.6.0
- notebook==6.4.10
- packaging==21.3
- pandocfilters==1.5.1
- parso==0.7.1
- pexpect==4.9.0
- pickleshare==0.7.5
- pluggy==1.0.0
- prometheus-client==0.17.1
- prompt-toolkit==3.0.36
- ptyprocess==0.7.0
- py==1.11.0
- pycparser==2.21
- pyenchant==3.2.2
- pygments==2.14.0
- pyparsing==3.1.4
- pyrsistent==0.18.0
- pytest==7.0.1
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyzmq==25.1.2
- requests==2.27.1
- send2trash==1.8.3
- six==1.17.0
- sniffio==1.2.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinx-rtd-theme==2.0.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jquery==4.1
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- sphinxcontrib-spelling==7.7.0
- sqlalchemy==1.4.54
- terminado==0.12.1
- testpath==0.6.0
- tomli==1.2.3
- tornado==6.1
- traitlets==4.3.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- wcwidth==0.2.13
- webencodings==0.5.1
- websocket-client==1.3.1
- widgetsnbextension==3.6.10
- zipp==3.6.0
prefix: /opt/conda/envs/nbgrader
| [
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_permissions",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_custom_permissions",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_handle_failure",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_handle_failure_single_notebook",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_missing_source_kernelspec",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_missing_files"
] | [
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_force_single_notebook",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_update_newer_single_notebook"
] | [
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_help",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_missing_student",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_missing_assignment",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_grade",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_grade_timestamp",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_grade_empty_timestamp",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_late_submission_penalty_none",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_late_submission_penalty_zero",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_late_submission_penalty_plugin",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_force",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_filter_notebook",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_grade_overwrite_files",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_side_effects",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_skip_extra_notebooks",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_update_newer",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_hidden_tests_single_notebook",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_incorrect_source_kernelspec",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_incorrect_submitted_kernelspec",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_no_execute",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_infinite_loop",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_grade_missing_notebook"
] | [] | BSD 3-Clause "New" or "Revised" License | 1,642 | 314 | [
"nbgrader/preprocessors/overwritekernelspec.py"
] |
|
dask__dask-2647 | d32e8b7b91e130037701daffabf663d8f1bae5de | 2017-09-01 15:47:53 | c560965c8fc0da7cbc0920d43b7011d2721307d3 | fujiisoup: Thanks for the review.
Done. | diff --git a/dask/array/slicing.py b/dask/array/slicing.py
index 269ffbc49..42e65cc45 100644
--- a/dask/array/slicing.py
+++ b/dask/array/slicing.py
@@ -62,7 +62,7 @@ def sanitize_index(ind):
# If a 1-element tuple, unwrap the element
nonzero = nonzero[0]
return np.asanyarray(nonzero)
- elif np.issubdtype(index_array.dtype, int):
+ elif np.issubdtype(index_array.dtype, np.integer):
return index_array
elif np.issubdtype(index_array.dtype, float):
int_index = index_array.astype(np.intp)
| Indexing dask.array by an unsigned-integer np.ndarray
Indexing dask.array by an unsigned-integer np.ndarray raises TypeError.
Is it an intended behavior?
```python
In [1]: import numpy as np
...: import dask.array as da
...:
...: array = da.from_array(np.arange(6), chunks=3)
...: array[np.array([0])] # indexing works with integer array
...:
Out[1]: dask.array<getitem, shape=(1,), dtype=int64, chunksize=(1,)>
In [2]: array[np.array([0], dtype=np.uint64)] # not with uint array
...:
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-2-6161ebf9f837> in <module>()
----> 1 array[np.array([0], dtype=np.uint64)] # not with uint array
~/anaconda3/envs/xarray/lib/python3.5/site-packages/dask/array/core.py in __getitem__(self, index)
1197 return self
1198
-> 1199 dsk, chunks = slice_array(out, self.name, self.chunks, index)
1200
1201 dsk2 = sharedict.merge(self.dask, (out, dsk))
~/anaconda3/envs/xarray/lib/python3.5/site-packages/dask/array/slicing.py in slice_array(out_name, in_name, blockdims, index)
140 """
141 index = replace_ellipsis(len(blockdims), index)
--> 142 index = tuple(map(sanitize_index, index))
143
144 blockdims = tuple(map(tuple, blockdims))
~/anaconda3/envs/xarray/lib/python3.5/site-packages/dask/array/slicing.py in sanitize_index(ind)
76 first_err)
77 else:
---> 78 raise TypeError("Invalid index type", type(ind), ind)
79
80
TypeError: ('Invalid index type', <class 'numpy.ndarray'>, array([0], dtype=uint64))
```
python 3.5
dask 0.15.2
originally raised in an [xarray issue](https://github.com/pydata/xarray/issues/1405). | dask/dask | diff --git a/dask/array/tests/test_array_core.py b/dask/array/tests/test_array_core.py
index d6af863f3..f6f04ee7a 100644
--- a/dask/array/tests/test_array_core.py
+++ b/dask/array/tests/test_array_core.py
@@ -1611,6 +1611,14 @@ def test_slice_with_floats():
d[[1, 1.5]]
+def test_slice_with_uint():
+ x = np.arange(10)
+ dx = da.from_array(x, chunks=5)
+ inds = np.array([0, 3, 6], dtype='u8')
+ assert_eq(dx[inds], x[inds])
+ assert_eq(dx[inds.astype('u4')], x[inds.astype('u4')])
+
+
def test_vindex_basic():
x = np.arange(56).reshape((7, 8))
d = da.from_array(x, chunks=(3, 4))
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 1
} | 0.15 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[complete]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-xdist",
"pytest-cov",
"pytest-mock",
"pytest-asyncio",
"numpy>=1.16.0",
"pandas>=1.0.0"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | aiobotocore==2.1.2
aiohttp==3.8.6
aioitertools==0.11.0
aiosignal==1.2.0
async-timeout==4.0.2
asynctest==0.13.0
attrs==22.2.0
botocore==1.23.24
certifi==2021.5.30
charset-normalizer==3.0.1
click==8.0.4
cloudpickle==2.2.1
coverage==6.2
-e git+https://github.com/dask/dask.git@d32e8b7b91e130037701daffabf663d8f1bae5de#egg=dask
distributed==1.19.3
execnet==1.9.0
frozenlist==1.2.0
fsspec==2022.1.0
HeapDict==1.0.1
idna==3.10
idna-ssl==1.1.0
importlib-metadata==4.8.3
iniconfig==1.1.1
jmespath==0.10.0
locket==1.0.0
msgpack-python==0.5.6
multidict==5.2.0
numpy==1.19.5
packaging==21.3
pandas==1.1.5
partd==1.2.0
pluggy==1.0.0
psutil==7.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
pytest-asyncio==0.16.0
pytest-cov==4.0.0
pytest-mock==3.6.1
pytest-xdist==3.0.2
python-dateutil==2.9.0.post0
pytz==2025.2
s3fs==2022.1.0
six==1.17.0
sortedcontainers==2.4.0
tblib==1.7.0
tomli==1.2.3
toolz==0.12.0
tornado==6.1
typing_extensions==4.1.1
urllib3==1.26.20
wrapt==1.16.0
yarl==1.7.2
zict==2.1.0
zipp==3.6.0
| name: dask
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- aiobotocore==2.1.2
- aiohttp==3.8.6
- aioitertools==0.11.0
- aiosignal==1.2.0
- async-timeout==4.0.2
- asynctest==0.13.0
- attrs==22.2.0
- botocore==1.23.24
- charset-normalizer==3.0.1
- click==8.0.4
- cloudpickle==2.2.1
- coverage==6.2
- distributed==1.19.3
- execnet==1.9.0
- frozenlist==1.2.0
- fsspec==2022.1.0
- heapdict==1.0.1
- idna==3.10
- idna-ssl==1.1.0
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- jmespath==0.10.0
- locket==1.0.0
- msgpack-python==0.5.6
- multidict==5.2.0
- numpy==1.19.5
- packaging==21.3
- pandas==1.1.5
- partd==1.2.0
- pluggy==1.0.0
- psutil==7.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-asyncio==0.16.0
- pytest-cov==4.0.0
- pytest-mock==3.6.1
- pytest-xdist==3.0.2
- python-dateutil==2.9.0.post0
- pytz==2025.2
- s3fs==2022.1.0
- six==1.17.0
- sortedcontainers==2.4.0
- tblib==1.7.0
- tomli==1.2.3
- toolz==0.12.0
- tornado==6.1
- typing-extensions==4.1.1
- urllib3==1.26.20
- wrapt==1.16.0
- yarl==1.7.2
- zict==2.1.0
- zipp==3.6.0
prefix: /opt/conda/envs/dask
| [
"dask/array/tests/test_array_core.py::test_slice_with_uint"
] | [
"dask/array/tests/test_array_core.py::test_concatenate_unknown_axes",
"dask/array/tests/test_array_core.py::test_field_access",
"dask/array/tests/test_array_core.py::test_field_access_with_shape",
"dask/array/tests/test_array_core.py::test_matmul",
"dask/array/tests/test_array_core.py::test_to_dask_dataframe"
] | [
"dask/array/tests/test_array_core.py::test_getem",
"dask/array/tests/test_array_core.py::test_top",
"dask/array/tests/test_array_core.py::test_top_supports_broadcasting_rules",
"dask/array/tests/test_array_core.py::test_concatenate3_on_scalars",
"dask/array/tests/test_array_core.py::test_chunked_dot_product",
"dask/array/tests/test_array_core.py::test_chunked_transpose_plus_one",
"dask/array/tests/test_array_core.py::test_broadcast_dimensions_works_with_singleton_dimensions",
"dask/array/tests/test_array_core.py::test_broadcast_dimensions",
"dask/array/tests/test_array_core.py::test_Array",
"dask/array/tests/test_array_core.py::test_uneven_chunks",
"dask/array/tests/test_array_core.py::test_numblocks_suppoorts_singleton_block_dims",
"dask/array/tests/test_array_core.py::test_keys",
"dask/array/tests/test_array_core.py::test_Array_computation",
"dask/array/tests/test_array_core.py::test_stack",
"dask/array/tests/test_array_core.py::test_short_stack",
"dask/array/tests/test_array_core.py::test_stack_scalars",
"dask/array/tests/test_array_core.py::test_stack_promote_type",
"dask/array/tests/test_array_core.py::test_stack_rechunk",
"dask/array/tests/test_array_core.py::test_concatenate",
"dask/array/tests/test_array_core.py::test_concatenate_rechunk",
"dask/array/tests/test_array_core.py::test_concatenate_fixlen_strings",
"dask/array/tests/test_array_core.py::test_binops",
"dask/array/tests/test_array_core.py::test_broadcast_shapes",
"dask/array/tests/test_array_core.py::test_elemwise_on_scalars",
"dask/array/tests/test_array_core.py::test_partial_by_order",
"dask/array/tests/test_array_core.py::test_elemwise_with_ndarrays",
"dask/array/tests/test_array_core.py::test_elemwise_differently_chunked",
"dask/array/tests/test_array_core.py::test_elemwise_dtype",
"dask/array/tests/test_array_core.py::test_operators",
"dask/array/tests/test_array_core.py::test_operator_dtype_promotion",
"dask/array/tests/test_array_core.py::test_T",
"dask/array/tests/test_array_core.py::test_norm",
"dask/array/tests/test_array_core.py::test_broadcast_to",
"dask/array/tests/test_array_core.py::test_broadcast_to_array",
"dask/array/tests/test_array_core.py::test_broadcast_to_scalar",
"dask/array/tests/test_array_core.py::test_reshape[original_shape0-new_shape0-chunks0]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape1-new_shape1-5]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape2-new_shape2-5]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape3-new_shape3-12]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape4-new_shape4-12]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape5-new_shape5-chunks5]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape6-new_shape6-4]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape7-new_shape7-4]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape8-new_shape8-4]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape9-new_shape9-2]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape10-new_shape10-2]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape11-new_shape11-2]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape12-new_shape12-2]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape13-new_shape13-2]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape14-new_shape14-2]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape15-new_shape15-2]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape16-new_shape16-chunks16]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape17-new_shape17-3]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape18-new_shape18-4]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape19-new_shape19-chunks19]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape20-new_shape20-1]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape21-new_shape21-1]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape22-new_shape22-24]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape23-new_shape23-6]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape24-new_shape24-6]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape25-new_shape25-6]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape26-new_shape26-chunks26]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape27-new_shape27-chunks27]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape28-new_shape28-chunks28]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape29-new_shape29-chunks29]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape30-new_shape30-chunks30]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape31-new_shape31-chunks31]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape32-new_shape32-chunks32]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape33-new_shape33-chunks33]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape34-new_shape34-chunks34]",
"dask/array/tests/test_array_core.py::test_reshape_exceptions",
"dask/array/tests/test_array_core.py::test_reshape_splat",
"dask/array/tests/test_array_core.py::test_reshape_fails_for_dask_only",
"dask/array/tests/test_array_core.py::test_reshape_unknown_dimensions",
"dask/array/tests/test_array_core.py::test_full",
"dask/array/tests/test_array_core.py::test_map_blocks",
"dask/array/tests/test_array_core.py::test_map_blocks2",
"dask/array/tests/test_array_core.py::test_map_blocks_with_constants",
"dask/array/tests/test_array_core.py::test_map_blocks_with_kwargs",
"dask/array/tests/test_array_core.py::test_map_blocks_with_chunks",
"dask/array/tests/test_array_core.py::test_map_blocks_dtype_inference",
"dask/array/tests/test_array_core.py::test_from_function_requires_block_args",
"dask/array/tests/test_array_core.py::test_repr",
"dask/array/tests/test_array_core.py::test_slicing_with_ellipsis",
"dask/array/tests/test_array_core.py::test_slicing_with_ndarray",
"dask/array/tests/test_array_core.py::test_dtype",
"dask/array/tests/test_array_core.py::test_blockdims_from_blockshape",
"dask/array/tests/test_array_core.py::test_coerce",
"dask/array/tests/test_array_core.py::test_store_delayed_target",
"dask/array/tests/test_array_core.py::test_store",
"dask/array/tests/test_array_core.py::test_store_regions",
"dask/array/tests/test_array_core.py::test_store_compute_false",
"dask/array/tests/test_array_core.py::test_store_locks",
"dask/array/tests/test_array_core.py::test_np_array_with_zero_dimensions",
"dask/array/tests/test_array_core.py::test_dtype_complex",
"dask/array/tests/test_array_core.py::test_astype",
"dask/array/tests/test_array_core.py::test_arithmetic",
"dask/array/tests/test_array_core.py::test_elemwise_consistent_names",
"dask/array/tests/test_array_core.py::test_optimize",
"dask/array/tests/test_array_core.py::test_slicing_with_non_ndarrays",
"dask/array/tests/test_array_core.py::test_getter",
"dask/array/tests/test_array_core.py::test_size",
"dask/array/tests/test_array_core.py::test_nbytes",
"dask/array/tests/test_array_core.py::test_itemsize",
"dask/array/tests/test_array_core.py::test_Array_normalizes_dtype",
"dask/array/tests/test_array_core.py::test_from_array_with_lock",
"dask/array/tests/test_array_core.py::test_from_array_no_asarray",
"dask/array/tests/test_array_core.py::test_from_array_getitem",
"dask/array/tests/test_array_core.py::test_asarray",
"dask/array/tests/test_array_core.py::test_asanyarray",
"dask/array/tests/test_array_core.py::test_from_func",
"dask/array/tests/test_array_core.py::test_concatenate3_2",
"dask/array/tests/test_array_core.py::test_map_blocks3",
"dask/array/tests/test_array_core.py::test_from_array_with_missing_chunks",
"dask/array/tests/test_array_core.py::test_normalize_chunks",
"dask/array/tests/test_array_core.py::test_raise_on_no_chunks",
"dask/array/tests/test_array_core.py::test_chunks_is_immutable",
"dask/array/tests/test_array_core.py::test_raise_on_bad_kwargs",
"dask/array/tests/test_array_core.py::test_long_slice",
"dask/array/tests/test_array_core.py::test_ellipsis_slicing",
"dask/array/tests/test_array_core.py::test_point_slicing",
"dask/array/tests/test_array_core.py::test_point_slicing_with_full_slice",
"dask/array/tests/test_array_core.py::test_slice_with_floats",
"dask/array/tests/test_array_core.py::test_vindex_basic",
"dask/array/tests/test_array_core.py::test_vindex_nd",
"dask/array/tests/test_array_core.py::test_vindex_errors",
"dask/array/tests/test_array_core.py::test_vindex_merge",
"dask/array/tests/test_array_core.py::test_empty_array",
"dask/array/tests/test_array_core.py::test_memmap",
"dask/array/tests/test_array_core.py::test_to_npy_stack",
"dask/array/tests/test_array_core.py::test_view",
"dask/array/tests/test_array_core.py::test_view_fortran",
"dask/array/tests/test_array_core.py::test_map_blocks_with_changed_dimension",
"dask/array/tests/test_array_core.py::test_broadcast_chunks",
"dask/array/tests/test_array_core.py::test_chunks_error",
"dask/array/tests/test_array_core.py::test_array_compute_forward_kwargs",
"dask/array/tests/test_array_core.py::test_dont_fuse_outputs",
"dask/array/tests/test_array_core.py::test_dont_dealias_outputs",
"dask/array/tests/test_array_core.py::test_timedelta_op",
"dask/array/tests/test_array_core.py::test_to_delayed",
"dask/array/tests/test_array_core.py::test_to_delayed_optimizes",
"dask/array/tests/test_array_core.py::test_cumulative",
"dask/array/tests/test_array_core.py::test_atop_names",
"dask/array/tests/test_array_core.py::test_atop_new_axes",
"dask/array/tests/test_array_core.py::test_atop_kwargs",
"dask/array/tests/test_array_core.py::test_atop_chunks",
"dask/array/tests/test_array_core.py::test_from_delayed",
"dask/array/tests/test_array_core.py::test_A_property",
"dask/array/tests/test_array_core.py::test_copy_mutate",
"dask/array/tests/test_array_core.py::test_npartitions",
"dask/array/tests/test_array_core.py::test_astype_gh1151",
"dask/array/tests/test_array_core.py::test_elemwise_name",
"dask/array/tests/test_array_core.py::test_map_blocks_name",
"dask/array/tests/test_array_core.py::test_from_array_names",
"dask/array/tests/test_array_core.py::test_array_picklable",
"dask/array/tests/test_array_core.py::test_from_array_raises_on_bad_chunks",
"dask/array/tests/test_array_core.py::test_concatenate_axes",
"dask/array/tests/test_array_core.py::test_atop_concatenate",
"dask/array/tests/test_array_core.py::test_common_blockdim",
"dask/array/tests/test_array_core.py::test_uneven_chunks_that_fit_neatly",
"dask/array/tests/test_array_core.py::test_elemwise_uneven_chunks",
"dask/array/tests/test_array_core.py::test_uneven_chunks_atop",
"dask/array/tests/test_array_core.py::test_warn_bad_rechunking",
"dask/array/tests/test_array_core.py::test_optimize_fuse_keys",
"dask/array/tests/test_array_core.py::test_concatenate_stack_dont_warn",
"dask/array/tests/test_array_core.py::test_map_blocks_delayed",
"dask/array/tests/test_array_core.py::test_no_chunks",
"dask/array/tests/test_array_core.py::test_no_chunks_2d",
"dask/array/tests/test_array_core.py::test_no_chunks_yes_chunks",
"dask/array/tests/test_array_core.py::test_raise_informative_errors_no_chunks",
"dask/array/tests/test_array_core.py::test_no_chunks_slicing_2d",
"dask/array/tests/test_array_core.py::test_index_array_with_array_1d",
"dask/array/tests/test_array_core.py::test_index_array_with_array_2d",
"dask/array/tests/test_array_core.py::test_setitem_1d",
"dask/array/tests/test_array_core.py::test_setitem_2d",
"dask/array/tests/test_array_core.py::test_setitem_errs",
"dask/array/tests/test_array_core.py::test_zero_slice_dtypes",
"dask/array/tests/test_array_core.py::test_zero_sized_array_rechunk",
"dask/array/tests/test_array_core.py::test_atop_zero_shape",
"dask/array/tests/test_array_core.py::test_atop_zero_shape_new_axes",
"dask/array/tests/test_array_core.py::test_broadcast_against_zero_shape",
"dask/array/tests/test_array_core.py::test_fast_from_array",
"dask/array/tests/test_array_core.py::test_random_from_array",
"dask/array/tests/test_array_core.py::test_concatenate_errs",
"dask/array/tests/test_array_core.py::test_stack_errs",
"dask/array/tests/test_array_core.py::test_atop_with_numpy_arrays",
"dask/array/tests/test_array_core.py::test_elemwise_with_lists[other0-100]",
"dask/array/tests/test_array_core.py::test_elemwise_with_lists[other0-6]",
"dask/array/tests/test_array_core.py::test_elemwise_with_lists[other1-100]",
"dask/array/tests/test_array_core.py::test_elemwise_with_lists[other1-6]",
"dask/array/tests/test_array_core.py::test_elemwise_with_lists[other2-100]",
"dask/array/tests/test_array_core.py::test_elemwise_with_lists[other2-6]",
"dask/array/tests/test_array_core.py::test_constructor_plugin",
"dask/array/tests/test_array_core.py::test_no_warnings_on_metadata",
"dask/array/tests/test_array_core.py::test_delayed_array_key_hygeine"
] | [] | BSD 3-Clause "New" or "Revised" License | 1,645 | 166 | [
"dask/array/slicing.py"
] |
google__mobly-328 | 51c912d1a8ffd5ace4a9a744a46498515cf5c145 | 2017-09-01 21:02:53 | 7e5e62af4ab4537bf619f0ee403c05f004c5baf0 | dthkao:
Review status: 0 of 1 files reviewed at latest revision, 1 unresolved discussion.
---
*[mobly/controllers/android_device.py, line 839 at r1](https://reviewable.io:443/reviews/google/mobly/328#-KszMvOnUpIQ_wuRiftL:-KszMvOnUpIQ_wuRiftM:b-dddf90) ([raw file](https://github.com/google/mobly/blob/41979c89e8b741071f4d90fe81cc0f8724c9dec1/mobly/controllers/android_device.py#L839)):*
> ```Python
> except AttributeError:
> extra_params = ''
> cmd = '"%s" -s %s logcat -v threadtime %s >> %s' % (
> ```
Can we quote the file path in the literal rather than edit the string?
---
*Comments from [Reviewable](https://reviewable.io:443/reviews/google/mobly/328)*
<!-- Sent from Reviewable.io -->
xpconanfan:
Review status: 0 of 1 files reviewed at latest revision, 1 unresolved discussion.
---
*[mobly/controllers/android_device.py, line 839 at r1](https://reviewable.io:443/reviews/google/mobly/328#-KszMvOnUpIQ_wuRiftL:-KszOwCoUGOHTmyHnR4c:b-896fix) ([raw file](https://github.com/google/mobly/blob/41979c89e8b741071f4d90fe81cc0f8724c9dec1/mobly/controllers/android_device.py#L839)):*
<details><summary><i>Previously, dthkao (David T.H. Kao) wrote…</i></summary><blockquote>
Can we quote the file path in the literal rather than edit the string?
</blockquote></details>
Done.
---
*Comments from [Reviewable](https://reviewable.io:443/reviews/google/mobly/328)*
<!-- Sent from Reviewable.io -->
dthkao:
Review status: 0 of 1 files reviewed at latest revision, 2 unresolved discussions.
---
*[mobly/controllers/android_device.py, line 832 at r2](https://reviewable.io:443/reviews/google/mobly/328#-KszPom4LWZUJNxFsMzj:-KszPom4LWZUJNxFsMzk:bqebhii) ([raw file](https://github.com/google/mobly/blob/fb26406eb9181bf644ace98857e9f54bd2f53c5f/mobly/controllers/android_device.py#L832)):*
> ```Python
> self.adb.shell('logpersist.stop --clear')
> self.adb.shell('logpersist.start')
> f_name = 'adblog,%s,%s.txt' % ('model a c', self.serial)
> ```
why this change?
---
*Comments from [Reviewable](https://reviewable.io:443/reviews/google/mobly/328)*
<!-- Sent from Reviewable.io -->
xpconanfan:
Review status: 0 of 2 files reviewed at latest revision, 2 unresolved discussions.
---
*[mobly/controllers/android_device.py, line 832 at r2](https://reviewable.io:443/reviews/google/mobly/328#-KszPom4LWZUJNxFsMzj:-KszR96tRrUELpuNC-IO:b-896fix) ([raw file](https://github.com/google/mobly/blob/fb26406eb9181bf644ace98857e9f54bd2f53c5f/mobly/controllers/android_device.py#L832)):*
<details><summary><i>Previously, dthkao (David T.H. Kao) wrote…</i></summary><blockquote>
why this change?
</blockquote></details>
Done.
---
*Comments from [Reviewable](https://reviewable.io:443/reviews/google/mobly/328)*
<!-- Sent from Reviewable.io -->
dthkao: <img class="emoji" title=":lgtm:" alt=":lgtm:" align="absmiddle" src="https://reviewable.io/lgtm.png" height="20" width="61"/>
---
Review status: 0 of 2 files reviewed at latest revision, 2 unresolved discussions.
---
*Comments from [Reviewable](https://reviewable.io:443/reviews/google/mobly/328#-:-KtGF4JfiLwz8PYZvweh:bnfp4nl)*
<!-- Sent from Reviewable.io -->
k2fong:
Review status: 0 of 2 files reviewed at latest revision, 2 unresolved discussions.
---
*[mobly/controllers/android_device.py, line 839 at r1](https://reviewable.io:443/reviews/google/mobly/328#-KszMvOnUpIQ_wuRiftL:-KtHnpJS_gLK3fm0aWke:b-4nkc0v) ([raw file](https://github.com/google/mobly/blob/41979c89e8b741071f4d90fe81cc0f8724c9dec1/mobly/controllers/android_device.py#L839)):*
<details><summary><i>Previously, xpconanfan (Ang Li) wrote…</i></summary><blockquote>
Done.
</blockquote></details>
why not have the quote inside the string literal? seems like that'll follow how adb is done.
---
*Comments from [Reviewable](https://reviewable.io:443/reviews/google/mobly/328)*
<!-- Sent from Reviewable.io -->
xpconanfan:
Review status: 0 of 2 files reviewed at latest revision, 2 unresolved discussions.
---
*[mobly/controllers/android_device.py, line 839 at r1](https://reviewable.io:443/reviews/google/mobly/328#-KszMvOnUpIQ_wuRiftL:-KtHrazK6XDU2xpFjRls:b-896fix) ([raw file](https://github.com/google/mobly/blob/41979c89e8b741071f4d90fe81cc0f8724c9dec1/mobly/controllers/android_device.py#L839)):*
<details><summary><i>Previously, k2fong wrote…</i></summary><blockquote>
why not have the quote inside the string literal? seems like that'll follow how adb is done.
</blockquote></details>
Done.
---
*Comments from [Reviewable](https://reviewable.io:443/reviews/google/mobly/328)*
<!-- Sent from Reviewable.io -->
k2fong: <img class="emoji" title=":lgtm:" alt=":lgtm:" align="absmiddle" src="https://reviewable.io/lgtm.png" height="20" width="61"/>
---
Review status: 0 of 2 files reviewed at latest revision, 2 unresolved discussions.
---
*Comments from [Reviewable](https://reviewable.io:443/reviews/google/mobly/328#-:-KtHvHPp4h2cIKq9J2C1:bnfp4nl)*
<!-- Sent from Reviewable.io -->
| diff --git a/mobly/controllers/android_device.py b/mobly/controllers/android_device.py
index 5d7027e..e52e5fd 100644
--- a/mobly/controllers/android_device.py
+++ b/mobly/controllers/android_device.py
@@ -836,10 +836,8 @@ class AndroidDevice(object):
extra_params = self.adb_logcat_param
except AttributeError:
extra_params = ''
- cmd = '"%s" -s %s logcat -v threadtime %s >> %s' % (adb.ADB,
- self.serial,
- extra_params,
- logcat_file_path)
+ cmd = '"%s" -s %s logcat -v threadtime %s >> "%s"' % (
+ adb.ADB, self.serial, extra_params, logcat_file_path)
process = utils.start_standing_subprocess(cmd, shell=True)
self._adb_logcat_process = process
self.adb_logcat_file_path = logcat_file_path
| `AndroidDevice` adb logcat file name gets truncated
If device model name includes space, the output file name of adb logcat is incorrect.
For example, for model "aqua power hd-4g", we expect `adblog,aqua power hd-4g,usb:3-5.2.3.txt`, but we actually get `adblog,aqua`. | google/mobly | diff --git a/tests/mobly/controllers/android_device_test.py b/tests/mobly/controllers/android_device_test.py
index 1849496..59c5529 100755
--- a/tests/mobly/controllers/android_device_test.py
+++ b/tests/mobly/controllers/android_device_test.py
@@ -326,7 +326,7 @@ class AndroidDeviceTest(unittest.TestCase):
creat_dir_mock.assert_called_with(os.path.dirname(expected_log_path))
adb_cmd = '"adb" -s %s logcat -v threadtime >> %s'
start_proc_mock.assert_called_with(
- adb_cmd % (ad.serial, expected_log_path), shell=True)
+ adb_cmd % (ad.serial, '"%s"' % expected_log_path), shell=True)
self.assertEqual(ad.adb_logcat_file_path, expected_log_path)
expected_msg = (
'Logcat thread is already running, cannot start another'
@@ -373,7 +373,7 @@ class AndroidDeviceTest(unittest.TestCase):
creat_dir_mock.assert_called_with(os.path.dirname(expected_log_path))
adb_cmd = '"adb" -s %s logcat -v threadtime -b radio >> %s'
start_proc_mock.assert_called_with(
- adb_cmd % (ad.serial, expected_log_path), shell=True)
+ adb_cmd % (ad.serial, '"%s"' % expected_log_path), shell=True)
self.assertEqual(ad.adb_logcat_file_path, expected_log_path)
@mock.patch(
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 1.6 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc",
"apt-get install -y adb"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
future==1.0.0
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
-e git+https://github.com/google/mobly.git@51c912d1a8ffd5ace4a9a744a46498515cf5c145#egg=mobly
mock==1.0.1
packaging @ file:///croot/packaging_1734472117206/work
pluggy @ file:///croot/pluggy_1733169602837/work
portpicker==1.6.0
psutil==7.0.0
pytest @ file:///croot/pytest_1738938843180/work
pytz==2025.2
PyYAML==6.0.2
timeout-decorator==0.5.0
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
| name: mobly
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- future==1.0.0
- mock==1.0.1
- portpicker==1.6.0
- psutil==7.0.0
- pytz==2025.2
- pyyaml==6.0.2
- timeout-decorator==0.5.0
prefix: /opt/conda/envs/mobly
| [
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_take_logcat",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_take_logcat_with_user_param"
] | [] | [
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_build_info",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_cat_adb_log",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_debug_tag",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_instantiation",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_load_snippet",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_load_snippet_dup_attribute_name",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_load_snippet_dup_package",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_load_snippet_dup_snippet_name",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_snippet_cleanup",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_take_bug_report",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_take_bug_report_fail",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_take_bug_report_fallback",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_create_with_dict_list",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_create_with_empty_config",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_create_with_no_valid_config",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_create_with_not_list_config",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_create_with_pickup_all",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_create_with_string_list",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_create_with_usb_id",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_get_device_no_match",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_get_device_success_with_serial",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_get_device_success_with_serial_and_extra_field",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_get_device_too_many_matches",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_start_services_on_ads"
] | [] | Apache License 2.0 | 1,646 | 232 | [
"mobly/controllers/android_device.py"
] |
palantir__python-language-server-124 | aa8e1b2c12759470c18910c61f55ab4c6b80d344 | 2017-09-04 22:54:53 | e59167faa05a84bce7fd569a0ee477803a27a6e3 | diff --git a/pyls/language_server.py b/pyls/language_server.py
index cfd187b..ce11da6 100644
--- a/pyls/language_server.py
+++ b/pyls/language_server.py
@@ -37,7 +37,7 @@ def start_tcp_lang_server(bind_addr, port, handler_class):
log.info("Serving %s on (%s, %s)", handler_class.__name__, bind_addr, port)
server.serve_forever()
except KeyboardInterrupt:
- server.shutdown()
+ server.exit()
finally:
log.info("Shutting down")
server.server_close()
@@ -113,7 +113,7 @@ class LanguageServer(MethodJSONRPCServer):
self.shutdown()
def m_exit(self, **_kwargs):
- self.shutdown()
+ self.exit()
_RE_FIRST_CAP = re.compile('(.)([A-Z][a-z]+)')
diff --git a/pyls/plugins/pycodestyle_lint.py b/pyls/plugins/pycodestyle_lint.py
index a0ee0c7..668096d 100644
--- a/pyls/plugins/pycodestyle_lint.py
+++ b/pyls/plugins/pycodestyle_lint.py
@@ -64,12 +64,6 @@ class PyCodeStyleDiagnosticReport(pycodestyle.BaseReport):
'range': range,
'message': text,
'code': code,
- 'severity': _get_severity(code)
+ # Are style errors really ever errors?
+ 'severity': lsp.DiagnosticSeverity.Warning
})
-
-
-def _get_severity(code):
- if code[0] == 'E':
- return lsp.DiagnosticSeverity.Error
- elif code[0] == 'W':
- return lsp.DiagnosticSeverity.Warning
diff --git a/pyls/server.py b/pyls/server.py
index 9e3fcac..83f8002 100644
--- a/pyls/server.py
+++ b/pyls/server.py
@@ -14,23 +14,36 @@ class JSONRPCServer(object):
def __init__(self, rfile, wfile):
self.rfile = rfile
self.wfile = wfile
+ self._shutdown = False
- def shutdown(self):
- # TODO: we should handle this much better
+ def exit(self):
+ # Exit causes a complete exit of the server
self.rfile.close()
self.wfile.close()
+ def shutdown(self):
+ # Shutdown signals the server to stop, but not exit
+ self._shutdown = True
+ log.debug("Server shut down, awaiting exit notification")
+
def handle(self):
# VSCode wants us to keep the connection open, so let's handle messages in a loop
while True:
try:
data = self._read_message()
log.debug("Got message: %s", data)
+
+ if self._shutdown:
+ # Handle only the exit notification when we're shut down
+ jsonrpc.JSONRPCResponseManager.handle(data, {'exit': self.exit})
+ break
+
response = jsonrpc.JSONRPCResponseManager.handle(data, self)
+
if response is not None:
self._write_message(response.data)
except Exception:
- log.exception("Language server shutting down for uncaught exception")
+ log.exception("Language server exiting due to uncaught exception")
break
def call(self, method, params=None):
| Severity of linting erros
I currently really dislike the displayed errors of the linters.
If I use `pycodestyle` and `Pyflakes` I would prefer that `pycodestyle`s errors like `line too long`, `missing whitespace` could be *downgraded* to warnings or info. Obviously `Pyflakes` messages are more severe, but I still would like `pycodestyles` as an information.
I hope it can be understood what I mean. Is there a way to configure `pyls` this way? | palantir/python-language-server | diff --git a/test/test_language_server.py b/test/test_language_server.py
index 408fccc..e1641df 100644
--- a/test/test_language_server.py
+++ b/test/test_language_server.py
@@ -37,10 +37,10 @@ def client_server():
yield client, server
- try:
- client.call('shutdown')
- except:
- pass
+ client.call('shutdown')
+ response = _get_response(client)
+ assert response['result'] is None
+ client.notify('exit')
def test_initialize(client_server):
@@ -56,13 +56,6 @@ def test_initialize(client_server):
assert 'capabilities' in response['result']
-def test_file_closed(client_server):
- client, server = client_server
- client.rfile.close()
- with pytest.raises(Exception):
- _get_response(client)
-
-
def test_missing_message(client_server):
client, server = client_server
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 3
} | 0.5 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[test]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | cachetools==5.5.2
chardet==5.2.0
colorama==0.4.6
configparser==7.2.0
coverage==7.8.0
distlib==0.3.9
exceptiongroup==1.2.2
filelock==3.18.0
future==1.0.0
iniconfig==2.1.0
jedi==0.19.2
json-rpc==1.15.0
packaging==24.2
parso==0.8.4
platformdirs==4.3.7
pluggy==1.5.0
pycodestyle==2.13.0
pyflakes==3.3.1
pyproject-api==1.9.0
pytest==8.3.5
pytest-cov==6.0.0
-e git+https://github.com/palantir/python-language-server.git@aa8e1b2c12759470c18910c61f55ab4c6b80d344#egg=python_language_server
tomli==2.2.1
tox==4.25.0
typing_extensions==4.13.0
versioneer==0.29
virtualenv==20.29.3
yapf==0.43.0
| name: python-language-server
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- cachetools==5.5.2
- chardet==5.2.0
- colorama==0.4.6
- configparser==7.2.0
- coverage==7.8.0
- distlib==0.3.9
- exceptiongroup==1.2.2
- filelock==3.18.0
- future==1.0.0
- iniconfig==2.1.0
- jedi==0.19.2
- json-rpc==1.15.0
- packaging==24.2
- parso==0.8.4
- platformdirs==4.3.7
- pluggy==1.5.0
- pycodestyle==2.13.0
- pyflakes==3.3.1
- pyproject-api==1.9.0
- pytest==8.3.5
- pytest-cov==6.0.0
- tomli==2.2.1
- tox==4.25.0
- typing-extensions==4.13.0
- versioneer==0.29
- virtualenv==20.29.3
- yapf==0.43.0
prefix: /opt/conda/envs/python-language-server
| [
"test/test_language_server.py::test_initialize",
"test/test_language_server.py::test_missing_message",
"test/test_language_server.py::test_linting"
] | [] | [] | [] | MIT License | 1,651 | 775 | [
"pyls/language_server.py",
"pyls/plugins/pycodestyle_lint.py",
"pyls/server.py"
] |
|
OpenMined__PySyft-216 | 02ba82e94de28b2dca463f06c22c7f4df4f78dd3 | 2017-09-05 05:27:42 | 06ce023225dd613d8fb14ab2046135b93ab22376 | diff --git a/syft/tensor.py b/syft/tensor.py
index c1aa0e310e..859dd0ff4f 100644
--- a/syft/tensor.py
+++ b/syft/tensor.py
@@ -858,6 +858,19 @@ class TensorBase(object):
else:
return self.data.size
+ def cumprod(self, dim=0):
+ """Returns the cumulative product of elements in the dimension dim."""
+ if self.encrypted:
+ return NotImplemented
+ return syft.math.cumprod(self, dim)
+
+ def cumprod_(self, dim=0):
+ """calculate in-place the cumulative product of elements in the dimension dim."""
+ if self.encrypted:
+ return NotImplemented
+ self.data = syft.math.cumprod(self, dim).data
+ return self
+
def split(self, split_size, dim=0):
"""Returns tuple of tensors of equally sized tensor/chunks (if possible)"""
if self.encrypted:
| Implement Default cumprod Functionality for Base Tensor Type
**User Story A:** As a Data Scientist using Syft's Base Tensor type, we want to implement a default method for computing operations on a Tensor of arbitrary type. For this ticket to be complete, cumprod() should return a new tensor. For a reference on the operation this performs check out [PyTorch](http://pytorch.org/docs/master/tensors.html)'s documentation.
**Acceptance Criteria:**
- If the Base Tensor type's attribute "encrypted" is set to True, it should return a NotImplemented error.
- a unit test demonstrating the correct operation on the Base Tensor type implemented over int and float Tensors.
- inline documentation in the python code. For inspiration on inline documentation, please check out [PyTorch](http://pytorch.org/docs/master/tensors.html)'s documentation for this operator. | OpenMined/PySyft | diff --git a/tests/test_tensor.py b/tests/test_tensor.py
index fcded4a250..edd6a9d92c 100644
--- a/tests/test_tensor.py
+++ b/tests/test_tensor.py
@@ -647,6 +647,23 @@ class nonzeroTests(unittest.TestCase):
self.assertTrue(np.array_equal(t2.data, np.array([[0, 1, 1], [0, 1, 2]])))
+class cumprodTest(unittest.TestCase):
+ def testCumprod(self):
+ t1 = TensorBase(np.array([[1, 2, 3], [4, 5, 6]]))
+ t2 = TensorBase(np.array([[1.0, 2.0, 3.0], [4.0, 10.0, 18.0]]))
+ t3 = TensorBase(np.array([[1, 2, 6], [4, 20, 120]]))
+ self.assertTrue(np.equal(t1.cumprod(dim=0), t2).all())
+ self.assertTrue(np.equal(t1.cumprod(dim=1), t3).all())
+
+ def testCumprod_(self):
+ t1 = TensorBase(np.array([[1, 2, 3], [4, 5, 6]]))
+ t2 = TensorBase(np.array([[1.0, 2.0, 3.0], [4.0, 10.0, 18.0]]))
+ t3 = TensorBase(np.array([[1, 2, 6], [4, 20, 120]]))
+ self.assertTrue(np.equal(t1.cumprod_(dim=0), t2).all())
+ t1 = TensorBase(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]))
+ self.assertTrue(np.equal(t1.cumprod_(dim=1), t3).all())
+
+
class splitTests(unittest.TestCase):
def testSplit(self):
t1 = TensorBase(np.arange(8.0))
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | PySyft/hydrogen | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"line_profiler",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y musl-dev g++ libgmp3-dev libmpfr-dev ca-certificates libmpc-dev"
],
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | args==0.1.0
attrs==22.2.0
certifi==2021.5.30
clint==0.5.1
flake8==5.0.4
importlib-metadata==4.2.0
iniconfig==1.1.1
line-profiler==4.1.3
mccabe==0.7.0
numpy==1.19.5
packaging==21.3
phe==1.5.0
pluggy==1.0.0
py==1.11.0
pycodestyle==2.9.1
pyflakes==2.5.0
pyparsing==3.1.4
pyRserve==1.0.4
pytest==7.0.1
pytest-flake8==1.1.1
-e git+https://github.com/OpenMined/PySyft.git@02ba82e94de28b2dca463f06c22c7f4df4f78dd3#egg=syft
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: PySyft
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- args==0.1.0
- attrs==22.2.0
- clint==0.5.1
- flake8==5.0.4
- importlib-metadata==4.2.0
- iniconfig==1.1.1
- line-profiler==4.1.3
- mccabe==0.7.0
- numpy==1.19.5
- packaging==21.3
- phe==1.5.0
- pluggy==1.0.0
- py==1.11.0
- pycodestyle==2.9.1
- pyflakes==2.5.0
- pyparsing==3.1.4
- pyrserve==1.0.4
- pytest==7.0.1
- pytest-flake8==1.1.1
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/PySyft
| [
"tests/test_tensor.py::cumprodTest::testCumprod",
"tests/test_tensor.py::cumprodTest::testCumprod_"
] | [] | [
"tests/test_tensor.py::DimTests::testAsView",
"tests/test_tensor.py::DimTests::testDimOne",
"tests/test_tensor.py::DimTests::testResize",
"tests/test_tensor.py::DimTests::testResizeAs",
"tests/test_tensor.py::DimTests::testSize",
"tests/test_tensor.py::DimTests::testView",
"tests/test_tensor.py::AddTests::testInplace",
"tests/test_tensor.py::AddTests::testScalar",
"tests/test_tensor.py::AddTests::testSimple",
"tests/test_tensor.py::CeilTests::testCeil",
"tests/test_tensor.py::CeilTests::testCeil_",
"tests/test_tensor.py::ZeroTests::testZero",
"tests/test_tensor.py::FloorTests::testFloor_",
"tests/test_tensor.py::SubTests::testInplace",
"tests/test_tensor.py::SubTests::testScalar",
"tests/test_tensor.py::SubTests::testSimple",
"tests/test_tensor.py::MaxTests::testAxis",
"tests/test_tensor.py::MaxTests::testNoDim",
"tests/test_tensor.py::MultTests::testInplace",
"tests/test_tensor.py::MultTests::testScalar",
"tests/test_tensor.py::MultTests::testSimple",
"tests/test_tensor.py::DivTests::testInplace",
"tests/test_tensor.py::DivTests::testScalar",
"tests/test_tensor.py::DivTests::testSimple",
"tests/test_tensor.py::AbsTests::testabs",
"tests/test_tensor.py::AbsTests::testabs_",
"tests/test_tensor.py::ShapeTests::testShape",
"tests/test_tensor.py::SqrtTests::testSqrt",
"tests/test_tensor.py::SqrtTests::testSqrt_",
"tests/test_tensor.py::SumTests::testDimIsNotNoneInt",
"tests/test_tensor.py::SumTests::testDimNoneInt",
"tests/test_tensor.py::EqualTests::testEqOp",
"tests/test_tensor.py::EqualTests::testEqual",
"tests/test_tensor.py::EqualTests::testIneqOp",
"tests/test_tensor.py::EqualTests::testNotEqual",
"tests/test_tensor.py::IndexTests::testIndexing",
"tests/test_tensor.py::sigmoidTests::testSigmoid",
"tests/test_tensor.py::addmm::testaddmm1d",
"tests/test_tensor.py::addmm::testaddmm2d",
"tests/test_tensor.py::addmm::testaddmm_1d",
"tests/test_tensor.py::addmm::testaddmm_2d",
"tests/test_tensor.py::addcmulTests::testaddcmul1d",
"tests/test_tensor.py::addcmulTests::testaddcmul2d",
"tests/test_tensor.py::addcmulTests::testaddcmul_1d",
"tests/test_tensor.py::addcmulTests::testaddcmul_2d",
"tests/test_tensor.py::addcdivTests::testaddcdiv1d",
"tests/test_tensor.py::addcdivTests::testaddcdiv2d",
"tests/test_tensor.py::addcdivTests::testaddcdiv_1d",
"tests/test_tensor.py::addcdivTests::testaddcdiv_2d",
"tests/test_tensor.py::addmvTests::testaddmv",
"tests/test_tensor.py::addmvTests::testaddmv_",
"tests/test_tensor.py::addbmmTests::testaddbmm",
"tests/test_tensor.py::addbmmTests::testaddbmm_",
"tests/test_tensor.py::baddbmmTests::testbaddbmm",
"tests/test_tensor.py::baddbmmTests::testbaddbmm_",
"tests/test_tensor.py::transposeTests::testT",
"tests/test_tensor.py::transposeTests::testT_",
"tests/test_tensor.py::transposeTests::testTranspose",
"tests/test_tensor.py::transposeTests::testTranspose_",
"tests/test_tensor.py::unsqueezeTests::testUnsqueeze",
"tests/test_tensor.py::unsqueezeTests::testUnsqueeze_",
"tests/test_tensor.py::expTests::testexp",
"tests/test_tensor.py::expTests::testexp_",
"tests/test_tensor.py::fracTests::testfrac",
"tests/test_tensor.py::fracTests::testfrac_",
"tests/test_tensor.py::rsqrtTests::testrsqrt",
"tests/test_tensor.py::rsqrtTests::testrsqrt_",
"tests/test_tensor.py::signTests::testsign",
"tests/test_tensor.py::signTests::testsign_",
"tests/test_tensor.py::numpyTests::testnumpy",
"tests/test_tensor.py::reciprocalTests::testreciprocal",
"tests/test_tensor.py::reciprocalTests::testrsqrt_",
"tests/test_tensor.py::logTests::testLog",
"tests/test_tensor.py::logTests::testLog1p",
"tests/test_tensor.py::logTests::testLog1p_",
"tests/test_tensor.py::logTests::testLog_",
"tests/test_tensor.py::clampTests::testClampFloat",
"tests/test_tensor.py::clampTests::testClampFloatInPlace",
"tests/test_tensor.py::clampTests::testClampInt",
"tests/test_tensor.py::clampTests::testClampIntInPlace",
"tests/test_tensor.py::bernoulliTests::testBernoulli",
"tests/test_tensor.py::bernoulliTests::testBernoulli_",
"tests/test_tensor.py::uniformTests::testUniform",
"tests/test_tensor.py::uniformTests::testUniform_",
"tests/test_tensor.py::fillTests::testFill_",
"tests/test_tensor.py::topkTests::testTopK",
"tests/test_tensor.py::tolistTests::testToList",
"tests/test_tensor.py::traceTests::testTrace",
"tests/test_tensor.py::roundTests::testRound",
"tests/test_tensor.py::roundTests::testRound_",
"tests/test_tensor.py::repeatTests::testRepeat",
"tests/test_tensor.py::powTests::testPow",
"tests/test_tensor.py::powTests::testPow_",
"tests/test_tensor.py::prodTests::testProd",
"tests/test_tensor.py::randomTests::testRandom_",
"tests/test_tensor.py::nonzeroTests::testNonZero",
"tests/test_tensor.py::splitTests::testSplit",
"tests/test_tensor.py::squeezeTests::testSqueeze",
"tests/test_tensor.py::expandAsTests::testExpandAs"
] | [] | Apache License 2.0 | 1,653 | 239 | [
"syft/tensor.py"
] |
|
Clinical-Genomics__scout-593 | 7b2419a20dd5dbae88f21b9a2afe2c2d4ad5277f | 2017-09-05 12:38:59 | 8e1c3acd430a1f57f712aac29847e71cac8308f3 | diff --git a/scout/adapter/mongo/query.py b/scout/adapter/mongo/query.py
index 3a07b82d8..055963b99 100644
--- a/scout/adapter/mongo/query.py
+++ b/scout/adapter/mongo/query.py
@@ -114,15 +114,16 @@ class QueryHandler(object):
cadd_query = {'cadd_score': {'$gt': float(cadd)}}
logger.debug("Adding cadd_score: %s to query" % cadd)
- if query.get('cadd_inclusive') == 'yes':
+ if query.get('cadd_inclusive') == True:
cadd_query = {
'$or': [
cadd_query,
{'cadd_score': {'$exists': False}}
- ]}
+ ]}
logger.debug("Adding cadd inclusive to query")
mongo_query['$and'].append(cadd_query)
+
if query.get('genetic_models'):
models = query['genetic_models']
| CADD score filter monday-issue!
Thank you kindly for the quick inclusion of CADD score filtering! Will make a couple of our doctors very happy.
One major caveat though: the current version seems to filter out unknown CADD scores as well (similar to the unknown frequency bug)! Not intended usage..
| Clinical-Genomics/scout | diff --git a/tests/adapter/test_query.py b/tests/adapter/test_query.py
index e5aee3586..2d12aa555 100644
--- a/tests/adapter/test_query.py
+++ b/tests/adapter/test_query.py
@@ -57,7 +57,7 @@ def test_build_cadd_exclusive(adapter):
def test_build_cadd_inclusive(adapter):
case_id = 'cust000'
cadd = 10.0
- cadd_inclusive = 'yes'
+ cadd_inclusive = True
query = {'cadd_score': cadd, 'cadd_inclusive': cadd_inclusive}
mongo_query = adapter.build_query(case_id, query=query)
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 3.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"cython",
"pytest",
"pytest-flask",
"pytest-cov"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt",
"requirements-dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | babel==2.17.0
blinker==1.9.0
cachelib==0.13.0
certifi==2025.1.31
charset-normalizer==3.4.1
click==8.1.8
coloredlogs==15.0.1
coverage==7.8.0
Cython==3.0.12
cyvcf2==0.31.1
dnspython==2.7.0
dominate==2.9.1
exceptiongroup==1.2.2
Flask==3.1.0
flask-babel==4.0.0
Flask-Bootstrap==3.3.7.1
Flask-DebugToolbar==0.16.0
Flask-Login==0.6.3
Flask-Mail==0.10.0
Flask-Markdown==0.3
Flask-OAuthlib==0.9.6
Flask-PyMongo==3.0.1
Flask-WTF==1.2.2
humanfriendly==10.0
idna==3.10
importlib_metadata==8.6.1
iniconfig==2.1.0
intervaltree==3.1.0
invoke==2.2.0
itsdangerous==2.2.0
Jinja2==3.1.6
livereload==2.7.1
loqusdb==2.6.0
Markdown==3.7
MarkupSafe==3.0.2
mongo-adapter==0.3.3
mongomock==4.3.0
numpy==2.0.2
oauthlib==2.1.0
packaging==24.2
path==17.1.0
path.py==12.5.0
ped-parser==1.6.6
pluggy==1.5.0
pymongo==4.11.3
pytest==8.3.5
pytest-cov==6.0.0
pytest-flask==1.3.0
python-dateutil==2.9.0.post0
pytz==2025.2
PyYAML==6.0.2
query-phenomizer==1.2.1
requests==2.32.3
requests-oauthlib==1.1.0
-e git+https://github.com/Clinical-Genomics/scout.git@7b2419a20dd5dbae88f21b9a2afe2c2d4ad5277f#egg=scout_browser
sentinels==1.0.0
six==1.17.0
sortedcontainers==2.4.0
tomli==2.2.1
tornado==6.4.2
urllib3==2.3.0
vcftoolbox==1.5.1
visitor==0.1.3
Werkzeug==3.1.3
WTForms==3.2.1
zipp==3.21.0
| name: scout
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- babel==2.17.0
- blinker==1.9.0
- cachelib==0.13.0
- certifi==2025.1.31
- charset-normalizer==3.4.1
- click==8.1.8
- coloredlogs==15.0.1
- coverage==7.8.0
- cython==3.0.12
- cyvcf2==0.31.1
- dnspython==2.7.0
- dominate==2.9.1
- exceptiongroup==1.2.2
- flask==3.1.0
- flask-babel==4.0.0
- flask-bootstrap==3.3.7.1
- flask-debugtoolbar==0.16.0
- flask-login==0.6.3
- flask-mail==0.10.0
- flask-markdown==0.3
- flask-oauthlib==0.9.6
- flask-pymongo==3.0.1
- flask-wtf==1.2.2
- humanfriendly==10.0
- idna==3.10
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- intervaltree==3.1.0
- invoke==2.2.0
- itsdangerous==2.2.0
- jinja2==3.1.6
- livereload==2.7.1
- loqusdb==2.6.0
- markdown==3.7
- markupsafe==3.0.2
- mongo-adapter==0.3.3
- mongomock==4.3.0
- numpy==2.0.2
- oauthlib==2.1.0
- packaging==24.2
- path==17.1.0
- path-py==12.5.0
- ped-parser==1.6.6
- pluggy==1.5.0
- pymongo==4.11.3
- pytest==8.3.5
- pytest-cov==6.0.0
- pytest-flask==1.3.0
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyyaml==6.0.2
- query-phenomizer==1.2.1
- requests==2.32.3
- requests-oauthlib==1.1.0
- sentinels==1.0.0
- six==1.17.0
- sortedcontainers==2.4.0
- tomli==2.2.1
- tornado==6.4.2
- urllib3==2.3.0
- vcftoolbox==1.5.1
- visitor==0.1.3
- werkzeug==3.1.3
- wtforms==3.2.1
- zipp==3.21.0
prefix: /opt/conda/envs/scout
| [
"tests/adapter/test_query.py::test_build_cadd_inclusive"
] | [] | [
"tests/adapter/test_query.py::test_build_query",
"tests/adapter/test_query.py::test_build_thousand_g_query",
"tests/adapter/test_query.py::test_build_non_existing_thousand_g",
"tests/adapter/test_query.py::test_build_cadd_exclusive",
"tests/adapter/test_query.py::test_build_thousand_g_and_cadd",
"tests/adapter/test_query.py::test_build_chrom",
"tests/adapter/test_query.py::test_build_range"
] | [] | BSD 3-Clause "New" or "Revised" License | 1,655 | 240 | [
"scout/adapter/mongo/query.py"
] |
|
NeuralEnsemble__python-neo-382 | fec5dfca4edd6a2a04cc0b8e274d01009a1c0362 | 2017-09-09 15:14:24 | f0285a7ab15ff6535d3e6736e0163c4fa6aea091 | diff --git a/neo/core/analogsignal.py b/neo/core/analogsignal.py
index aa971c80..fec332ea 100644
--- a/neo/core/analogsignal.py
+++ b/neo/core/analogsignal.py
@@ -251,7 +251,7 @@ class AnalogSignal(BaseNeo, pq.Quantity):
self.file_origin = getattr(obj, 'file_origin', None)
self.description = getattr(obj, 'description', None)
- # Parents objects
+ # Parent objects
self.segment = getattr(obj, 'segment', None)
self.channel_index = getattr(obj, 'channel_index', None)
@@ -437,7 +437,10 @@ class AnalogSignal(BaseNeo, pq.Quantity):
new = self.__class__(signal=signal, units=to_u,
sampling_rate=self.sampling_rate)
new._copy_data_complement(self)
+ new.channel_index = self.channel_index
+ new.segment = self.segment
new.annotations.update(self.annotations)
+
return new
def duplicate_with_new_array(self, signal):
diff --git a/neo/core/container.py b/neo/core/container.py
index f59c1641..5f4535bf 100644
--- a/neo/core/container.py
+++ b/neo/core/container.py
@@ -3,8 +3,7 @@
This module implements generic container base class that all neo container
object inherit from. It provides shared methods for all container types.
-:class:`Container` is derived from :class:`BaseNeo` but is
-defined in :module:`neo.core.analogsignalarray`.
+:class:`Container` is derived from :class:`BaseNeo`
"""
# needed for python 3 compatibility
diff --git a/neo/core/irregularlysampledsignal.py b/neo/core/irregularlysampledsignal.py
index 62e9c4ac..8d919a8d 100644
--- a/neo/core/irregularlysampledsignal.py
+++ b/neo/core/irregularlysampledsignal.py
@@ -208,6 +208,10 @@ class IrregularlySampledSignal(BaseNeo, pq.Quantity):
self.file_origin = getattr(obj, 'file_origin', None)
self.description = getattr(obj, 'description', None)
+ # Parent objects
+ self.segment = getattr(obj, 'segment', None)
+ self.channel_index = getattr(obj, 'channel_index', None)
+
def __repr__(self):
'''
Returns a string representing the :class:`IrregularlySampledSignal`.
@@ -456,6 +460,8 @@ class IrregularlySampledSignal(BaseNeo, pq.Quantity):
signal = cf * self.magnitude
new = self.__class__(times=self.times, signal=signal, units=to_u)
new._copy_data_complement(self)
+ new.channel_index = self.channel_index
+ new.segment = self.segment
new.annotations.update(self.annotations)
return new
diff --git a/neo/core/spiketrain.py b/neo/core/spiketrain.py
index 5d874d95..79e3527d 100644
--- a/neo/core/spiketrain.py
+++ b/neo/core/spiketrain.py
@@ -330,12 +330,15 @@ class SpikeTrain(BaseNeo, pq.Quantity):
if self.dimensionality == pq.quantity.validate_dimensionality(units):
return self.copy()
spikes = self.view(pq.Quantity)
- return SpikeTrain(times=spikes, t_stop=self.t_stop, units=units,
- sampling_rate=self.sampling_rate,
- t_start=self.t_start, waveforms=self.waveforms,
- left_sweep=self.left_sweep, name=self.name,
- file_origin=self.file_origin,
- description=self.description, **self.annotations)
+ obj = SpikeTrain(times=spikes, t_stop=self.t_stop, units=units,
+ sampling_rate=self.sampling_rate,
+ t_start=self.t_start, waveforms=self.waveforms,
+ left_sweep=self.left_sweep, name=self.name,
+ file_origin=self.file_origin,
+ description=self.description, **self.annotations)
+ obj.segment = self.segment
+ obj.unit = self.unit
+ return obj
def __reduce__(self):
'''
| AnalogSignal.rescale() does not preserve parent objects
The same is true for IrregularlySampledSignal and SpikeTrain. | NeuralEnsemble/python-neo | diff --git a/neo/test/coretest/test_analogsignal.py b/neo/test/coretest/test_analogsignal.py
index e56e48ab..bfadb827 100644
--- a/neo/test/coretest/test_analogsignal.py
+++ b/neo/test/coretest/test_analogsignal.py
@@ -302,7 +302,7 @@ class TestAnalogSignalArrayMethods(unittest.TestCase):
self.signal1 = AnalogSignal(self.data1quant, sampling_rate=1*pq.kHz,
name='spam', description='eggs',
file_origin='testfile.txt', arg1='test')
- self.signal1.segment = 1
+ self.signal1.segment = Segment()
self.signal1.channel_index = ChannelIndex(index=[0])
def test__compliant(self):
@@ -392,8 +392,8 @@ class TestAnalogSignalArrayMethods(unittest.TestCase):
def test__copy_should_let_access_to_parents_objects(self):
##copy
result = self.signal1.copy()
- self.assertEqual(result.segment, self.signal1.segment)
- self.assertEqual(result.channel_index, self.signal1.channel_index)
+ self.assertIs(result.segment, self.signal1.segment)
+ self.assertIs(result.channel_index, self.signal1.channel_index)
## deep copy (not fixed yet)
#result = copy.deepcopy(self.signal1)
#self.assertEqual(result.segment, self.signal1.segment)
@@ -452,6 +452,11 @@ class TestAnalogSignalArrayMethods(unittest.TestCase):
assert_array_equal(result.magnitude, self.data1.reshape(-1, 1))
assert_same_sub_schema(result, self.signal1)
+ self.assertIsInstance(result.channel_index, ChannelIndex)
+ self.assertIsInstance(result.segment, Segment)
+ self.assertIs(result.channel_index, self.signal1.channel_index)
+ self.assertIs(result.segment, self.signal1.segment)
+
def test__rescale_new(self):
result = self.signal1.copy()
result = result.rescale(pq.pA)
@@ -466,6 +471,11 @@ class TestAnalogSignalArrayMethods(unittest.TestCase):
self.assertEqual(result.units, 1*pq.pA)
assert_arrays_almost_equal(np.array(result), self.data1.reshape(-1, 1)*1000., 1e-10)
+ self.assertIsInstance(result.channel_index, ChannelIndex)
+ self.assertIsInstance(result.segment, Segment)
+ self.assertIs(result.channel_index, self.signal1.channel_index)
+ self.assertIs(result.segment, self.signal1.segment)
+
def test__rescale_new_incompatible_ValueError(self):
self.assertRaises(ValueError, self.signal1.rescale, pq.mV)
diff --git a/neo/test/coretest/test_irregularysampledsignal.py b/neo/test/coretest/test_irregularysampledsignal.py
index 1d379f65..c21cf68e 100644
--- a/neo/test/coretest/test_irregularysampledsignal.py
+++ b/neo/test/coretest/test_irregularysampledsignal.py
@@ -19,7 +19,7 @@ else:
HAVE_IPYTHON = True
from neo.core.irregularlysampledsignal import IrregularlySampledSignal
-from neo.core import Segment
+from neo.core import Segment, ChannelIndex
from neo.test.tools import (assert_arrays_almost_equal, assert_arrays_equal,
assert_neo_object_is_compliant,
assert_same_sub_schema)
@@ -271,6 +271,8 @@ class TestIrregularlySampledSignalArrayMethods(unittest.TestCase):
description='eggs',
file_origin='testfile.txt',
arg1='test')
+ self.signal1.segment = Segment()
+ self.signal1.channel_index = ChannelIndex([0])
def test__compliant(self):
assert_neo_object_is_compliant(self.signal1)
@@ -345,6 +347,11 @@ class TestIrregularlySampledSignalArrayMethods(unittest.TestCase):
assert_array_equal(result.times, self.time1quant)
assert_same_sub_schema(result, self.signal1)
+ self.assertIsInstance(result.channel_index, ChannelIndex)
+ self.assertIsInstance(result.segment, Segment)
+ self.assertIs(result.channel_index, self.signal1.channel_index)
+ self.assertIs(result.segment, self.signal1.segment)
+
def test__rescale_new(self):
result = self.signal1.copy()
result = result.rescale(pq.uV)
@@ -360,6 +367,11 @@ class TestIrregularlySampledSignalArrayMethods(unittest.TestCase):
assert_arrays_almost_equal(np.array(result), self.data1.reshape(-1, 1)*1000., 1e-10)
assert_array_equal(result.times, self.time1quant)
+ self.assertIsInstance(result.channel_index, ChannelIndex)
+ self.assertIsInstance(result.segment, Segment)
+ self.assertIs(result.channel_index, self.signal1.channel_index)
+ self.assertIs(result.segment, self.signal1.segment)
+
def test__rescale_new_incompatible_ValueError(self):
self.assertRaises(ValueError, self.signal1.rescale, pq.nA)
@@ -550,6 +562,11 @@ class TestIrregularlySampledSignalArrayMethods(unittest.TestCase):
self.assertIsInstance(sig_as_q, pq.Quantity)
assert_array_equal(self.data1, sig_as_q.magnitude.flat)
+ def test__copy_should_preserve_parent_objects(self):
+ result = self.signal1.copy()
+ self.assertIs(result.segment, self.signal1.segment)
+ self.assertIs(result.channel_index, self.signal1.channel_index)
+
class TestIrregularlySampledSignalCombination(unittest.TestCase):
def setUp(self):
diff --git a/neo/test/coretest/test_spiketrain.py b/neo/test/coretest/test_spiketrain.py
index 47c8dd23..4a1bbacd 100644
--- a/neo/test/coretest/test_spiketrain.py
+++ b/neo/test/coretest/test_spiketrain.py
@@ -1394,11 +1394,16 @@ class TestChanging(unittest.TestCase):
def test__rescale(self):
data = [3, 4, 5] * pq.ms
train = SpikeTrain(data, t_start=0.5, t_stop=10.0)
+ train.segment = Segment()
+ train.unit = Unit()
result = train.rescale(pq.s)
assert_neo_object_is_compliant(train)
assert_neo_object_is_compliant(result)
assert_arrays_equal(train, result)
self.assertEqual(result.units, 1 * pq.s)
+ self.assertIs(result.segment, train.segment)
+ self.assertIs(result.unit, train.unit)
+
def test__rescale_same_units(self):
data = [3, 4, 5] * pq.ms
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 4
} | 0.5 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
importlib-metadata==4.8.3
iniconfig==1.1.1
-e git+https://github.com/NeuralEnsemble/python-neo.git@fec5dfca4edd6a2a04cc0b8e274d01009a1c0362#egg=neo
nose==1.3.7
numpy==1.19.5
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
quantities==0.13.0
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: python-neo
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- nose==1.3.7
- numpy==1.19.5
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- quantities==0.13.0
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/python-neo
| [
"neo/test/coretest/test_analogsignal.py::TestAnalogSignalArrayMethods::test__rescale_new",
"neo/test/coretest/test_analogsignal.py::TestAnalogSignalArrayMethods::test__rescale_same",
"neo/test/coretest/test_irregularysampledsignal.py::TestIrregularlySampledSignalArrayMethods::test__copy_should_preserve_parent_objects",
"neo/test/coretest/test_irregularysampledsignal.py::TestIrregularlySampledSignalArrayMethods::test__rescale_new",
"neo/test/coretest/test_irregularysampledsignal.py::TestIrregularlySampledSignalArrayMethods::test__rescale_same",
"neo/test/coretest/test_spiketrain.py::TestChanging::test__rescale"
] | [
"neo/test/coretest/test_analogsignal.py::TestAnalogSignalCombination::test__add_quantity_should_preserve_data_complement",
"neo/test/coretest/test_analogsignal.py::TestAnalogSignalCombination::test__add_two_consistent_signals_should_preserve_data_complement",
"neo/test/coretest/test_analogsignal.py::TestAnalogSignalFunctions::test__pickle",
"neo/test/coretest/test_irregularysampledsignal.py::TestIrregularlySampledSignalProperties::test_IrregularlySampledSignal_repr",
"neo/test/coretest/test_irregularysampledsignal.py::TestIrregularlySampledSignalArrayMethods::test_time_slice",
"neo/test/coretest/test_irregularysampledsignal.py::TestIrregularlySampledSignalArrayMethods::test_time_slice_differnt_units",
"neo/test/coretest/test_irregularysampledsignal.py::TestIrregularlySampledSignalArrayMethods::test_time_slice_empty",
"neo/test/coretest/test_irregularysampledsignal.py::TestIrregularlySampledSignalArrayMethods::test_time_slice_none_both",
"neo/test/coretest/test_irregularysampledsignal.py::TestIrregularlySampledSignalArrayMethods::test_time_slice_none_start",
"neo/test/coretest/test_irregularysampledsignal.py::TestIrregularlySampledSignalArrayMethods::test_time_slice_none_stop",
"neo/test/coretest/test_irregularysampledsignal.py::TestIrregularlySampledSignalArrayMethods::test_time_slice_out_of_boundries",
"neo/test/coretest/test_irregularysampledsignal.py::TestIrregularlySampledSignalCombination::test__add_two_consistent_signals_should_preserve_data_complement",
"neo/test/coretest/test_irregularysampledsignal.py::TestAnalogSignalFunctions::test__pickle",
"neo/test/coretest/test_spiketrain.py::TestPropertiesMethods::test__repr"
] | [
"neo/test/coretest/test_analogsignal.py::Test__generate_datasets::test__fake_neo__cascade",
"neo/test/coretest/test_analogsignal.py::Test__generate_datasets::test__fake_neo__nocascade",
"neo/test/coretest/test_analogsignal.py::TestAnalogSignalConstructor::test__create2D_with_copy_false_should_return_view",
"neo/test/coretest/test_analogsignal.py::TestAnalogSignalConstructor::test__create_from_array_no_units_ValueError",
"neo/test/coretest/test_analogsignal.py::TestAnalogSignalConstructor::test__create_from_list",
"neo/test/coretest/test_analogsignal.py::TestAnalogSignalConstructor::test__create_from_np_array",
"neo/test/coretest/test_analogsignal.py::TestAnalogSignalConstructor::test__create_from_quantities_array",
"neo/test/coretest/test_analogsignal.py::TestAnalogSignalConstructor::test__create_from_quantities_array_inconsistent_units_ValueError",
"neo/test/coretest/test_analogsignal.py::TestAnalogSignalConstructor::test__create_inconsistent_sampling_rate_and_period_ValueError",
"neo/test/coretest/test_analogsignal.py::TestAnalogSignalConstructor::test__create_with_None_sampling_rate_should_raise_ValueError",
"neo/test/coretest/test_analogsignal.py::TestAnalogSignalConstructor::test__create_with_None_t_start_should_raise_ValueError",
"neo/test/coretest/test_analogsignal.py::TestAnalogSignalConstructor::test__create_with_additional_argument",
"neo/test/coretest/test_analogsignal.py::TestAnalogSignalConstructor::test__create_with_copy_false_should_return_view",
"neo/test/coretest/test_analogsignal.py::TestAnalogSignalConstructor::test__create_with_copy_true_should_return_copy",
"neo/test/coretest/test_analogsignal.py::TestAnalogSignalConstructor::test__create_without_sampling_rate_or_period_ValueError",
"neo/test/coretest/test_analogsignal.py::TestAnalogSignalProperties::test__compliant",
"neo/test/coretest/test_analogsignal.py::TestAnalogSignalProperties::test__duplicate_with_new_array",
"neo/test/coretest/test_analogsignal.py::TestAnalogSignalProperties::test__duration_getter",
"neo/test/coretest/test_analogsignal.py::TestAnalogSignalProperties::test__repr",
"neo/test/coretest/test_analogsignal.py::TestAnalogSignalProperties::test__sampling_period_getter",
"neo/test/coretest/test_analogsignal.py::TestAnalogSignalProperties::test__sampling_period_setter",
"neo/test/coretest/test_analogsignal.py::TestAnalogSignalProperties::test__sampling_period_setter_None_ValueError",
"neo/test/coretest/test_analogsignal.py::TestAnalogSignalProperties::test__sampling_period_setter_not_quantity_ValueError",
"neo/test/coretest/test_analogsignal.py::TestAnalogSignalProperties::test__sampling_rate_getter",
"neo/test/coretest/test_analogsignal.py::TestAnalogSignalProperties::test__sampling_rate_setter",
"neo/test/coretest/test_analogsignal.py::TestAnalogSignalProperties::test__sampling_rate_setter_None_ValueError",
"neo/test/coretest/test_analogsignal.py::TestAnalogSignalProperties::test__sampling_rate_setter_not_quantity_ValueError",
"neo/test/coretest/test_analogsignal.py::TestAnalogSignalProperties::test__t_start_setter_None_ValueError",
"neo/test/coretest/test_analogsignal.py::TestAnalogSignalProperties::test__t_stop_getter",
"neo/test/coretest/test_analogsignal.py::TestAnalogSignalProperties::test__times_getter",
"neo/test/coretest/test_analogsignal.py::TestAnalogSignalArrayMethods::test__comparison_with_inconsistent_units_should_raise_Exception",
"neo/test/coretest/test_analogsignal.py::TestAnalogSignalArrayMethods::test__compliant",
"neo/test/coretest/test_analogsignal.py::TestAnalogSignalArrayMethods::test__copy_should_let_access_to_parents_objects",
"neo/test/coretest/test_analogsignal.py::TestAnalogSignalArrayMethods::test__getitem_out_of_bounds_IndexError",
"neo/test/coretest/test_analogsignal.py::TestAnalogSignalArrayMethods::test__getitem_should_return_single_quantity",
"neo/test/coretest/test_analogsignal.py::TestAnalogSignalArrayMethods::test__rescale_new_incompatible_ValueError",
"neo/test/coretest/test_analogsignal.py::TestAnalogSignalArrayMethods::test__simple_statistics",
"neo/test/coretest/test_analogsignal.py::TestAnalogSignalArrayMethods::test__slice_should_change_sampling_period",
"neo/test/coretest/test_analogsignal.py::TestAnalogSignalArrayMethods::test__slice_should_let_access_to_parents_objects",
"neo/test/coretest/test_analogsignal.py::TestAnalogSignalArrayMethods::test__slice_should_modify_linked_channelindex",
"neo/test/coretest/test_analogsignal.py::TestAnalogSignalArrayMethods::test__slice_should_return_AnalogSignalArray",
"neo/test/coretest/test_analogsignal.py::TestAnalogSignalArrayMethods::test_as_array",
"neo/test/coretest/test_analogsignal.py::TestAnalogSignalArrayMethods::test_as_quantity",
"neo/test/coretest/test_analogsignal.py::TestAnalogSignalArrayMethods::test_comparison_operators",
"neo/test/coretest/test_analogsignal.py::TestAnalogSignalEquality::test__signals_with_different_data_complement_should_be_not_equal",
"neo/test/coretest/test_analogsignal.py::TestAnalogSignalCombination::test__add_const_quantity_should_preserve_data_complement",
"neo/test/coretest/test_analogsignal.py::TestAnalogSignalCombination::test__add_signals_with_inconsistent_data_complement_ValueError",
"neo/test/coretest/test_analogsignal.py::TestAnalogSignalCombination::test__compliant",
"neo/test/coretest/test_analogsignal.py::TestAnalogSignalCombination::test__divide_by_const_should_preserve_data_complement",
"neo/test/coretest/test_analogsignal.py::TestAnalogSignalCombination::test__mult_by_const_float_should_preserve_data_complement",
"neo/test/coretest/test_analogsignal.py::TestAnalogSignalCombination::test__subtract_const_should_preserve_data_complement",
"neo/test/coretest/test_analogsignal.py::TestAnalogSignalCombination::test__subtract_from_const_should_return_signal",
"neo/test/coretest/test_analogsignal.py::TestAnalogSignalSampling::test___get_sampling_rate__period_array_rate_none_TypeError",
"neo/test/coretest/test_analogsignal.py::TestAnalogSignalSampling::test___get_sampling_rate__period_none_rate_float_TypeError",
"neo/test/coretest/test_analogsignal.py::TestAnalogSignalSampling::test___get_sampling_rate__period_none_rate_none_ValueError",
"neo/test/coretest/test_analogsignal.py::TestAnalogSignalSampling::test___get_sampling_rate__period_none_rate_quant",
"neo/test/coretest/test_analogsignal.py::TestAnalogSignalSampling::test___get_sampling_rate__period_quant_rate_none",
"neo/test/coretest/test_analogsignal.py::TestAnalogSignalSampling::test___get_sampling_rate__period_rate_equivalent",
"neo/test/coretest/test_analogsignal.py::TestAnalogSignalSampling::test___get_sampling_rate__period_rate_not_equivalent_ValueError",
"neo/test/coretest/test_irregularysampledsignal.py::Test__generate_datasets::test__fake_neo__cascade",
"neo/test/coretest/test_irregularysampledsignal.py::Test__generate_datasets::test__fake_neo__nocascade",
"neo/test/coretest/test_irregularysampledsignal.py::Test__generate_datasets::test__get_fake_values",
"neo/test/coretest/test_irregularysampledsignal.py::TestIrregularlySampledSignalConstruction::test_IrregularlySampledSignal_creation_times_units_signal_units",
"neo/test/coretest/test_irregularysampledsignal.py::TestIrregularlySampledSignalConstruction::test_IrregularlySampledSignal_creation_units_arg",
"neo/test/coretest/test_irregularysampledsignal.py::TestIrregularlySampledSignalConstruction::test_IrregularlySampledSignal_creation_units_rescale",
"neo/test/coretest/test_irregularysampledsignal.py::TestIrregularlySampledSignalConstruction::test_IrregularlySampledSignal_different_lens_ValueError",
"neo/test/coretest/test_irregularysampledsignal.py::TestIrregularlySampledSignalConstruction::test_IrregularlySampledSignal_no_signal_units_ValueError",
"neo/test/coretest/test_irregularysampledsignal.py::TestIrregularlySampledSignalConstruction::test_IrregularlySampledSignal_no_time_units_ValueError",
"neo/test/coretest/test_irregularysampledsignal.py::TestIrregularlySampledSignalProperties::test__compliant",
"neo/test/coretest/test_irregularysampledsignal.py::TestIrregularlySampledSignalProperties::test__duration_getter",
"neo/test/coretest/test_irregularysampledsignal.py::TestIrregularlySampledSignalProperties::test__sampling_intervals_getter",
"neo/test/coretest/test_irregularysampledsignal.py::TestIrregularlySampledSignalProperties::test__t_start_getter",
"neo/test/coretest/test_irregularysampledsignal.py::TestIrregularlySampledSignalProperties::test__t_stop_getter",
"neo/test/coretest/test_irregularysampledsignal.py::TestIrregularlySampledSignalArrayMethods::test__comparison_with_inconsistent_units_should_raise_Exception",
"neo/test/coretest/test_irregularysampledsignal.py::TestIrregularlySampledSignalArrayMethods::test__compliant",
"neo/test/coretest/test_irregularysampledsignal.py::TestIrregularlySampledSignalArrayMethods::test__getitem_out_of_bounds_IndexError",
"neo/test/coretest/test_irregularysampledsignal.py::TestIrregularlySampledSignalArrayMethods::test__getitem_should_return_single_quantity",
"neo/test/coretest/test_irregularysampledsignal.py::TestIrregularlySampledSignalArrayMethods::test__rescale_new_incompatible_ValueError",
"neo/test/coretest/test_irregularysampledsignal.py::TestIrregularlySampledSignalArrayMethods::test__slice_should_return_IrregularlySampledSignal",
"neo/test/coretest/test_irregularysampledsignal.py::TestIrregularlySampledSignalArrayMethods::test_as_array",
"neo/test/coretest/test_irregularysampledsignal.py::TestIrregularlySampledSignalArrayMethods::test_as_quantity",
"neo/test/coretest/test_irregularysampledsignal.py::TestIrregularlySampledSignalArrayMethods::test_comparison_operators",
"neo/test/coretest/test_irregularysampledsignal.py::TestIrregularlySampledSignalArrayMethods::test_mean_interpolation_NotImplementedError",
"neo/test/coretest/test_irregularysampledsignal.py::TestIrregularlySampledSignalArrayMethods::test_resample_NotImplementedError",
"neo/test/coretest/test_irregularysampledsignal.py::TestIrregularlySampledSignalArrayMethods::test_simple_statistics",
"neo/test/coretest/test_irregularysampledsignal.py::TestIrregularlySampledSignalCombination::test__add_const_quantity_should_preserve_data_complement",
"neo/test/coretest/test_irregularysampledsignal.py::TestIrregularlySampledSignalCombination::test__add_signals_with_inconsistent_dimension_ValueError",
"neo/test/coretest/test_irregularysampledsignal.py::TestIrregularlySampledSignalCombination::test__add_signals_with_inconsistent_times_AssertionError",
"neo/test/coretest/test_irregularysampledsignal.py::TestIrregularlySampledSignalCombination::test__compliant",
"neo/test/coretest/test_irregularysampledsignal.py::TestIrregularlySampledSignalCombination::test__divide_signal_by_const_should_preserve_data_complement",
"neo/test/coretest/test_irregularysampledsignal.py::TestIrregularlySampledSignalCombination::test__mult_signal_by_const_array_should_preserve_data_complement",
"neo/test/coretest/test_irregularysampledsignal.py::TestIrregularlySampledSignalCombination::test__mult_signal_by_const_float_should_preserve_data_complement",
"neo/test/coretest/test_irregularysampledsignal.py::TestIrregularlySampledSignalCombination::test__subtract_const_should_preserve_data_complement",
"neo/test/coretest/test_irregularysampledsignal.py::TestIrregularlySampledSignalCombination::test__subtract_from_const_should_return_signal",
"neo/test/coretest/test_irregularysampledsignal.py::TestIrregularlySampledSignalEquality::test__signals_with_different_times_should_be_not_equal",
"neo/test/coretest/test_spiketrain.py::Test__generate_datasets::test__fake_neo__cascade",
"neo/test/coretest/test_spiketrain.py::Test__generate_datasets::test__fake_neo__nocascade",
"neo/test/coretest/test_spiketrain.py::Test__generate_datasets::test__get_fake_values",
"neo/test/coretest/test_spiketrain.py::Testcheck_has_dimensions_time::test__check_has_dimensions_time",
"neo/test/coretest/test_spiketrain.py::Testcheck_time_in_range::test__check_time_in_range_above",
"neo/test/coretest/test_spiketrain.py::Testcheck_time_in_range::test__check_time_in_range_above_below",
"neo/test/coretest/test_spiketrain.py::Testcheck_time_in_range::test__check_time_in_range_above_below_scale",
"neo/test/coretest/test_spiketrain.py::Testcheck_time_in_range::test__check_time_in_range_above_scale",
"neo/test/coretest/test_spiketrain.py::Testcheck_time_in_range::test__check_time_in_range_below",
"neo/test/coretest/test_spiketrain.py::Testcheck_time_in_range::test__check_time_in_range_below_scale",
"neo/test/coretest/test_spiketrain.py::Testcheck_time_in_range::test__check_time_in_range_empty_array",
"neo/test/coretest/test_spiketrain.py::Testcheck_time_in_range::test__check_time_in_range_exact",
"neo/test/coretest/test_spiketrain.py::Testcheck_time_in_range::test__check_time_in_range_inside",
"neo/test/coretest/test_spiketrain.py::Testcheck_time_in_range::test__check_time_in_range_scale",
"neo/test/coretest/test_spiketrain.py::TestConstructor::test__create_empty",
"neo/test/coretest/test_spiketrain.py::TestConstructor::test__create_empty_no_t_start",
"neo/test/coretest/test_spiketrain.py::TestConstructor::test__create_from_array",
"neo/test/coretest/test_spiketrain.py::TestConstructor::test__create_from_array_no_start_stop_units",
"neo/test/coretest/test_spiketrain.py::TestConstructor::test__create_from_array_no_start_stop_units_set_dtype",
"neo/test/coretest/test_spiketrain.py::TestConstructor::test__create_from_array_no_start_stop_units_with_dtype",
"neo/test/coretest/test_spiketrain.py::TestConstructor::test__create_from_array_set_dtype",
"neo/test/coretest/test_spiketrain.py::TestConstructor::test__create_from_array_with_dtype",
"neo/test/coretest/test_spiketrain.py::TestConstructor::test__create_from_array_with_incompatible_units_ValueError",
"neo/test/coretest/test_spiketrain.py::TestConstructor::test__create_from_array_without_units_should_raise_ValueError",
"neo/test/coretest/test_spiketrain.py::TestConstructor::test__create_from_list",
"neo/test/coretest/test_spiketrain.py::TestConstructor::test__create_from_list_no_start_stop_units",
"neo/test/coretest/test_spiketrain.py::TestConstructor::test__create_from_list_no_start_stop_units_set_dtype",
"neo/test/coretest/test_spiketrain.py::TestConstructor::test__create_from_list_set_dtype",
"neo/test/coretest/test_spiketrain.py::TestConstructor::test__create_from_list_without_units_should_raise_ValueError",
"neo/test/coretest/test_spiketrain.py::TestConstructor::test__create_from_quantity_array",
"neo/test/coretest/test_spiketrain.py::TestConstructor::test__create_from_quantity_array_no_start_stop_units",
"neo/test/coretest/test_spiketrain.py::TestConstructor::test__create_from_quantity_array_no_start_stop_units_set_dtype",
"neo/test/coretest/test_spiketrain.py::TestConstructor::test__create_from_quantity_array_no_start_stop_units_with_dtype",
"neo/test/coretest/test_spiketrain.py::TestConstructor::test__create_from_quantity_array_set_dtype",
"neo/test/coretest/test_spiketrain.py::TestConstructor::test__create_from_quantity_array_units",
"neo/test/coretest/test_spiketrain.py::TestConstructor::test__create_from_quantity_array_units_no_start_stop_units",
"neo/test/coretest/test_spiketrain.py::TestConstructor::test__create_from_quantity_array_units_set_dtype",
"neo/test/coretest/test_spiketrain.py::TestConstructor::test__create_from_quantity_array_units_with_dtype",
"neo/test/coretest/test_spiketrain.py::TestConstructor::test__create_from_quantity_array_with_dtype",
"neo/test/coretest/test_spiketrain.py::TestConstructor::test__create_from_quantity_units_no_start_stop_units_set_dtype",
"neo/test/coretest/test_spiketrain.py::TestConstructor::test__create_minimal",
"neo/test/coretest/test_spiketrain.py::TestConstructor::test__create_with_len_times_different_size_than_waveform_shape1_ValueError",
"neo/test/coretest/test_spiketrain.py::TestConstructor::test__create_with_times_outside_tstart_tstop_ValueError",
"neo/test/coretest/test_spiketrain.py::TestConstructor::test_default_tstart",
"neo/test/coretest/test_spiketrain.py::TestConstructor::test_defaults",
"neo/test/coretest/test_spiketrain.py::TestConstructor::test_tstop_units_conversion",
"neo/test/coretest/test_spiketrain.py::TestSorting::test_sort",
"neo/test/coretest/test_spiketrain.py::TestSlice::test_compliant",
"neo/test/coretest/test_spiketrain.py::TestSlice::test_slice",
"neo/test/coretest/test_spiketrain.py::TestSlice::test_slice_from_beginning",
"neo/test/coretest/test_spiketrain.py::TestSlice::test_slice_negative_idxs",
"neo/test/coretest/test_spiketrain.py::TestSlice::test_slice_to_end",
"neo/test/coretest/test_spiketrain.py::TestTimeSlice::test_compliant",
"neo/test/coretest/test_spiketrain.py::TestTimeSlice::test_time_slice_differnt_units",
"neo/test/coretest/test_spiketrain.py::TestTimeSlice::test_time_slice_empty",
"neo/test/coretest/test_spiketrain.py::TestTimeSlice::test_time_slice_matching_ends",
"neo/test/coretest/test_spiketrain.py::TestTimeSlice::test_time_slice_none_both",
"neo/test/coretest/test_spiketrain.py::TestTimeSlice::test_time_slice_none_start",
"neo/test/coretest/test_spiketrain.py::TestTimeSlice::test_time_slice_none_stop",
"neo/test/coretest/test_spiketrain.py::TestTimeSlice::test_time_slice_out_of_boundries",
"neo/test/coretest/test_spiketrain.py::TestTimeSlice::test_time_slice_typical",
"neo/test/coretest/test_spiketrain.py::TestDuplicateWithNewData::test_deep_copy_attributes",
"neo/test/coretest/test_spiketrain.py::TestDuplicateWithNewData::test_duplicate_with_new_data",
"neo/test/coretest/test_spiketrain.py::TestAttributesAnnotations::test_annotations",
"neo/test/coretest/test_spiketrain.py::TestAttributesAnnotations::test_autoset_universally_recommended_attributes",
"neo/test/coretest/test_spiketrain.py::TestAttributesAnnotations::test_set_universally_recommended_attributes",
"neo/test/coretest/test_spiketrain.py::TestChanging::test__adding_time",
"neo/test/coretest/test_spiketrain.py::TestChanging::test__changing_multiple_spiketimes",
"neo/test/coretest/test_spiketrain.py::TestChanging::test__changing_multiple_spiketimes_should_check_time_in_range",
"neo/test/coretest/test_spiketrain.py::TestChanging::test__changing_spiketime_should_check_time_in_range",
"neo/test/coretest/test_spiketrain.py::TestChanging::test__rescale_incompatible_units_ValueError",
"neo/test/coretest/test_spiketrain.py::TestChanging::test__rescale_same_units",
"neo/test/coretest/test_spiketrain.py::TestChanging::test__subtracting_time",
"neo/test/coretest/test_spiketrain.py::TestChanging::test_change_with_copy_default",
"neo/test/coretest/test_spiketrain.py::TestChanging::test_change_with_copy_default_and_data_not_quantity",
"neo/test/coretest/test_spiketrain.py::TestChanging::test_change_with_copy_false",
"neo/test/coretest/test_spiketrain.py::TestChanging::test_change_with_copy_false_and_data_not_quantity",
"neo/test/coretest/test_spiketrain.py::TestChanging::test_change_with_copy_false_and_dtype_change",
"neo/test/coretest/test_spiketrain.py::TestChanging::test_change_with_copy_false_and_fake_rescale",
"neo/test/coretest/test_spiketrain.py::TestChanging::test_change_with_copy_false_and_rescale_true",
"neo/test/coretest/test_spiketrain.py::TestChanging::test_change_with_copy_true",
"neo/test/coretest/test_spiketrain.py::TestChanging::test_change_with_copy_true_and_data_not_quantity",
"neo/test/coretest/test_spiketrain.py::TestChanging::test_changing_slice_changes_original_spiketrain",
"neo/test/coretest/test_spiketrain.py::TestChanging::test_changing_slice_changes_original_spiketrain_with_copy_false",
"neo/test/coretest/test_spiketrain.py::TestChanging::test_init_with_rescale",
"neo/test/coretest/test_spiketrain.py::TestPropertiesMethods::test__children",
"neo/test/coretest/test_spiketrain.py::TestPropertiesMethods::test__compliant",
"neo/test/coretest/test_spiketrain.py::TestPropertiesMethods::test__duration",
"neo/test/coretest/test_spiketrain.py::TestPropertiesMethods::test__right_sweep",
"neo/test/coretest/test_spiketrain.py::TestPropertiesMethods::test__sampling_period",
"neo/test/coretest/test_spiketrain.py::TestPropertiesMethods::test__spike_duration",
"neo/test/coretest/test_spiketrain.py::TestMiscellaneous::test__different_dtype_for_t_start_and_array",
"neo/test/coretest/test_spiketrain.py::TestMiscellaneous::test_as_array",
"neo/test/coretest/test_spiketrain.py::TestMiscellaneous::test_as_quantity"
] | [] | BSD 3-Clause "New" or "Revised" License | 1,663 | 988 | [
"neo/core/analogsignal.py",
"neo/core/container.py",
"neo/core/irregularlysampledsignal.py",
"neo/core/spiketrain.py"
] |
|
OpenMined__PySyft-232 | 6c049ac8c4c2e9598bbd495d9a5fd716d3e46126 | 2017-09-12 20:03:09 | 06ce023225dd613d8fb14ab2046135b93ab22376 | diff --git a/syft/tensor.py b/syft/tensor.py
index 0aed1c3ef8..7d31e2e430 100644
--- a/syft/tensor.py
+++ b/syft/tensor.py
@@ -1034,6 +1034,67 @@ class TensorBase(object):
hist, edges = np.histogram(np.array(self.data), bins=bins, range=(min, max))
return TensorBase(hist)
+ def scatter_(self, dim, index, src):
+ """
+ Writes all values from the Tensor src into self at the indices specified in the index Tensor.
+ The indices are specified with respect to the given dimension, dim, in the manner described in gather().
+
+ :param dim: The axis along which to index
+ :param index: The indices of elements to scatter
+ :param src: The source element(s) to scatter
+ :return: self
+ """
+ index = _ensure_tensorbase(index)
+ if self.encrypted or index.encrypted:
+ return NotImplemented
+ if index.data.dtype != np.dtype('int_'):
+ raise TypeError("The values of index must be integers")
+ if self.data.ndim != index.data.ndim:
+ raise ValueError("Index should have the same number of dimensions as output")
+ if dim >= self.data.ndim or dim < -self.data.ndim:
+ raise IndexError("dim is out of range")
+ if dim < 0:
+ # Not sure why scatter should accept dim < 0, but that is the behavior in PyTorch's scatter
+ dim = self.data.ndim + dim
+ idx_xsection_shape = index.data.shape[:dim] + index.data.shape[dim + 1:]
+ self_xsection_shape = self.data.shape[:dim] + self.data.shape[dim + 1:]
+ if idx_xsection_shape != self_xsection_shape:
+ raise ValueError("Except for dimension " + str(dim) +
+ ", all dimensions of index and output should be the same size")
+ if (index.data >= self.data.shape[dim]).any() or (index.data < 0).any():
+ raise IndexError("The values of index must be between 0 and (self.data.shape[dim] -1)")
+
+ def make_slice(arr, dim, i):
+ slc = [slice(None)] * arr.ndim
+ slc[dim] = i
+ return slc
+
+ # We use index and dim parameters to create idx
+ # idx is in a form that can be used as a NumPy advanced index for scattering of src param. in self.data
+ idx = [[*np.indices(idx_xsection_shape).reshape(index.data.ndim - 1, -1),
+ index.data[make_slice(index.data, dim, i)].reshape(1, -1)[0]] for i in range(index.data.shape[dim])]
+ idx = list(np.concatenate(idx, axis=1))
+ idx.insert(dim, idx.pop())
+
+ if not np.isscalar(src):
+ src = _ensure_tensorbase(src)
+ if index.data.shape[dim] > src.data.shape[dim]:
+ raise IndexError("Dimension " + str(dim) + "of index can not be bigger than that of src ")
+ src_shape = src.data.shape[:dim] + src.data.shape[dim + 1:]
+ if idx_xsection_shape != src_shape:
+ raise ValueError("Except for dimension " +
+ str(dim) + ", all dimensions of index and src should be the same size")
+ # src_idx is a NumPy advanced index for indexing of elements in the src
+ src_idx = list(idx)
+ src_idx.pop(dim)
+ src_idx.insert(dim, np.repeat(np.arange(index.data.shape[dim]), np.prod(idx_xsection_shape)))
+ self.data[idx] = src.data[src_idx]
+
+ else:
+ self.data[idx] = src
+
+ return self
+
def serialize(self):
return pickle.dumps(self)
| Implement Default scatter Functionality for Base Tensor Type
**User Story A:** As a Data Scientist using Syft's Base Tensor type, I want to leverage a default method for computing operations on a Tensor of arbitrary type. For this ticket to be complete, scatter_() should operate inline. For a reference on the operation this performs check out [PyTorch](http://pytorch.org/docs/master/tensors.html)'s documentation.
**Acceptance Criteria:**
- If the Base Tensor type's attribute "encrypted" is set to True, it should return a NotImplemented error.
- a unit test demonstrating the correct operation on the Base Tensor type implemented over int and float Tensors.
- inline documentation in the python code. For inspiration on inline documentation, please check out [PyTorch](http://pytorch.org/docs/master/tensors.html)'s documentation for this operator. | OpenMined/PySyft | diff --git a/tests/test_tensor.py b/tests/test_tensor.py
index d666b46e88..f586ff59ac 100644
--- a/tests/test_tensor.py
+++ b/tests/test_tensor.py
@@ -726,5 +726,100 @@ class notEqualTests(unittest.TestCase):
self.assertTrue(syft.equal(t1, TensorBase([1, 1, 1, 0])))
+class scatterTests(unittest.TestCase):
+ def testScatter_Numerical0(self):
+ t = TensorBase(np.zeros((3, 5)))
+ idx = TensorBase(np.array([[0, 0, 0, 0, 0]]))
+ src = 1.0
+ dim = 0
+ t.scatter_(dim=dim, index=idx, src=src)
+ self.assertTrue(np.array_equal(t.data, np.array([[1, 1, 1, 1, 1], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]])))
+
+ def testScatter_Numerical1(self):
+ t = TensorBase(np.zeros((3, 5)))
+ idx = TensorBase(np.array([[0], [0], [0]]))
+ src = 1.0
+ dim = 1
+ t.scatter_(dim=dim, index=idx, src=src)
+ self.assertTrue(np.array_equal(t.data, np.array([[1, 0, 0, 0, 0], [1, 0, 0, 0, 0], [1, 0, 0, 0, 0]])))
+
+ def testScatter_Numerical2(self):
+ t = TensorBase(np.zeros((3, 5)))
+ idx = TensorBase(np.array([[0], [0], [0]]))
+ src = 1.0
+ dim = -1
+ t.scatter_(dim=dim, index=idx, src=src)
+ self.assertTrue(np.array_equal(t.data, np.array([[1, 0, 0, 0, 0], [1, 0, 0, 0, 0], [1, 0, 0, 0, 0]])))
+
+ def testScatter_Numerical3(self):
+ t = TensorBase(np.zeros((3, 5)))
+ idx = TensorBase(np.array([[0, 0, 0, 0, 0]]))
+ src = TensorBase(np.array([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]]))
+ dim = 0
+ t.scatter_(dim=dim, index=idx, src=src)
+ self.assertTrue(np.array_equal(t.data, np.array([[1, 2, 3, 4, 5], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]])))
+
+ def testScatter_Numerical4(self):
+ t = TensorBase(np.zeros((3, 5)))
+ idx = TensorBase(np.array([[0, 0, 0, 0, 0]]))
+ src = TensorBase(np.array([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]]))
+ dim = -2
+ t.scatter_(dim=dim, index=idx, src=src)
+ self.assertTrue(np.array_equal(t.data, np.array([[1, 2, 3, 4, 5], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]])))
+
+ def testScatter_Numerical5(self):
+ t = TensorBase(np.zeros((3, 5)))
+ idx = TensorBase(np.array([[0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]))
+ src = TensorBase(np.array([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]]))
+ dim = 0
+ t.scatter_(dim=dim, index=idx, src=src)
+ self.assertTrue(np.array_equal(t.data, np.array([[6, 7, 8, 9, 10], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]])))
+
+ def testScatter_Numerical6(self):
+ t = TensorBase(np.zeros((3, 4, 5)))
+ idx = [[[3, 0, 1, 1, 2], [0, 3, 3, 3, 3]], [[2, 0, 0, 0, 0], [2, 1, 0, 2, 0]],
+ [[0, 0, 1, 0, 2], [1, 3, 2, 2, 2]]]
+ src = [[[7, 84, 99, 71, 44], [79, 57, 2, 37, 62]], [[31, 44, 43, 54, 56], [72, 52, 21, 89, 95]],
+ [[5, 3, 99, 4, 52], [32, 88, 58, 62, 9]]]
+ dim = 1
+ t.scatter_(dim=dim, index=idx, src=src)
+ expected = [[[79, 84, 0, 0, 0], [0, 0, 99, 71, 0], [0, 0, 0, 0, 44], [7, 57, 2, 37, 62]],
+ [[0, 44, 21, 54, 95], [0, 52, 0, 0, 0], [72, 0, 0, 89, 0], [0, 0, 0, 0, 0]],
+ [[5, 3, 0, 4, 0], [32, 0, 99, 0, 0], [0, 0, 58, 62, 9], [0, 88, 0, 0, 0]]]
+ self.assertTrue(np.array_equal(t.data, np.array(expected)))
+
+ def testScatter_IndexType(self):
+ t = TensorBase(np.zeros((3, 5)))
+ idx = TensorBase(np.array([[0.0, 0.0, 0.0, 0.0, 0.0]]))
+ src = TensorBase(np.array([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]]))
+ dim = 0
+ with self.assertRaises(Exception):
+ t.scatter_(dim=dim, index=idx, src=src)
+
+ def testScatter_IndexOutOfRange(self):
+ t = TensorBase(np.zeros((3, 5)))
+ idx = TensorBase(np.array([[5, 0, 0, 0, 0]]))
+ src = TensorBase(np.array([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]]))
+ dim = 0
+ with self.assertRaises(Exception):
+ t.scatter_(dim=dim, index=idx, src=src)
+
+ def testScatter_DimOutOfRange(self):
+ t = TensorBase(np.zeros((3, 5)))
+ idx = TensorBase(np.array([[0, 0, 0, 0, 0]]))
+ src = TensorBase(np.array([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]]))
+ dim = 4
+ with self.assertRaises(Exception):
+ t.scatter_(dim=dim, index=idx, src=src)
+
+ def testScatter_index_src_dimension_mismatch(self):
+ t = TensorBase(np.zeros((3, 5)))
+ idx = TensorBase(np.array([[0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]))
+ src = TensorBase(np.array([[1, 2, 3, 4, 5]]))
+ dim = 1
+ with self.assertRaises(Exception):
+ t.scatter_(dim=dim, index=idx, src=src)
+
+
if __name__ == "__main__":
unittest.main()
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 1
} | PySyft/hydrogen | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-flake8"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc musl-dev g++ libgmp3-dev libmpfr-dev ca-certificates libmpc-dev"
],
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | args==0.1.0
attrs==22.2.0
certifi==2021.5.30
clint==0.5.1
flake8==5.0.4
importlib-metadata==4.2.0
iniconfig==1.1.1
line-profiler==4.1.3
mccabe==0.7.0
numpy==1.19.5
packaging==21.3
phe==1.5.0
pluggy==1.0.0
py==1.11.0
pycodestyle==2.9.1
pyflakes==2.5.0
pyparsing==3.1.4
pyRserve==1.0.4
pytest==7.0.1
pytest-flake8==1.1.1
scipy==1.5.4
-e git+https://github.com/OpenMined/PySyft.git@6c049ac8c4c2e9598bbd495d9a5fd716d3e46126#egg=syft
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: PySyft
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- args==0.1.0
- attrs==22.2.0
- clint==0.5.1
- flake8==5.0.4
- importlib-metadata==4.2.0
- iniconfig==1.1.1
- line-profiler==4.1.3
- mccabe==0.7.0
- numpy==1.19.5
- packaging==21.3
- phe==1.5.0
- pluggy==1.0.0
- py==1.11.0
- pycodestyle==2.9.1
- pyflakes==2.5.0
- pyparsing==3.1.4
- pyrserve==1.0.4
- pytest==7.0.1
- pytest-flake8==1.1.1
- scipy==1.5.4
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/PySyft
| [
"tests/test_tensor.py::scatterTests::testScatter_Numerical0",
"tests/test_tensor.py::scatterTests::testScatter_Numerical1",
"tests/test_tensor.py::scatterTests::testScatter_Numerical2",
"tests/test_tensor.py::scatterTests::testScatter_Numerical3",
"tests/test_tensor.py::scatterTests::testScatter_Numerical4",
"tests/test_tensor.py::scatterTests::testScatter_Numerical5",
"tests/test_tensor.py::scatterTests::testScatter_Numerical6"
] | [] | [
"tests/test_tensor.py::DimTests::testAsView",
"tests/test_tensor.py::DimTests::testDimOne",
"tests/test_tensor.py::DimTests::testResize",
"tests/test_tensor.py::DimTests::testResizeAs",
"tests/test_tensor.py::DimTests::testSize",
"tests/test_tensor.py::DimTests::testView",
"tests/test_tensor.py::AddTests::testInplace",
"tests/test_tensor.py::AddTests::testScalar",
"tests/test_tensor.py::AddTests::testSimple",
"tests/test_tensor.py::CeilTests::testCeil",
"tests/test_tensor.py::CeilTests::testCeil_",
"tests/test_tensor.py::ZeroTests::testZero",
"tests/test_tensor.py::FloorTests::testFloor_",
"tests/test_tensor.py::SubTests::testInplace",
"tests/test_tensor.py::SubTests::testScalar",
"tests/test_tensor.py::SubTests::testSimple",
"tests/test_tensor.py::MaxTests::testAxis",
"tests/test_tensor.py::MaxTests::testNoDim",
"tests/test_tensor.py::MultTests::testInplace",
"tests/test_tensor.py::MultTests::testScalar",
"tests/test_tensor.py::MultTests::testSimple",
"tests/test_tensor.py::DivTests::testInplace",
"tests/test_tensor.py::DivTests::testScalar",
"tests/test_tensor.py::DivTests::testSimple",
"tests/test_tensor.py::AbsTests::testabs",
"tests/test_tensor.py::AbsTests::testabs_",
"tests/test_tensor.py::ShapeTests::testShape",
"tests/test_tensor.py::SqrtTests::testSqrt",
"tests/test_tensor.py::SqrtTests::testSqrt_",
"tests/test_tensor.py::SumTests::testDimIsNotNoneInt",
"tests/test_tensor.py::SumTests::testDimNoneInt",
"tests/test_tensor.py::EqualTests::testEqOp",
"tests/test_tensor.py::EqualTests::testEqual",
"tests/test_tensor.py::EqualTests::testIneqOp",
"tests/test_tensor.py::EqualTests::testNotEqual",
"tests/test_tensor.py::IndexTests::testIndexing",
"tests/test_tensor.py::sigmoidTests::testSigmoid",
"tests/test_tensor.py::addmm::testaddmm1d",
"tests/test_tensor.py::addmm::testaddmm2d",
"tests/test_tensor.py::addmm::testaddmm_1d",
"tests/test_tensor.py::addmm::testaddmm_2d",
"tests/test_tensor.py::addcmulTests::testaddcmul1d",
"tests/test_tensor.py::addcmulTests::testaddcmul2d",
"tests/test_tensor.py::addcmulTests::testaddcmul_1d",
"tests/test_tensor.py::addcmulTests::testaddcmul_2d",
"tests/test_tensor.py::addcdivTests::testaddcdiv1d",
"tests/test_tensor.py::addcdivTests::testaddcdiv2d",
"tests/test_tensor.py::addcdivTests::testaddcdiv_1d",
"tests/test_tensor.py::addcdivTests::testaddcdiv_2d",
"tests/test_tensor.py::addmvTests::testaddmv",
"tests/test_tensor.py::addmvTests::testaddmv_",
"tests/test_tensor.py::addbmmTests::testaddbmm",
"tests/test_tensor.py::addbmmTests::testaddbmm_",
"tests/test_tensor.py::baddbmmTests::testbaddbmm",
"tests/test_tensor.py::baddbmmTests::testbaddbmm_",
"tests/test_tensor.py::transposeTests::testT",
"tests/test_tensor.py::transposeTests::testT_",
"tests/test_tensor.py::transposeTests::testTranspose",
"tests/test_tensor.py::transposeTests::testTranspose_",
"tests/test_tensor.py::unsqueezeTests::testUnsqueeze",
"tests/test_tensor.py::unsqueezeTests::testUnsqueeze_",
"tests/test_tensor.py::expTests::testexp",
"tests/test_tensor.py::expTests::testexp_",
"tests/test_tensor.py::fracTests::testfrac",
"tests/test_tensor.py::fracTests::testfrac_",
"tests/test_tensor.py::rsqrtTests::testrsqrt",
"tests/test_tensor.py::rsqrtTests::testrsqrt_",
"tests/test_tensor.py::signTests::testsign",
"tests/test_tensor.py::signTests::testsign_",
"tests/test_tensor.py::numpyTests::testnumpy",
"tests/test_tensor.py::reciprocalTests::testreciprocal",
"tests/test_tensor.py::reciprocalTests::testrsqrt_",
"tests/test_tensor.py::logTests::testLog",
"tests/test_tensor.py::logTests::testLog1p",
"tests/test_tensor.py::logTests::testLog1p_",
"tests/test_tensor.py::logTests::testLog_",
"tests/test_tensor.py::clampTests::testClampFloat",
"tests/test_tensor.py::clampTests::testClampFloatInPlace",
"tests/test_tensor.py::clampTests::testClampInt",
"tests/test_tensor.py::clampTests::testClampIntInPlace",
"tests/test_tensor.py::cloneTests::testClone",
"tests/test_tensor.py::chunkTests::testChunk",
"tests/test_tensor.py::chunkTests::testChunkSameSize",
"tests/test_tensor.py::bernoulliTests::testBernoulli",
"tests/test_tensor.py::bernoulliTests::testBernoulli_",
"tests/test_tensor.py::uniformTests::testUniform",
"tests/test_tensor.py::uniformTests::testUniform_",
"tests/test_tensor.py::fillTests::testFill_",
"tests/test_tensor.py::topkTests::testTopK",
"tests/test_tensor.py::tolistTests::testToList",
"tests/test_tensor.py::traceTests::testTrace",
"tests/test_tensor.py::roundTests::testRound",
"tests/test_tensor.py::roundTests::testRound_",
"tests/test_tensor.py::repeatTests::testRepeat",
"tests/test_tensor.py::powTests::testPow",
"tests/test_tensor.py::powTests::testPow_",
"tests/test_tensor.py::prodTests::testProd",
"tests/test_tensor.py::randomTests::testRandom_",
"tests/test_tensor.py::nonzeroTests::testNonZero",
"tests/test_tensor.py::cumprodTest::testCumprod",
"tests/test_tensor.py::cumprodTest::testCumprod_",
"tests/test_tensor.py::splitTests::testSplit",
"tests/test_tensor.py::squeezeTests::testSqueeze",
"tests/test_tensor.py::expandAsTests::testExpandAs",
"tests/test_tensor.py::meanTests::testMean",
"tests/test_tensor.py::notEqualTests::testNe",
"tests/test_tensor.py::notEqualTests::testNe_",
"tests/test_tensor.py::scatterTests::testScatter_DimOutOfRange",
"tests/test_tensor.py::scatterTests::testScatter_IndexOutOfRange",
"tests/test_tensor.py::scatterTests::testScatter_IndexType",
"tests/test_tensor.py::scatterTests::testScatter_index_src_dimension_mismatch"
] | [] | Apache License 2.0 | 1,673 | 889 | [
"syft/tensor.py"
] |
|
OpenMined__PySyft-237 | 1df96853cc9f1e97f7b106eae3de5a6f62284809 | 2017-09-13 22:12:37 | 06ce023225dd613d8fb14ab2046135b93ab22376 | diff --git a/syft/tensor.py b/syft/tensor.py
index 8ed5efad8d..6cf00090e3 100644
--- a/syft/tensor.py
+++ b/syft/tensor.py
@@ -1160,6 +1160,22 @@ class TensorBase(object):
def deserialize(b):
return pickle.loads(b)
+ def index_select(self, dim, index):
+ """
+ Returns a new Tensor which indexes the ``input`` Tensor along
+ dimension ``dim`` using the entries in ``index``.
+
+ :param dim: dimension in which to index
+ :param index: 1D tensor containing the indices to index
+ :return: Tensor of selected indices
+ """
+ index = _ensure_tensorbase(index)
+ if self.encrypted or index.encrypted:
+ return NotImplemented
+ if index.data.ndim > 1:
+ raise ValueError("Index is supposed to be 1D")
+ return TensorBase(self.data.take(index, axis=dim))
+
def mv(self, tensorvector):
if self.encrypted:
raise NotImplemented
| Implement Default select Functionality for Base Tensor Type
**User Story A:** As a Data Scientist using Syft's Base Tensor type, I want to leverage a default method for computing operations on a Tensor of arbitrary type. For this ticket to be complete, select() should return a new tensor. For a reference on the operation this performs check out [PyTorch](http://pytorch.org/docs/master/tensors.html)'s documentation.
**Acceptance Criteria:**
- If the Base Tensor type's attribute "encrypted" is set to True, it should return a NotImplemented error.
- a unit test demonstrating the correct operation on the Base Tensor type implemented over int and float Tensors.
- inline documentation in the python code. For inspiration on inline documentation, please check out [PyTorch](http://pytorch.org/docs/master/tensors.html)'s documentation for this operator. | OpenMined/PySyft | diff --git a/tests/test_tensor.py b/tests/test_tensor.py
index f876fdf5ef..365a9e96dc 100644
--- a/tests/test_tensor.py
+++ b/tests/test_tensor.py
@@ -751,6 +751,16 @@ class notEqualTests(unittest.TestCase):
self.assertTrue(syft.equal(t1, TensorBase([1, 1, 1, 0])))
+class index_selectTests(unittest.TestCase):
+ def testIndex_select(self):
+ t = TensorBase(np.reshape(np.arange(0, 2 * 3 * 4), (2, 3, 4)))
+ idx = np.array([1, 0])
+ dim = 2
+ result = t.index_select(dim=dim, index=idx)
+ expected = np.array([[[1, 0], [5, 4], [9, 8]], [[13, 12], [17, 16], [21, 20]]])
+ self.assertTrue(np.array_equal(result.data, expected))
+
+
class gatherTests(unittest.TestCase):
def testGatherNumerical1(self):
t = TensorBase(np.array([[65, 17], [14, 25], [76, 22]]))
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 1
} | PySyft/hydrogen | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"line_profiler",
"pytest",
"pytest-flake8"
],
"pre_install": [
"apt-get update",
"apt-get install -y musl-dev g++ libgmp3-dev libmpfr-dev ca-certificates libmpc-dev"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | args==0.1.0
clint==0.5.1
exceptiongroup==1.2.2
flake8==7.2.0
iniconfig==2.1.0
line_profiler==4.2.0
mccabe==0.7.0
numpy==1.26.4
packaging==24.2
phe==1.5.0
pluggy==1.5.0
pycodestyle==2.13.0
pyflakes==3.3.1
pyRserve==1.0.4
pytest==8.3.5
pytest-flake8==1.3.0
scipy==1.13.1
-e git+https://github.com/OpenMined/PySyft.git@1df96853cc9f1e97f7b106eae3de5a6f62284809#egg=syft
tomli==2.2.1
| name: PySyft
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- args==0.1.0
- clint==0.5.1
- exceptiongroup==1.2.2
- flake8==7.2.0
- iniconfig==2.1.0
- line-profiler==4.2.0
- mccabe==0.7.0
- numpy==1.26.4
- packaging==24.2
- phe==1.5.0
- pluggy==1.5.0
- pycodestyle==2.13.0
- pyflakes==3.3.1
- pyrserve==1.0.4
- pytest==8.3.5
- pytest-flake8==1.3.0
- scipy==1.13.1
- tomli==2.2.1
prefix: /opt/conda/envs/PySyft
| [
"tests/test_tensor.py::index_selectTests::testIndex_select"
] | [
"tests/test_tensor.py::scatterTests::testScatter_Numerical0",
"tests/test_tensor.py::scatterTests::testScatter_Numerical1",
"tests/test_tensor.py::scatterTests::testScatter_Numerical2",
"tests/test_tensor.py::scatterTests::testScatter_Numerical3",
"tests/test_tensor.py::scatterTests::testScatter_Numerical4",
"tests/test_tensor.py::scatterTests::testScatter_Numerical5",
"tests/test_tensor.py::scatterTests::testScatter_Numerical6"
] | [
"tests/test_tensor.py::DimTests::testAsView",
"tests/test_tensor.py::DimTests::testDimOne",
"tests/test_tensor.py::DimTests::testResize",
"tests/test_tensor.py::DimTests::testResizeAs",
"tests/test_tensor.py::DimTests::testSize",
"tests/test_tensor.py::DimTests::testView",
"tests/test_tensor.py::AddTests::testInplace",
"tests/test_tensor.py::AddTests::testScalar",
"tests/test_tensor.py::AddTests::testSimple",
"tests/test_tensor.py::CeilTests::testCeil",
"tests/test_tensor.py::CeilTests::testCeil_",
"tests/test_tensor.py::ZeroTests::testZero",
"tests/test_tensor.py::FloorTests::testFloor_",
"tests/test_tensor.py::SubTests::testInplace",
"tests/test_tensor.py::SubTests::testScalar",
"tests/test_tensor.py::SubTests::testSimple",
"tests/test_tensor.py::MaxTests::testAxis",
"tests/test_tensor.py::MaxTests::testNoDim",
"tests/test_tensor.py::MultTests::testInplace",
"tests/test_tensor.py::MultTests::testScalar",
"tests/test_tensor.py::MultTests::testSimple",
"tests/test_tensor.py::DivTests::testInplace",
"tests/test_tensor.py::DivTests::testScalar",
"tests/test_tensor.py::DivTests::testSimple",
"tests/test_tensor.py::AbsTests::testabs",
"tests/test_tensor.py::AbsTests::testabs_",
"tests/test_tensor.py::ShapeTests::testShape",
"tests/test_tensor.py::SqrtTests::testSqrt",
"tests/test_tensor.py::SqrtTests::testSqrt_",
"tests/test_tensor.py::SumTests::testDimIsNotNoneInt",
"tests/test_tensor.py::SumTests::testDimNoneInt",
"tests/test_tensor.py::EqualTests::testEqOp",
"tests/test_tensor.py::EqualTests::testEqual",
"tests/test_tensor.py::EqualTests::testIneqOp",
"tests/test_tensor.py::EqualTests::testNotEqual",
"tests/test_tensor.py::IndexTests::testIndexing",
"tests/test_tensor.py::sigmoidTests::testSigmoid",
"tests/test_tensor.py::addmm::testaddmm1d",
"tests/test_tensor.py::addmm::testaddmm2d",
"tests/test_tensor.py::addmm::testaddmm_1d",
"tests/test_tensor.py::addmm::testaddmm_2d",
"tests/test_tensor.py::addcmulTests::testaddcmul1d",
"tests/test_tensor.py::addcmulTests::testaddcmul2d",
"tests/test_tensor.py::addcmulTests::testaddcmul_1d",
"tests/test_tensor.py::addcmulTests::testaddcmul_2d",
"tests/test_tensor.py::addcdivTests::testaddcdiv1d",
"tests/test_tensor.py::addcdivTests::testaddcdiv2d",
"tests/test_tensor.py::addcdivTests::testaddcdiv_1d",
"tests/test_tensor.py::addcdivTests::testaddcdiv_2d",
"tests/test_tensor.py::addmvTests::testaddmv",
"tests/test_tensor.py::addmvTests::testaddmv_",
"tests/test_tensor.py::addbmmTests::testaddbmm",
"tests/test_tensor.py::addbmmTests::testaddbmm_",
"tests/test_tensor.py::baddbmmTests::testbaddbmm",
"tests/test_tensor.py::baddbmmTests::testbaddbmm_",
"tests/test_tensor.py::transposeTests::testT",
"tests/test_tensor.py::transposeTests::testT_",
"tests/test_tensor.py::transposeTests::testTranspose",
"tests/test_tensor.py::transposeTests::testTranspose_",
"tests/test_tensor.py::unsqueezeTests::testUnsqueeze",
"tests/test_tensor.py::unsqueezeTests::testUnsqueeze_",
"tests/test_tensor.py::expTests::testexp",
"tests/test_tensor.py::expTests::testexp_",
"tests/test_tensor.py::fracTests::testfrac",
"tests/test_tensor.py::fracTests::testfrac_",
"tests/test_tensor.py::rsqrtTests::testrsqrt",
"tests/test_tensor.py::rsqrtTests::testrsqrt_",
"tests/test_tensor.py::signTests::testsign",
"tests/test_tensor.py::signTests::testsign_",
"tests/test_tensor.py::numpyTests::testnumpy",
"tests/test_tensor.py::reciprocalTests::testreciprocal",
"tests/test_tensor.py::reciprocalTests::testrsqrt_",
"tests/test_tensor.py::logTests::testLog",
"tests/test_tensor.py::logTests::testLog1p",
"tests/test_tensor.py::logTests::testLog1p_",
"tests/test_tensor.py::logTests::testLog_",
"tests/test_tensor.py::clampTests::testClampFloat",
"tests/test_tensor.py::clampTests::testClampFloatInPlace",
"tests/test_tensor.py::clampTests::testClampInt",
"tests/test_tensor.py::clampTests::testClampIntInPlace",
"tests/test_tensor.py::cloneTests::testClone",
"tests/test_tensor.py::chunkTests::testChunk",
"tests/test_tensor.py::chunkTests::testChunkSameSize",
"tests/test_tensor.py::gtTests::testGtInPlaceWithNumber",
"tests/test_tensor.py::gtTests::testGtInPlaceWithTensor",
"tests/test_tensor.py::gtTests::testGtWithNumber",
"tests/test_tensor.py::gtTests::testGtWithTensor",
"tests/test_tensor.py::bernoulliTests::testBernoulli",
"tests/test_tensor.py::bernoulliTests::testBernoulli_",
"tests/test_tensor.py::uniformTests::testUniform",
"tests/test_tensor.py::uniformTests::testUniform_",
"tests/test_tensor.py::fillTests::testFill_",
"tests/test_tensor.py::topkTests::testTopK",
"tests/test_tensor.py::tolistTests::testToList",
"tests/test_tensor.py::traceTests::testTrace",
"tests/test_tensor.py::roundTests::testRound",
"tests/test_tensor.py::roundTests::testRound_",
"tests/test_tensor.py::repeatTests::testRepeat",
"tests/test_tensor.py::powTests::testPow",
"tests/test_tensor.py::powTests::testPow_",
"tests/test_tensor.py::prodTests::testProd",
"tests/test_tensor.py::randomTests::testRandom_",
"tests/test_tensor.py::nonzeroTests::testNonZero",
"tests/test_tensor.py::cumprodTest::testCumprod",
"tests/test_tensor.py::cumprodTest::testCumprod_",
"tests/test_tensor.py::splitTests::testSplit",
"tests/test_tensor.py::squeezeTests::testSqueeze",
"tests/test_tensor.py::expandAsTests::testExpandAs",
"tests/test_tensor.py::meanTests::testMean",
"tests/test_tensor.py::notEqualTests::testNe",
"tests/test_tensor.py::notEqualTests::testNe_",
"tests/test_tensor.py::gatherTests::testGatherNumerical1",
"tests/test_tensor.py::gatherTests::testGatherNumerical2",
"tests/test_tensor.py::scatterTests::testScatter_DimOutOfRange",
"tests/test_tensor.py::scatterTests::testScatter_IndexOutOfRange",
"tests/test_tensor.py::scatterTests::testScatter_IndexType",
"tests/test_tensor.py::scatterTests::testScatter_index_src_dimension_mismatch"
] | [] | Apache License 2.0 | 1,677 | 265 | [
"syft/tensor.py"
] |
|
ucfopen__canvasapi-76 | 7eb0ec8ec2d8b9c5b6036edb3a93014b241c4fe6 | 2017-09-15 17:55:20 | f2faa1835e104aae764a1fc7638c284d2888639f | diff --git a/canvasapi/course.py b/canvasapi/course.py
index 8cd5329..7b87e2d 100644
--- a/canvasapi/course.py
+++ b/canvasapi/course.py
@@ -1,4 +1,5 @@
from __future__ import absolute_import, division, print_function, unicode_literals
+from warnings import warn
from six import python_2_unicode_compatible
@@ -1134,12 +1135,15 @@ class Course(CanvasObject):
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.submission.Submission`
"""
+ if 'grouped' in kwargs:
+ warn('The `grouped` parameter must be empty. Removing kwarg `grouped`.')
+ del kwargs['grouped']
+
return PaginatedList(
Submission,
self._requester,
'GET',
'courses/%s/students/submissions' % (self.id),
- grouped=False,
_kwargs=combine_kwargs(**kwargs)
)
diff --git a/canvasapi/section.py b/canvasapi/section.py
index 5d74533..f9f86ca 100644
--- a/canvasapi/section.py
+++ b/canvasapi/section.py
@@ -1,4 +1,5 @@
from __future__ import absolute_import, division, print_function, unicode_literals
+from warnings import warn
from six import python_2_unicode_compatible
@@ -157,12 +158,15 @@ class Section(CanvasObject):
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.submission.Submission`
"""
+ if 'grouped' in kwargs:
+ warn('The `grouped` parameter must be empty. Removing kwarg `grouped`.')
+ del kwargs['grouped']
+
return PaginatedList(
Submission,
self._requester,
'GET',
'sections/%s/students/submissions' % (self.id),
- grouped=False,
_kwargs=combine_kwargs(**kwargs)
)
| Course.list_multiple_submissions mismanages grouped response
The `Course.list_multiple_submissions` method returns a paginated list of `Submission` instances. However, printing any of these instances using its default string conversion fails:
```python
submissions = course.list_multiple_submissions(student_ids='all', assignment_ids=(123, 456))
for submission in submissions:
print(submission)
```
> `AttributeError: 'Submission' object has no attribute 'id'`
If instead I print each submission’s `__class__` and attribute dictionary, I see something rather strange. The class is indeed `canvasapi.submission.Submission`. However, the attributes are not those that a `Submission` should have. Ignoring the double-underscore internal attributes, each `submission` instance has:
- `_requester`
- `attributes`
- `integration_id`
- `section_id`
- `set_attributes`
- `sis_user_id`
- `submissions`
- `to_json`
- `user_id`
The `sis_user_id` and `user_id` attributes tell me that this is some sort of person-identifying structure along the lines of a `User` or `Enrollment` object. But it does not have the complete set of attributes for either of those. The value of the `submissions` attribute is a list of dicts; I believe that each of these dicts corresponds to one `Submission` object.
The `Course.list_multiple_submissions` method produces a `GET /api/v1/courses/:course_id/students/submissions` request, and [the documentation for that request](https://canvas.instructure.com/doc/api/submissions.html#method.submissions_api.for_students) shows that its response can take one of two forms depending on the `grouped` parameter. I can see that the `Course.list_multiple_submissions` implementation always hard-codes `grouped=False`, but the response coming back looks exactly like the Canvas API documentation example of a *grouped* response, complete with those `submissions` attributes giving lists of `Submission` objects.
So it seems that the `grouped=False` aspect of the request is not working as intended. I don’t know why; is this a Canvas bug or a `canvasapi` bug? Either way, the current behavior certainly is not working as intended. `canvasapi` should either request a non-grouped response in a way that works, or else it should process the grouped response in a way that respects the actual structure and meaning of the returned data.
For now I am working around this myself by building new `Submission` instances out of the dicts in each of the `submissions` attributes, like this:
```python
groups = course.list_multiple_submissions(student_ids='all', assignment_ids=assignmentIds)
submissions = [Submission(course._requester, raw) for raw in chain.from_iterable(group.submissions for group in groups)]
```
But clearly that is not how the API is intended to be used. | ucfopen/canvasapi | diff --git a/tests/test_course.py b/tests/test_course.py
index 09b5e4c..b2bc030 100644
--- a/tests/test_course.py
+++ b/tests/test_course.py
@@ -1,7 +1,8 @@
from __future__ import absolute_import, division, print_function, unicode_literals
+import os
import unittest
import uuid
-import os
+import warnings
import requests_mock
from six import text_type
@@ -647,6 +648,25 @@ class TestCourse(unittest.TestCase):
self.assertEqual(len(submission_list), 2)
self.assertIsInstance(submission_list[0], Submission)
+ def test_list_multiple_submissions_grouped_param(self, m):
+ register_uris({'course': ['list_multiple_submissions']}, m)
+
+ with warnings.catch_warnings(record=True) as warning_list:
+ warnings.simplefilter('always')
+ submissions = self.course.list_multiple_submissions(grouped=True)
+ submission_list = [submission for submission in submissions]
+
+ # Ensure using the `grouped` param raises a warning
+ self.assertEqual(len(warning_list), 1)
+ self.assertEqual(warning_list[-1].category, UserWarning)
+ self.assertEqual(
+ text_type(warning_list[-1].message),
+ 'The `grouped` parameter must be empty. Removing kwarg `grouped`.'
+ )
+
+ self.assertEqual(len(submission_list), 2)
+ self.assertIsInstance(submission_list[0], Submission)
+
# get_submission()
def test_get_submission(self, m):
register_uris({'course': ['get_submission']}, m)
diff --git a/tests/test_section.py b/tests/test_section.py
index c26ba21..b3313fa 100644
--- a/tests/test_section.py
+++ b/tests/test_section.py
@@ -1,7 +1,9 @@
from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
+import warnings
import requests_mock
+from six import text_type
from canvasapi import Canvas
from canvasapi.enrollment import Enrollment
@@ -104,6 +106,25 @@ class TestSection(unittest.TestCase):
self.assertEqual(len(submission_list), 2)
self.assertIsInstance(submission_list[0], Submission)
+ def test_list_multiple_submissions_grouped_param(self, m):
+ register_uris({'section': ['list_multiple_submissions']}, m)
+
+ with warnings.catch_warnings(record=True) as warning_list:
+ warnings.simplefilter('always')
+ submissions = self.section.list_multiple_submissions(grouped=True)
+ submission_list = [submission for submission in submissions]
+
+ # Ensure using the `grouped` param raises a warning
+ self.assertEqual(len(warning_list), 1)
+ self.assertEqual(warning_list[-1].category, UserWarning)
+ self.assertEqual(
+ text_type(warning_list[-1].message),
+ 'The `grouped` parameter must be empty. Removing kwarg `grouped`.'
+ )
+
+ self.assertEqual(len(submission_list), 2)
+ self.assertIsInstance(submission_list[0], Submission)
+
# get_submission()
def test_get_submission(self, m):
register_uris({'section': ['get_submission']}, m)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 2
} | 0.6 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements.txt",
"dev_requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
attrs==22.2.0
Babel==2.11.0
-e git+https://github.com/ucfopen/canvasapi.git@7eb0ec8ec2d8b9c5b6036edb3a93014b241c4fe6#egg=canvasapi
certifi==2021.5.30
charset-normalizer==2.0.12
coverage==6.2
docutils==0.18.1
execnet==1.9.0
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
iniconfig==1.1.1
Jinja2==3.0.3
MarkupSafe==2.0.1
packaging==21.3
pluggy==1.0.0
py==1.11.0
pycodestyle==2.10.0
pyflakes==3.0.1
Pygments==2.14.0
pyparsing==3.1.4
pytest==7.0.1
pytest-asyncio==0.16.0
pytest-cov==4.0.0
pytest-mock==3.6.1
pytest-xdist==3.0.2
pytz==2025.2
requests==2.27.1
requests-mock==1.12.1
six==1.17.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinx-rtd-theme==2.0.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jquery==4.1
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
zipp==3.6.0
| name: canvasapi
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- attrs==22.2.0
- babel==2.11.0
- charset-normalizer==2.0.12
- coverage==6.2
- docutils==0.18.1
- execnet==1.9.0
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- jinja2==3.0.3
- markupsafe==2.0.1
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pycodestyle==2.10.0
- pyflakes==3.0.1
- pygments==2.14.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-asyncio==0.16.0
- pytest-cov==4.0.0
- pytest-mock==3.6.1
- pytest-xdist==3.0.2
- pytz==2025.2
- requests==2.27.1
- requests-mock==1.12.1
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinx-rtd-theme==2.0.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jquery==4.1
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- zipp==3.6.0
prefix: /opt/conda/envs/canvasapi
| [
"tests/test_course.py::TestCourse::test_list_multiple_submissions_grouped_param",
"tests/test_section.py::TestSection::test_list_multiple_submissions_grouped_param"
] | [] | [
"tests/test_course.py::TestCourse::test__str__",
"tests/test_course.py::TestCourse::test_conclude",
"tests/test_course.py::TestCourse::test_course_files",
"tests/test_course.py::TestCourse::test_create_assignment",
"tests/test_course.py::TestCourse::test_create_assignment_fail",
"tests/test_course.py::TestCourse::test_create_assignment_group",
"tests/test_course.py::TestCourse::test_create_course_section",
"tests/test_course.py::TestCourse::test_create_discussion_topic",
"tests/test_course.py::TestCourse::test_create_external_feed",
"tests/test_course.py::TestCourse::test_create_external_tool",
"tests/test_course.py::TestCourse::test_create_folder",
"tests/test_course.py::TestCourse::test_create_group_category",
"tests/test_course.py::TestCourse::test_create_module",
"tests/test_course.py::TestCourse::test_create_module_fail",
"tests/test_course.py::TestCourse::test_create_page",
"tests/test_course.py::TestCourse::test_create_page_fail",
"tests/test_course.py::TestCourse::test_create_quiz",
"tests/test_course.py::TestCourse::test_create_quiz_fail",
"tests/test_course.py::TestCourse::test_delete",
"tests/test_course.py::TestCourse::test_delete_external_feed",
"tests/test_course.py::TestCourse::test_edit_front_page",
"tests/test_course.py::TestCourse::test_enroll_user",
"tests/test_course.py::TestCourse::test_get_assignment",
"tests/test_course.py::TestCourse::test_get_assignment_group",
"tests/test_course.py::TestCourse::test_get_assignments",
"tests/test_course.py::TestCourse::test_get_course_level_assignment_data",
"tests/test_course.py::TestCourse::test_get_course_level_participation_data",
"tests/test_course.py::TestCourse::test_get_course_level_student_summary_data",
"tests/test_course.py::TestCourse::test_get_discussion_topic",
"tests/test_course.py::TestCourse::test_get_discussion_topics",
"tests/test_course.py::TestCourse::test_get_enrollments",
"tests/test_course.py::TestCourse::test_get_external_tool",
"tests/test_course.py::TestCourse::test_get_external_tools",
"tests/test_course.py::TestCourse::test_get_file",
"tests/test_course.py::TestCourse::test_get_folder",
"tests/test_course.py::TestCourse::test_get_full_discussion_topic",
"tests/test_course.py::TestCourse::test_get_module",
"tests/test_course.py::TestCourse::test_get_modules",
"tests/test_course.py::TestCourse::test_get_page",
"tests/test_course.py::TestCourse::test_get_pages",
"tests/test_course.py::TestCourse::test_get_quiz",
"tests/test_course.py::TestCourse::test_get_quiz_fail",
"tests/test_course.py::TestCourse::test_get_quizzes",
"tests/test_course.py::TestCourse::test_get_recent_students",
"tests/test_course.py::TestCourse::test_get_section",
"tests/test_course.py::TestCourse::test_get_settings",
"tests/test_course.py::TestCourse::test_get_submission",
"tests/test_course.py::TestCourse::test_get_user",
"tests/test_course.py::TestCourse::test_get_user_id_type",
"tests/test_course.py::TestCourse::test_get_user_in_a_course_level_assignment_data",
"tests/test_course.py::TestCourse::test_get_user_in_a_course_level_messaging_data",
"tests/test_course.py::TestCourse::test_get_user_in_a_course_level_participation_data",
"tests/test_course.py::TestCourse::test_get_users",
"tests/test_course.py::TestCourse::test_list_assignment_groups",
"tests/test_course.py::TestCourse::test_list_external_feeds",
"tests/test_course.py::TestCourse::test_list_folders",
"tests/test_course.py::TestCourse::test_list_gradeable_students",
"tests/test_course.py::TestCourse::test_list_group_categories",
"tests/test_course.py::TestCourse::test_list_groups",
"tests/test_course.py::TestCourse::test_list_multiple_submissions",
"tests/test_course.py::TestCourse::test_list_sections",
"tests/test_course.py::TestCourse::test_list_submissions",
"tests/test_course.py::TestCourse::test_list_tabs",
"tests/test_course.py::TestCourse::test_mark_submission_as_read",
"tests/test_course.py::TestCourse::test_mark_submission_as_unread",
"tests/test_course.py::TestCourse::test_preview_html",
"tests/test_course.py::TestCourse::test_reorder_pinned_topics",
"tests/test_course.py::TestCourse::test_reorder_pinned_topics_no_list",
"tests/test_course.py::TestCourse::test_reset",
"tests/test_course.py::TestCourse::test_show_front_page",
"tests/test_course.py::TestCourse::test_subit_assignment_fail",
"tests/test_course.py::TestCourse::test_submit_assignment",
"tests/test_course.py::TestCourse::test_update",
"tests/test_course.py::TestCourse::test_update_settings",
"tests/test_course.py::TestCourse::test_update_submission",
"tests/test_course.py::TestCourse::test_update_tab",
"tests/test_course.py::TestCourse::test_upload",
"tests/test_course.py::TestCourseNickname::test__str__",
"tests/test_course.py::TestCourseNickname::test_remove",
"tests/test_section.py::TestSection::test__str__",
"tests/test_section.py::TestSection::test_cross_list_section",
"tests/test_section.py::TestSection::test_decross_list_section",
"tests/test_section.py::TestSection::test_delete",
"tests/test_section.py::TestSection::test_edit",
"tests/test_section.py::TestSection::test_get_enrollments",
"tests/test_section.py::TestSection::test_get_submission",
"tests/test_section.py::TestSection::test_list_multiple_submissions",
"tests/test_section.py::TestSection::test_list_submissions",
"tests/test_section.py::TestSection::test_mark_submission_as_read",
"tests/test_section.py::TestSection::test_mark_submission_as_unread",
"tests/test_section.py::TestSection::test_subit_assignment_fail",
"tests/test_section.py::TestSection::test_submit_assignment",
"tests/test_section.py::TestSection::test_update_submission"
] | [] | MIT License | 1,678 | 492 | [
"canvasapi/course.py",
"canvasapi/section.py"
] |
|
pre-commit__pre-commit-622 | 773a817f7fa300c5561e7d27ff6a67b11c261fc5 | 2017-09-17 22:23:10 | 3a7806ea30507dbfba6571260210420a62f8022d | diff --git a/pre_commit/staged_files_only.py b/pre_commit/staged_files_only.py
index cfd6381..1d0c364 100644
--- a/pre_commit/staged_files_only.py
+++ b/pre_commit/staged_files_only.py
@@ -8,6 +8,7 @@ import time
from pre_commit.util import CalledProcessError
from pre_commit.util import cmd_output
+from pre_commit.util import mkdirp
logger = logging.getLogger('pre_commit')
@@ -43,6 +44,7 @@ def staged_files_only(patch_dir):
'Stashing unstaged files to {}.'.format(patch_filename),
)
# Save the current unstaged changes as a patch
+ mkdirp(patch_dir)
with io.open(patch_filename, 'wb') as patch_file:
patch_file.write(diff_stdout_binary)
| Unstaged files + never ran pre-commit => "No such file or directory: .../.cache/pre-commit/patch..."
```
$ pre-commit run
[WARNING] Unstaged files detected.
[INFO] Stashing unstaged files to /home/asottile/.cache/pre-commit/patch1505686307.
An unexpected error has occurred: IOError: [Errno 2] No such file or directory: '/home/asottile/.cache/pre-commit/patch1505686307'
Check the log at /home/asottile/.cache/pre-commit/pre-commit.log
```
Stacktrace:
```python
Traceback (most recent call last):
File "/home/asottile/workspace/pre-commit/pre_commit/error_handler.py", line 44, in error_handler
yield
File "/home/asottile/workspace/pre-commit/pre_commit/main.py", line 231, in main
return run(runner, args)
File "/home/asottile/workspace/pre-commit/pre_commit/commands/run.py", line 249, in run
with ctx:
File "/usr/lib/python2.7/contextlib.py", line 17, in __enter__
return self.gen.next()
File "/home/asottile/workspace/pre-commit/pre_commit/staged_files_only.py", line 46, in staged_files_only
with io.open(patch_filename, 'wb') as patch_file:
IOError: [Errno 2] No such file or directory: '/home/asottile/.cache/pre-commit/patch1505686307'
``` | pre-commit/pre-commit | diff --git a/tests/staged_files_only_test.py b/tests/staged_files_only_test.py
index aec55f5..36b1985 100644
--- a/tests/staged_files_only_test.py
+++ b/tests/staged_files_only_test.py
@@ -75,6 +75,15 @@ def test_foo_something_unstaged(foo_staged, patch_dir):
_test_foo_state(foo_staged, 'herp\nderp\n', 'AM')
+def test_does_not_crash_patch_dir_does_not_exist(foo_staged, patch_dir):
+ with io.open(foo_staged.foo_filename, 'w') as foo_file:
+ foo_file.write('hello\nworld\n')
+
+ shutil.rmtree(patch_dir)
+ with staged_files_only(patch_dir):
+ pass
+
+
def test_something_unstaged_ext_diff_tool(foo_staged, patch_dir, tmpdir):
diff_tool = tmpdir.join('diff-tool.sh')
diff_tool.write('#!/usr/bin/env bash\necho "$@"\n')
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 1.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": null,
"pre_install": null,
"python": "3.6",
"reqs_path": [
"requirements-dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | aspy.yaml==1.3.0
attrs==22.2.0
cached-property==1.5.2
certifi==2021.5.30
coverage==6.2
distlib==0.3.9
filelock==3.4.1
flake8==5.0.4
identify==2.4.4
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig==1.1.1
mccabe==0.7.0
mock==5.2.0
nodeenv==1.6.0
packaging==21.3
platformdirs==2.4.0
pluggy==1.0.0
-e git+https://github.com/pre-commit/pre-commit.git@773a817f7fa300c5561e7d27ff6a67b11c261fc5#egg=pre_commit
py==1.11.0
pycodestyle==2.9.1
pyflakes==2.5.0
pyparsing==3.1.4
pytest==7.0.1
pytest-env==0.6.2
PyYAML==6.0.1
six==1.17.0
tomli==1.2.3
typing_extensions==4.1.1
virtualenv==20.17.1
zipp==3.6.0
| name: pre-commit
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- aspy-yaml==1.3.0
- attrs==22.2.0
- cached-property==1.5.2
- coverage==6.2
- distlib==0.3.9
- filelock==3.4.1
- flake8==5.0.4
- identify==2.4.4
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- iniconfig==1.1.1
- mccabe==0.7.0
- mock==5.2.0
- nodeenv==1.6.0
- packaging==21.3
- platformdirs==2.4.0
- pluggy==1.0.0
- py==1.11.0
- pycodestyle==2.9.1
- pyflakes==2.5.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-env==0.6.2
- pyyaml==6.0.1
- six==1.17.0
- tomli==1.2.3
- typing-extensions==4.1.1
- virtualenv==20.17.1
- zipp==3.6.0
prefix: /opt/conda/envs/pre-commit
| [
"tests/staged_files_only_test.py::test_does_not_crash_patch_dir_does_not_exist"
] | [] | [
"tests/staged_files_only_test.py::test_foo_staged",
"tests/staged_files_only_test.py::test_foo_nothing_unstaged",
"tests/staged_files_only_test.py::test_foo_something_unstaged",
"tests/staged_files_only_test.py::test_something_unstaged_ext_diff_tool",
"tests/staged_files_only_test.py::test_foo_something_unstaged_diff_color_always",
"tests/staged_files_only_test.py::test_foo_both_modify_non_conflicting",
"tests/staged_files_only_test.py::test_foo_both_modify_conflicting",
"tests/staged_files_only_test.py::test_img_staged",
"tests/staged_files_only_test.py::test_img_nothing_unstaged",
"tests/staged_files_only_test.py::test_img_something_unstaged",
"tests/staged_files_only_test.py::test_img_conflict",
"tests/staged_files_only_test.py::test_stage_utf8_changes",
"tests/staged_files_only_test.py::test_stage_non_utf8_changes",
"tests/staged_files_only_test.py::test_non_utf8_conflicting_diff",
"tests/staged_files_only_test.py::test_crlf[true-True-True]",
"tests/staged_files_only_test.py::test_crlf[true-True-False]",
"tests/staged_files_only_test.py::test_crlf[true-False-True]",
"tests/staged_files_only_test.py::test_crlf[true-False-False]",
"tests/staged_files_only_test.py::test_crlf[false-True-True]",
"tests/staged_files_only_test.py::test_crlf[false-True-False]",
"tests/staged_files_only_test.py::test_crlf[false-False-True]",
"tests/staged_files_only_test.py::test_crlf[false-False-False]",
"tests/staged_files_only_test.py::test_crlf[input-True-True]",
"tests/staged_files_only_test.py::test_crlf[input-True-False]",
"tests/staged_files_only_test.py::test_crlf[input-False-True]",
"tests/staged_files_only_test.py::test_crlf[input-False-False]",
"tests/staged_files_only_test.py::test_whitespace_errors",
"tests/staged_files_only_test.py::test_autocrlf_commited_crlf"
] | [] | MIT License | 1,680 | 188 | [
"pre_commit/staged_files_only.py"
] |
|
zhmcclient__python-zhmcclient-443 | 6b163ed76601e39d54eadd28577bdb18ad82181d | 2017-09-18 08:40:13 | 6b163ed76601e39d54eadd28577bdb18ad82181d | diff --git a/zhmcclient_mock/_urihandler.py b/zhmcclient_mock/_urihandler.py
index 3124ff2..d703b52 100644
--- a/zhmcclient_mock/_urihandler.py
+++ b/zhmcclient_mock/_urihandler.py
@@ -503,10 +503,11 @@ class CpcExportPortNamesListHandler(object):
class MetricsContextsHandler(object):
@staticmethod
- def post(hmc, uri, uri_parms, body, logon_required, wait_for_completion):
+ def post(method, hmc, uri, uri_parms, body, logon_required,
+ wait_for_completion):
"""Operation: Create Metrics Context."""
assert wait_for_completion is True # always synchronous
- check_required_fields('POST', uri, body,
+ check_required_fields(method, uri, body,
['anticipated-frequency-seconds'])
new_metrics_context = hmc.metrics_contexts.add(body)
result = {
@@ -519,21 +520,21 @@ class MetricsContextsHandler(object):
class MetricsContextHandler(object):
@staticmethod
- def delete(hmc, uri, uri_parms, logon_required):
+ def delete(method, hmc, uri, uri_parms, logon_required):
"""Operation: Delete Metrics Context."""
try:
metrics_context = hmc.lookup_by_uri(uri)
except KeyError:
- raise InvalidResourceError('DELETE', uri)
+ raise InvalidResourceError(method, uri)
hmc.metrics_contexts.remove(metrics_context.oid)
@staticmethod
- def get(hmc, uri, uri_parms, logon_required):
+ def get(method, hmc, uri, uri_parms, logon_required):
"""Operation: Get Metrics."""
try:
metrics_context = hmc.lookup_by_uri(uri)
except KeyError:
- raise InvalidResourceError('GET', uri)
+ raise InvalidResourceError(method, uri)
result = metrics_context.get_metric_values_response()
return result
| zhmcclient_mock/_urihandler.py:315: TypeError
[test_27.log.txt](https://github.com/zhmcclient/python-zhmcclient/files/1309064/test_27.log.txt)
### Actual behavior
MetricsContextHandlersTests.test_create_get_delete_context fails due to a TypeError in file zhmcclient_mock/_urihandler.py, line 315.
This unit test error avoids to run successfully in Travis for master branch.
### Expected behavior
### Execution environment
* zhmcclient version: master
| zhmcclient/python-zhmcclient | diff --git a/tests/unit/zhmcclient_mock/test_urihandler.py b/tests/unit/zhmcclient_mock/test_urihandler.py
index 7c23cd4..324a9b5 100755
--- a/tests/unit/zhmcclient_mock/test_urihandler.py
+++ b/tests/unit/zhmcclient_mock/test_urihandler.py
@@ -21,12 +21,11 @@ from __future__ import absolute_import, print_function
import requests.packages.urllib3
import unittest
-# from datetime import datetime
+from datetime import datetime
from mock import MagicMock
-# from zhmcclient_mock._hmc import FakedHmc, FakedMetricGroupDefinition, \
-# FakedMetricObjectValues
-from zhmcclient_mock._hmc import FakedHmc
+from zhmcclient_mock._hmc import FakedHmc, FakedMetricGroupDefinition, \
+ FakedMetricObjectValues
from zhmcclient_mock._urihandler import HTTPError, InvalidResourceError, \
InvalidMethodError, CpcNotInDpmError, CpcInDpmError, \
@@ -1033,134 +1032,134 @@ class MetricsContextHandlersTests(unittest.TestCase):
)
self.urihandler = UriHandler(self.uris)
-# def test_create_get_delete_context(self):
-#
-# mc_mgr = self.hmc.metrics_contexts
-#
-# # Prepare faked metric group definitions
-#
-# mg_name = 'partition-usage'
-# mg_def = FakedMetricGroupDefinition(
-# name=mg_name,
-# types=[
-# ('metric-1', 'string-metric'),
-# ('metric-2', 'integer-metric'),
-# ])
-# mg_info = {
-# 'group-name': mg_name,
-# 'metric-infos': [
-# {
-# 'metric-name': 'metric-1',
-# 'metric-type': 'string-metric',
-# },
-# {
-# 'metric-name': 'metric-2',
-# 'metric-type': 'integer-metric',
-# },
-# ],
-# }
-# mc_mgr.add_metric_group_definition(mg_def)
-#
-# mg_name2 = 'cpc-usage'
-# mg_def2 = FakedMetricGroupDefinition(
-# name=mg_name2,
-# types=[
-# ('metric-3', 'string-metric'),
-# ('metric-4', 'integer-metric'),
-# ])
-# mg_info2 = {
-# 'group-name': mg_name2,
-# 'metric-infos': [
-# {
-# 'metric-name': 'metric-3',
-# 'metric-type': 'string-metric',
-# },
-# {
-# 'metric-name': 'metric-4',
-# 'metric-type': 'integer-metric',
-# },
-# ],
-# }
-# mc_mgr.add_metric_group_definition(mg_def2)
-#
-# # Prepare faked metric values
-#
-# mo_val1_input = FakedMetricObjectValues(
-# group_name=mg_name,
-# resource_uri='/api/partitions/fake-oid',
-# timestamp=datetime(2017, 9, 5, 12, 13, 10, 0),
-# values=[
-# ('metric-1', "a"),
-# ('metric-2', 5),
-# ])
-# mc_mgr.add_metric_values(mo_val1_input)
-#
-# mo_val2_input = FakedMetricObjectValues(
-# group_name=mg_name,
-# resource_uri='/api/partitions/fake-oid',
-# timestamp=datetime(2017, 9, 5, 12, 13, 20, 0),
-# values=[
-# ('metric-1', "b"),
-# ('metric-2', -7),
-# ])
-# mc_mgr.add_metric_values(mo_val2_input)
-#
-# mo_val3_input = FakedMetricObjectValues(
-# group_name=mg_name2,
-# resource_uri='/api/cpcs/fake-oid',
-# timestamp=datetime(2017, 9, 5, 12, 13, 10, 0),
-# values=[
-# ('metric-1', "c"),
-# ('metric-2', 0),
-# ])
-# mc_mgr.add_metric_values(mo_val3_input)
-#
-# body = {
-# 'anticipated-frequency-seconds': '10',
-# 'metric-groups': [mg_name, mg_name2],
-# }
-#
-# # the create function to be tested:
-# resp = self.urihandler.post(self.hmc, '/api/services/metrics/context',
-# body, True, True)
-#
-# self.assertIsInstance(resp, dict)
-# self.assertIn('metrics-context-uri', resp)
-# uri = resp['metrics-context-uri']
-# self.assertTrue(uri.startswith('/api/services/metrics/context/'))
-# self.assertIn('metric-group-infos', resp)
-# mg_infos = resp['metric-group-infos']
-# self.assertEqual(mg_infos, [mg_info, mg_info2])
-#
-# # the get function to be tested:
-# mv_resp = self.urihandler.get(self.hmc, uri, True)
-#
-# exp_mv_resp = '''"partition-usage"
-# "/api/partitions/fake-oid"
-# 1504613590000
-# "a",5
-#
-# "/api/partitions/fake-oid"
-# 1504613600000
-# "b",-7
-#
-#
-# "cpc-usage"
-# "/api/cpcs/fake-oid"
-# 1504613590000
-# "c",0
-#
-#
-#
-# '''
-# self.assertEqual(
-# mv_resp, exp_mv_resp,
-# "Actual response string:\n{!r}\n"
-# "Expected response string:\n{!r}\n".
-# format(mv_resp, exp_mv_resp))
-#
-# # the delete function to be tested:
-# self.urihandler.delete(self.hmc, uri, True)
+ def test_create_get_delete_context(self):
+
+ mc_mgr = self.hmc.metrics_contexts
+
+ # Prepare faked metric group definitions
+
+ mg_name = 'partition-usage'
+ mg_def = FakedMetricGroupDefinition(
+ name=mg_name,
+ types=[
+ ('metric-1', 'string-metric'),
+ ('metric-2', 'integer-metric'),
+ ])
+ mg_info = {
+ 'group-name': mg_name,
+ 'metric-infos': [
+ {
+ 'metric-name': 'metric-1',
+ 'metric-type': 'string-metric',
+ },
+ {
+ 'metric-name': 'metric-2',
+ 'metric-type': 'integer-metric',
+ },
+ ],
+ }
+ mc_mgr.add_metric_group_definition(mg_def)
+
+ mg_name2 = 'cpc-usage'
+ mg_def2 = FakedMetricGroupDefinition(
+ name=mg_name2,
+ types=[
+ ('metric-3', 'string-metric'),
+ ('metric-4', 'integer-metric'),
+ ])
+ mg_info2 = {
+ 'group-name': mg_name2,
+ 'metric-infos': [
+ {
+ 'metric-name': 'metric-3',
+ 'metric-type': 'string-metric',
+ },
+ {
+ 'metric-name': 'metric-4',
+ 'metric-type': 'integer-metric',
+ },
+ ],
+ }
+ mc_mgr.add_metric_group_definition(mg_def2)
+
+ # Prepare faked metric values
+
+ mo_val1_input = FakedMetricObjectValues(
+ group_name=mg_name,
+ resource_uri='/api/partitions/fake-oid',
+ timestamp=datetime(2017, 9, 5, 12, 13, 10, 0),
+ values=[
+ ('metric-1', "a"),
+ ('metric-2', 5),
+ ])
+ mc_mgr.add_metric_values(mo_val1_input)
+
+ mo_val2_input = FakedMetricObjectValues(
+ group_name=mg_name,
+ resource_uri='/api/partitions/fake-oid',
+ timestamp=datetime(2017, 9, 5, 12, 13, 20, 0),
+ values=[
+ ('metric-1', "b"),
+ ('metric-2', -7),
+ ])
+ mc_mgr.add_metric_values(mo_val2_input)
+
+ mo_val3_input = FakedMetricObjectValues(
+ group_name=mg_name2,
+ resource_uri='/api/cpcs/fake-oid',
+ timestamp=datetime(2017, 9, 5, 12, 13, 10, 0),
+ values=[
+ ('metric-1', "c"),
+ ('metric-2', 0),
+ ])
+ mc_mgr.add_metric_values(mo_val3_input)
+
+ body = {
+ 'anticipated-frequency-seconds': '10',
+ 'metric-groups': [mg_name, mg_name2],
+ }
+
+ # the create function to be tested:
+ resp = self.urihandler.post(self.hmc, '/api/services/metrics/context',
+ body, True, True)
+
+ self.assertIsInstance(resp, dict)
+ self.assertIn('metrics-context-uri', resp)
+ uri = resp['metrics-context-uri']
+ self.assertTrue(uri.startswith('/api/services/metrics/context/'))
+ self.assertIn('metric-group-infos', resp)
+ mg_infos = resp['metric-group-infos']
+ self.assertEqual(mg_infos, [mg_info, mg_info2])
+
+ # the get function to be tested:
+ mv_resp = self.urihandler.get(self.hmc, uri, True)
+
+ exp_mv_resp = '''"partition-usage"
+"/api/partitions/fake-oid"
+1504613590000
+"a",5
+
+"/api/partitions/fake-oid"
+1504613600000
+"b",-7
+
+
+"cpc-usage"
+"/api/cpcs/fake-oid"
+1504613590000
+"c",0
+
+
+
+'''
+ self.assertEqual(
+ mv_resp, exp_mv_resp,
+ "Actual response string:\n{!r}\n"
+ "Expected response string:\n{!r}\n".
+ format(mv_resp, exp_mv_resp))
+
+ # the delete function to be tested:
+ self.urihandler.delete(self.hmc, uri, True)
class AdapterHandlersTests(unittest.TestCase):
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 1
} | 0.16 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio",
"mock"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | certifi==2025.1.31
charset-normalizer==3.4.1
click==8.1.8
click-repl==0.3.0
click-spinner==0.1.10
coverage==7.8.0
decorator==5.2.1
docopt==0.6.2
exceptiongroup==1.2.2
execnet==2.1.1
idna==3.10
iniconfig==2.1.0
mock==5.2.0
packaging==24.2
pbr==6.1.1
pluggy==1.5.0
progressbar2==4.5.0
prompt_toolkit==3.0.50
pytest==8.3.5
pytest-asyncio==0.26.0
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-xdist==3.6.1
python-utils==3.9.1
pytz==2025.2
requests==2.32.3
six==1.17.0
stomp.py==8.2.0
tabulate==0.9.0
tomli==2.2.1
typing_extensions==4.13.0
urllib3==2.3.0
wcwidth==0.2.13
websocket-client==1.8.0
-e git+https://github.com/zhmcclient/python-zhmcclient.git@6b163ed76601e39d54eadd28577bdb18ad82181d#egg=zhmcclient
| name: python-zhmcclient
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- certifi==2025.1.31
- charset-normalizer==3.4.1
- click==8.1.8
- click-repl==0.3.0
- click-spinner==0.1.10
- coverage==7.8.0
- decorator==5.2.1
- docopt==0.6.2
- exceptiongroup==1.2.2
- execnet==2.1.1
- idna==3.10
- iniconfig==2.1.0
- mock==5.2.0
- packaging==24.2
- pbr==6.1.1
- pluggy==1.5.0
- progressbar2==4.5.0
- prompt-toolkit==3.0.50
- pytest==8.3.5
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-xdist==3.6.1
- python-utils==3.9.1
- pytz==2025.2
- requests==2.32.3
- six==1.17.0
- stomp-py==8.2.0
- tabulate==0.9.0
- tomli==2.2.1
- typing-extensions==4.13.0
- urllib3==2.3.0
- wcwidth==0.2.13
- websocket-client==1.8.0
prefix: /opt/conda/envs/python-zhmcclient
| [
"tests/unit/zhmcclient_mock/test_urihandler.py::MetricsContextHandlersTests::test_create_get_delete_context"
] | [] | [
"tests/unit/zhmcclient_mock/test_urihandler.py::HTTPErrorTests::test_attributes",
"tests/unit/zhmcclient_mock/test_urihandler.py::HTTPErrorTests::test_response",
"tests/unit/zhmcclient_mock/test_urihandler.py::InvalidResourceErrorTests::test_attributes_no_handler",
"tests/unit/zhmcclient_mock/test_urihandler.py::InvalidResourceErrorTests::test_attributes_with_handler",
"tests/unit/zhmcclient_mock/test_urihandler.py::InvalidMethodErrorTests::test_attributes_no_handler",
"tests/unit/zhmcclient_mock/test_urihandler.py::InvalidMethodErrorTests::test_attributes_with_handler",
"tests/unit/zhmcclient_mock/test_urihandler.py::CpcNotInDpmErrorTests::test_attributes",
"tests/unit/zhmcclient_mock/test_urihandler.py::CpcInDpmErrorTests::test_attributes",
"tests/unit/zhmcclient_mock/test_urihandler.py::ParseQueryParmsTests::test_empty",
"tests/unit/zhmcclient_mock/test_urihandler.py::ParseQueryParmsTests::test_invalid_format_1",
"tests/unit/zhmcclient_mock/test_urihandler.py::ParseQueryParmsTests::test_invalid_format_2",
"tests/unit/zhmcclient_mock/test_urihandler.py::ParseQueryParmsTests::test_invalid_format_3",
"tests/unit/zhmcclient_mock/test_urihandler.py::ParseQueryParmsTests::test_none",
"tests/unit/zhmcclient_mock/test_urihandler.py::ParseQueryParmsTests::test_one_leading_amp",
"tests/unit/zhmcclient_mock/test_urihandler.py::ParseQueryParmsTests::test_one_missing_name",
"tests/unit/zhmcclient_mock/test_urihandler.py::ParseQueryParmsTests::test_one_missing_value",
"tests/unit/zhmcclient_mock/test_urihandler.py::ParseQueryParmsTests::test_one_normal",
"tests/unit/zhmcclient_mock/test_urihandler.py::ParseQueryParmsTests::test_one_trailing_amp",
"tests/unit/zhmcclient_mock/test_urihandler.py::ParseQueryParmsTests::test_space_name_1",
"tests/unit/zhmcclient_mock/test_urihandler.py::ParseQueryParmsTests::test_space_name_2",
"tests/unit/zhmcclient_mock/test_urihandler.py::ParseQueryParmsTests::test_space_name_3",
"tests/unit/zhmcclient_mock/test_urihandler.py::ParseQueryParmsTests::test_space_name_4",
"tests/unit/zhmcclient_mock/test_urihandler.py::ParseQueryParmsTests::test_space_value_1",
"tests/unit/zhmcclient_mock/test_urihandler.py::ParseQueryParmsTests::test_space_value_2",
"tests/unit/zhmcclient_mock/test_urihandler.py::ParseQueryParmsTests::test_space_value_3",
"tests/unit/zhmcclient_mock/test_urihandler.py::ParseQueryParmsTests::test_space_value_4",
"tests/unit/zhmcclient_mock/test_urihandler.py::ParseQueryParmsTests::test_two_normal",
"tests/unit/zhmcclient_mock/test_urihandler.py::ParseQueryParmsTests::test_two_same_normal",
"tests/unit/zhmcclient_mock/test_urihandler.py::ParseQueryParmsTests::test_two_same_one_normal",
"tests/unit/zhmcclient_mock/test_urihandler.py::UriHandlerHandlerEmptyTests::test_uris_empty_1",
"tests/unit/zhmcclient_mock/test_urihandler.py::UriHandlerHandlerEmptyTests::test_uris_empty_2",
"tests/unit/zhmcclient_mock/test_urihandler.py::UriHandlerHandlerSimpleTests::test_err_begin_extra",
"tests/unit/zhmcclient_mock/test_urihandler.py::UriHandlerHandlerSimpleTests::test_err_begin_missing",
"tests/unit/zhmcclient_mock/test_urihandler.py::UriHandlerHandlerSimpleTests::test_err_end2_extra",
"tests/unit/zhmcclient_mock/test_urihandler.py::UriHandlerHandlerSimpleTests::test_err_end2_missing",
"tests/unit/zhmcclient_mock/test_urihandler.py::UriHandlerHandlerSimpleTests::test_err_end2_slash",
"tests/unit/zhmcclient_mock/test_urihandler.py::UriHandlerHandlerSimpleTests::test_err_end_extra",
"tests/unit/zhmcclient_mock/test_urihandler.py::UriHandlerHandlerSimpleTests::test_err_end_missing",
"tests/unit/zhmcclient_mock/test_urihandler.py::UriHandlerHandlerSimpleTests::test_err_end_slash",
"tests/unit/zhmcclient_mock/test_urihandler.py::UriHandlerHandlerSimpleTests::test_ok1",
"tests/unit/zhmcclient_mock/test_urihandler.py::UriHandlerHandlerSimpleTests::test_ok2",
"tests/unit/zhmcclient_mock/test_urihandler.py::UriHandlerHandlerSimpleTests::test_ok3",
"tests/unit/zhmcclient_mock/test_urihandler.py::UriHandlerMethodTests::test_delete_cpc2",
"tests/unit/zhmcclient_mock/test_urihandler.py::UriHandlerMethodTests::test_get_cpc1",
"tests/unit/zhmcclient_mock/test_urihandler.py::UriHandlerMethodTests::test_get_cpcs",
"tests/unit/zhmcclient_mock/test_urihandler.py::UriHandlerMethodTests::test_post_cpcs",
"tests/unit/zhmcclient_mock/test_urihandler.py::GenericGetPropertiesHandlerTests::test_get",
"tests/unit/zhmcclient_mock/test_urihandler.py::GenericUpdatePropertiesHandlerTests::test_update_verify",
"tests/unit/zhmcclient_mock/test_urihandler.py::VersionHandlerTests::test_get_version",
"tests/unit/zhmcclient_mock/test_urihandler.py::CpcHandlersTests::test_get",
"tests/unit/zhmcclient_mock/test_urihandler.py::CpcHandlersTests::test_list",
"tests/unit/zhmcclient_mock/test_urihandler.py::CpcHandlersTests::test_update_verify",
"tests/unit/zhmcclient_mock/test_urihandler.py::CpcStartStopHandlerTests::test_start_classic",
"tests/unit/zhmcclient_mock/test_urihandler.py::CpcStartStopHandlerTests::test_stop_classic",
"tests/unit/zhmcclient_mock/test_urihandler.py::CpcStartStopHandlerTests::test_stop_start_dpm",
"tests/unit/zhmcclient_mock/test_urihandler.py::CpcExportPortNamesListHandlerTests::test_invoke_err_no_input",
"tests/unit/zhmcclient_mock/test_urihandler.py::CpcExportPortNamesListHandlerTests::test_invoke_ok",
"tests/unit/zhmcclient_mock/test_urihandler.py::CpcImportProfilesHandlerTests::test_invoke_err_no_input",
"tests/unit/zhmcclient_mock/test_urihandler.py::CpcImportProfilesHandlerTests::test_invoke_ok",
"tests/unit/zhmcclient_mock/test_urihandler.py::CpcExportProfilesHandlerTests::test_invoke_err_no_input",
"tests/unit/zhmcclient_mock/test_urihandler.py::CpcExportProfilesHandlerTests::test_invoke_ok",
"tests/unit/zhmcclient_mock/test_urihandler.py::AdapterHandlersTests::test_get",
"tests/unit/zhmcclient_mock/test_urihandler.py::AdapterHandlersTests::test_list",
"tests/unit/zhmcclient_mock/test_urihandler.py::AdapterHandlersTests::test_update_verify",
"tests/unit/zhmcclient_mock/test_urihandler.py::AdapterChangeCryptoTypeHandlerTests::test_invoke_err_no_body",
"tests/unit/zhmcclient_mock/test_urihandler.py::AdapterChangeCryptoTypeHandlerTests::test_invoke_err_no_crypto_type_field",
"tests/unit/zhmcclient_mock/test_urihandler.py::AdapterChangeCryptoTypeHandlerTests::test_invoke_ok",
"tests/unit/zhmcclient_mock/test_urihandler.py::NetworkPortHandlersTests::test_get",
"tests/unit/zhmcclient_mock/test_urihandler.py::NetworkPortHandlersTests::test_update_verify",
"tests/unit/zhmcclient_mock/test_urihandler.py::StoragePortHandlersTests::test_get",
"tests/unit/zhmcclient_mock/test_urihandler.py::StoragePortHandlersTests::test_update_verify",
"tests/unit/zhmcclient_mock/test_urihandler.py::PartitionHandlersTests::test_create_verify",
"tests/unit/zhmcclient_mock/test_urihandler.py::PartitionHandlersTests::test_delete_verify",
"tests/unit/zhmcclient_mock/test_urihandler.py::PartitionHandlersTests::test_get",
"tests/unit/zhmcclient_mock/test_urihandler.py::PartitionHandlersTests::test_list",
"tests/unit/zhmcclient_mock/test_urihandler.py::PartitionHandlersTests::test_update_verify",
"tests/unit/zhmcclient_mock/test_urihandler.py::PartitionStartStopHandlerTests::test_start_stop",
"tests/unit/zhmcclient_mock/test_urihandler.py::PartitionScsiDumpHandlerTests::test_invoke_err_missing_fields_1",
"tests/unit/zhmcclient_mock/test_urihandler.py::PartitionScsiDumpHandlerTests::test_invoke_err_missing_fields_2",
"tests/unit/zhmcclient_mock/test_urihandler.py::PartitionScsiDumpHandlerTests::test_invoke_err_missing_fields_3",
"tests/unit/zhmcclient_mock/test_urihandler.py::PartitionScsiDumpHandlerTests::test_invoke_err_no_body",
"tests/unit/zhmcclient_mock/test_urihandler.py::PartitionScsiDumpHandlerTests::test_invoke_err_status_1",
"tests/unit/zhmcclient_mock/test_urihandler.py::PartitionScsiDumpHandlerTests::test_invoke_ok",
"tests/unit/zhmcclient_mock/test_urihandler.py::PartitionPswRestartHandlerTests::test_invoke_err_status_1",
"tests/unit/zhmcclient_mock/test_urihandler.py::PartitionPswRestartHandlerTests::test_invoke_ok",
"tests/unit/zhmcclient_mock/test_urihandler.py::PartitionMountIsoImageHandlerTests::test_invoke_err_queryparm_1",
"tests/unit/zhmcclient_mock/test_urihandler.py::PartitionMountIsoImageHandlerTests::test_invoke_err_queryparm_2",
"tests/unit/zhmcclient_mock/test_urihandler.py::PartitionMountIsoImageHandlerTests::test_invoke_err_status_1",
"tests/unit/zhmcclient_mock/test_urihandler.py::PartitionMountIsoImageHandlerTests::test_invoke_ok",
"tests/unit/zhmcclient_mock/test_urihandler.py::PartitionUnmountIsoImageHandlerTests::test_invoke_err_status_1",
"tests/unit/zhmcclient_mock/test_urihandler.py::PartitionUnmountIsoImageHandlerTests::test_invoke_ok",
"tests/unit/zhmcclient_mock/test_urihandler.py::PartitionIncreaseCryptoConfigHandlerTests::test_invoke_err_missing_body",
"tests/unit/zhmcclient_mock/test_urihandler.py::PartitionIncreaseCryptoConfigHandlerTests::test_invoke_err_status_1",
"tests/unit/zhmcclient_mock/test_urihandler.py::PartitionIncreaseCryptoConfigHandlerTests::test_invoke_ok",
"tests/unit/zhmcclient_mock/test_urihandler.py::PartitionDecreaseCryptoConfigHandlerTests::test_invoke_err_missing_body",
"tests/unit/zhmcclient_mock/test_urihandler.py::PartitionDecreaseCryptoConfigHandlerTests::test_invoke_err_status_1",
"tests/unit/zhmcclient_mock/test_urihandler.py::PartitionDecreaseCryptoConfigHandlerTests::test_invoke_ok",
"tests/unit/zhmcclient_mock/test_urihandler.py::PartitionChangeCryptoConfigHandlerTests::test_invoke_err_missing_body",
"tests/unit/zhmcclient_mock/test_urihandler.py::PartitionChangeCryptoConfigHandlerTests::test_invoke_err_missing_field_1",
"tests/unit/zhmcclient_mock/test_urihandler.py::PartitionChangeCryptoConfigHandlerTests::test_invoke_err_missing_field_2",
"tests/unit/zhmcclient_mock/test_urihandler.py::PartitionChangeCryptoConfigHandlerTests::test_invoke_err_status_1",
"tests/unit/zhmcclient_mock/test_urihandler.py::PartitionChangeCryptoConfigHandlerTests::test_invoke_ok",
"tests/unit/zhmcclient_mock/test_urihandler.py::HbaHandlerTests::test_create_verify",
"tests/unit/zhmcclient_mock/test_urihandler.py::HbaHandlerTests::test_delete_verify",
"tests/unit/zhmcclient_mock/test_urihandler.py::HbaHandlerTests::test_get",
"tests/unit/zhmcclient_mock/test_urihandler.py::HbaHandlerTests::test_list",
"tests/unit/zhmcclient_mock/test_urihandler.py::HbaHandlerTests::test_update_verify",
"tests/unit/zhmcclient_mock/test_urihandler.py::HbaReassignPortHandlerTests::test_invoke_err_missing_body",
"tests/unit/zhmcclient_mock/test_urihandler.py::HbaReassignPortHandlerTests::test_invoke_err_missing_field_1",
"tests/unit/zhmcclient_mock/test_urihandler.py::HbaReassignPortHandlerTests::test_invoke_ok",
"tests/unit/zhmcclient_mock/test_urihandler.py::NicHandlerTests::test_create_verify",
"tests/unit/zhmcclient_mock/test_urihandler.py::NicHandlerTests::test_delete_verify",
"tests/unit/zhmcclient_mock/test_urihandler.py::NicHandlerTests::test_get",
"tests/unit/zhmcclient_mock/test_urihandler.py::NicHandlerTests::test_list",
"tests/unit/zhmcclient_mock/test_urihandler.py::NicHandlerTests::test_update_verify",
"tests/unit/zhmcclient_mock/test_urihandler.py::VirtualFunctionHandlerTests::test_create_verify",
"tests/unit/zhmcclient_mock/test_urihandler.py::VirtualFunctionHandlerTests::test_delete_verify",
"tests/unit/zhmcclient_mock/test_urihandler.py::VirtualFunctionHandlerTests::test_get",
"tests/unit/zhmcclient_mock/test_urihandler.py::VirtualFunctionHandlerTests::test_list",
"tests/unit/zhmcclient_mock/test_urihandler.py::VirtualFunctionHandlerTests::test_update_verify",
"tests/unit/zhmcclient_mock/test_urihandler.py::VirtualSwitchHandlersTests::test_get",
"tests/unit/zhmcclient_mock/test_urihandler.py::VirtualSwitchHandlersTests::test_list",
"tests/unit/zhmcclient_mock/test_urihandler.py::VirtualSwitchGetVnicsHandlerTests::test_invoke_ok",
"tests/unit/zhmcclient_mock/test_urihandler.py::LparHandlersTests::test_get",
"tests/unit/zhmcclient_mock/test_urihandler.py::LparHandlersTests::test_list",
"tests/unit/zhmcclient_mock/test_urihandler.py::LparActLoadDeactHandlerTests::test_start_stop",
"tests/unit/zhmcclient_mock/test_urihandler.py::ResetActProfileHandlersTests::test_get",
"tests/unit/zhmcclient_mock/test_urihandler.py::ResetActProfileHandlersTests::test_list",
"tests/unit/zhmcclient_mock/test_urihandler.py::ImageActProfileHandlersTests::test_get",
"tests/unit/zhmcclient_mock/test_urihandler.py::ImageActProfileHandlersTests::test_list",
"tests/unit/zhmcclient_mock/test_urihandler.py::LoadActProfileHandlersTests::test_get",
"tests/unit/zhmcclient_mock/test_urihandler.py::LoadActProfileHandlersTests::test_list"
] | [] | Apache License 2.0 | 1,681 | 451 | [
"zhmcclient_mock/_urihandler.py"
] |
|
pydicom__pydicom-505 | fdabbfe94a898a76f38636c69136df4a17e72640 | 2017-09-18 15:49:52 | bef49851e7c3b70edd43cc40fc84fe905e78d5ba | pep8speaks: Hello @rzinkstok! Thanks for submitting the PR.
- In the file [`pydicom/filereader.py`](https://github.com/pydicom/pydicom/blob/22a0ca81718f71d3f38328a0f747a0d17b2dd1ab/pydicom/filereader.py), following are the PEP8 issues :
> [Line 489:80](https://github.com/pydicom/pydicom/blob/22a0ca81718f71d3f38328a0f747a0d17b2dd1ab/pydicom/filereader.py#L489): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (86 > 79 characters)
> [Line 505:80](https://github.com/pydicom/pydicom/blob/22a0ca81718f71d3f38328a0f747a0d17b2dd1ab/pydicom/filereader.py#L505): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (98 > 79 characters)
darcymason: I think this looks good. Thanks.
@pydicom/pydcom-fff, can someone do a second check?
mrbean-bremen: Looks good for me. | diff --git a/pydicom/filereader.py b/pydicom/filereader.py
index 427791b16..8fd751fa1 100644
--- a/pydicom/filereader.py
+++ b/pydicom/filereader.py
@@ -488,6 +488,20 @@ def _read_file_meta_info(fp):
start_file_meta = fp.tell()
file_meta = read_dataset(fp, is_implicit_VR=False, is_little_endian=True,
stop_when=_not_group_0002)
+ if not file_meta:
+ return file_meta
+
+ # Test the file meta for correct interpretation by requesting the first
+ # data element: if it fails, retry loading the file meta with an
+ # implicit VR (issue #503)
+ try:
+ file_meta[list(file_meta)[0].tag]
+ except NotImplementedError:
+ fp.seek(start_file_meta)
+ file_meta = read_dataset(fp, is_implicit_VR=True,
+ is_little_endian=True,
+ stop_when=_not_group_0002)
+
# Log if the Group Length doesn't match actual length
if 'FileMetaInformationGroupLength' in file_meta:
# FileMetaInformationGroupLength must be 12 bytes long and its value
@@ -501,6 +515,7 @@ def _read_file_meta_info(fp):
"bytes)."
.format(file_meta.FileMetaInformationGroupLength,
length_file_meta))
+
return file_meta
| Support for DICOM files with implicit VR file meta
#### Description
Up to pydicom 0.9.9, it was possible to read a DICOM file that lacked any file meta. When using pydicom from the current master branch, this is no longer the case. When no file meta is present in a DICOM file, the `_read_file_meta` function returns a dataset containing only a group length data element, and then a crash occurs when the TransferSyntaxUID is requested from the file_meta_dataset (line 634 in filereader.py).
It can be fixed by having the `_read_file_meta_info` function return an empty dataset instead. For my solution, see https://github.com/rzinkstok/pydicom/commit/d246f92066a7b4fc8b28ed7f18b28281fc1568f0. I can start a pull request if that is appreciated.
I'm not sure whether the current behavior is intended. Of course, files without meta info are not DICOM-compliant, but our DICOM archives contains a huge amount of old files without file meta, so it would be really convenient if pydicom could read these files without too much fuss.
#### Steps/Code to Reproduce
Get a file without file meta (I can supply such a file) and run the following:
```py
from pydicom import read_file
f = read_file("very_old_ct_slice.dcm", force=True)
print(f.PatientName)
```
#### Expected Results
No error is thrown and the name of the patient is printed.
#### Actual Results
```
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/vagrant/Projects/pydicom/src/pydicom/pydicom/filereader.py", line 804, in read_file
force=force, specific_tags=specific_tags)
File "/home/vagrant/Projects/pydicom/src/pydicom/pydicom/filereader.py", line 634, in read_partial
transfer_syntax = file_meta_dataset.get("TransferSyntaxUID")
File "/home/vagrant/Projects/pydicom/src/pydicom/pydicom/dataset.py", line 431, in get
return getattr(self, key)
File "/home/vagrant/Projects/pydicom/src/pydicom/pydicom/dataset.py", line 475, in __getattr__
return self[tag].value
File "/home/vagrant/Projects/pydicom/src/pydicom/pydicom/dataset.py", line 554, in __getitem__
self[tag] = DataElement_from_raw(data_elem, character_set)
File "/home/vagrant/Projects/pydicom/src/pydicom/pydicom/dataelem.py", line 493, in DataElement_from_raw
raise NotImplementedError("{0:s} in tag {1!r}".format(str(e), raw.tag))
NotImplementedError: Unknown Value Representation '' in tag (0002, 0010)
```
#### Versions
Platform: Linux-4.4.0-93-generic-x86_64-with-Ubuntu-16.04-xenial
Python: ('Python', '2.7.11+ (default, Apr 17 2016, 14:00:29) \n[GCC 5.3.1 20160413]')
Pydicom: 1.0.0a1, at commit ac905d951c95f1d2474b8cd1c35c8eb055e5273b
| pydicom/pydicom | diff --git a/pydicom/tests/test_filereader.py b/pydicom/tests/test_filereader.py
index 9941e488c..dd2c44cc4 100644
--- a/pydicom/tests/test_filereader.py
+++ b/pydicom/tests/test_filereader.py
@@ -664,6 +664,17 @@ class ReaderTests(unittest.TestCase):
self.assertTrue(ds.preamble is None)
self.assertEqual(ds.file_meta, Dataset())
+ def test_file_meta_dataset_implicit_vr(self):
+ """Test reading a file meta dataset that is implicit VR"""
+
+ bytestream = (b'\x02\x00\x10\x00\x12\x00\x00\x00'
+ b'\x31\x2e\x32\x2e\x38\x34\x30\x2e'
+ b'\x31\x30\x30\x30\x38\x2e\x31\x2e'
+ b'\x32\x00')
+ fp = BytesIO(bytestream)
+ ds = read_file(fp, force=True)
+ self.assertTrue('TransferSyntaxUID' in ds.file_meta)
+
def test_no_dataset(self):
"""Test reading no elements or preamble produces empty Dataset"""
bytestream = b''
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_git_commit_hash"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 1
} | 0.9 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
coverage==6.2
execnet==1.9.0
importlib-metadata==4.8.3
iniconfig==1.1.1
packaging==21.3
pluggy==1.0.0
py==1.11.0
-e git+https://github.com/pydicom/pydicom.git@fdabbfe94a898a76f38636c69136df4a17e72640#egg=pydicom
pyparsing==3.1.4
pytest==7.0.1
pytest-asyncio==0.16.0
pytest-cov==4.0.0
pytest-mock==3.6.1
pytest-xdist==3.0.2
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: pydicom
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- coverage==6.2
- execnet==1.9.0
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-asyncio==0.16.0
- pytest-cov==4.0.0
- pytest-mock==3.6.1
- pytest-xdist==3.0.2
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/pydicom
| [
"pydicom/tests/test_filereader.py::ReaderTests::test_file_meta_dataset_implicit_vr"
] | [
"pydicom/tests/test_filereader.py::DeferredReadTests::testFileExists",
"pydicom/tests/test_filereader.py::DeferredReadTests::testTimeCheck",
"pydicom/tests/test_filereader.py::DeferredReadTests::testValuesIdentical",
"pydicom/tests/test_filereader.py::DeferredReadTests::testZippedDeferred"
] | [
"pydicom/tests/test_filereader.py::ReaderTests::testCT",
"pydicom/tests/test_filereader.py::ReaderTests::testDeflate",
"pydicom/tests/test_filereader.py::ReaderTests::testDir",
"pydicom/tests/test_filereader.py::ReaderTests::testEmptyNumbersTag",
"pydicom/tests/test_filereader.py::ReaderTests::testExplicitVRBigEndianNoMeta",
"pydicom/tests/test_filereader.py::ReaderTests::testExplicitVRLittleEndianNoMeta",
"pydicom/tests/test_filereader.py::ReaderTests::testMR",
"pydicom/tests/test_filereader.py::ReaderTests::testNestedPrivateSQ",
"pydicom/tests/test_filereader.py::ReaderTests::testNoForce",
"pydicom/tests/test_filereader.py::ReaderTests::testNoMetaGroupLength",
"pydicom/tests/test_filereader.py::ReaderTests::testNoPixelsRead",
"pydicom/tests/test_filereader.py::ReaderTests::testNoTransferSyntaxInMeta",
"pydicom/tests/test_filereader.py::ReaderTests::testPlanarConfig",
"pydicom/tests/test_filereader.py::ReaderTests::testPrivateSQ",
"pydicom/tests/test_filereader.py::ReaderTests::testRTDose",
"pydicom/tests/test_filereader.py::ReaderTests::testRTPlan",
"pydicom/tests/test_filereader.py::ReaderTests::testRTstruct",
"pydicom/tests/test_filereader.py::ReaderTests::testSpecificTags",
"pydicom/tests/test_filereader.py::ReaderTests::testSpecificTagsWithUnknownLengthSQ",
"pydicom/tests/test_filereader.py::ReaderTests::testSpecificTagsWithUnknownLengthTag",
"pydicom/tests/test_filereader.py::ReaderTests::testUTF8FileName",
"pydicom/tests/test_filereader.py::ReaderTests::test_commandset_no_dataset",
"pydicom/tests/test_filereader.py::ReaderTests::test_correct_ambiguous_vr",
"pydicom/tests/test_filereader.py::ReaderTests::test_correct_ambiguous_vr_compressed",
"pydicom/tests/test_filereader.py::ReaderTests::test_group_length_wrong",
"pydicom/tests/test_filereader.py::ReaderTests::test_long_specific_char_set",
"pydicom/tests/test_filereader.py::ReaderTests::test_meta_no_dataset",
"pydicom/tests/test_filereader.py::ReaderTests::test_no_dataset",
"pydicom/tests/test_filereader.py::ReaderTests::test_no_preamble_command_group_dataset",
"pydicom/tests/test_filereader.py::ReaderTests::test_no_preamble_file_meta_dataset",
"pydicom/tests/test_filereader.py::ReaderTests::test_preamble_command_meta_no_dataset",
"pydicom/tests/test_filereader.py::ReaderTests::test_preamble_commandset_no_dataset",
"pydicom/tests/test_filereader.py::ReaderTests::test_preamble_meta_no_dataset",
"pydicom/tests/test_filereader.py::ReaderTests::test_read_file_does_not_raise",
"pydicom/tests/test_filereader.py::ReadDataElementTests::test_read_AE",
"pydicom/tests/test_filereader.py::ReadDataElementTests::test_read_OD_explicit_little",
"pydicom/tests/test_filereader.py::ReadDataElementTests::test_read_OD_implicit_little",
"pydicom/tests/test_filereader.py::ReadDataElementTests::test_read_OL_explicit_little",
"pydicom/tests/test_filereader.py::ReadDataElementTests::test_read_OL_implicit_little",
"pydicom/tests/test_filereader.py::ReadDataElementTests::test_read_UC_explicit_little",
"pydicom/tests/test_filereader.py::ReadDataElementTests::test_read_UC_implicit_little",
"pydicom/tests/test_filereader.py::ReadDataElementTests::test_read_UR_explicit_little",
"pydicom/tests/test_filereader.py::ReadDataElementTests::test_read_UR_implicit_little",
"pydicom/tests/test_filereader.py::ReadTruncatedFileTests::testReadFileWithMissingPixelData",
"pydicom/tests/test_filereader.py::FileLikeTests::testReadFileGivenFileLikeObject",
"pydicom/tests/test_filereader.py::FileLikeTests::testReadFileGivenFileObject"
] | [] | MIT License | 1,682 | 357 | [
"pydicom/filereader.py"
] |
OpenMined__PySyft-240 | 50c50da091ae6734237302efa5324d7dbed59164 | 2017-09-18 20:25:43 | 06ce023225dd613d8fb14ab2046135b93ab22376 | diff --git a/syft/tensor.py b/syft/tensor.py
index 553420c417..8c97892ea3 100644
--- a/syft/tensor.py
+++ b/syft/tensor.py
@@ -1160,6 +1160,37 @@ class TensorBase(object):
def deserialize(b):
return pickle.loads(b)
+ def remainder(self, divisor):
+ """
+ Computes the element-wise remainder of division.
+ The divisor and dividend may contain both for integer and floating point numbers.
+ The remainder has the same sign as the divisor.
+ When ``divisor`` is a Tensor, the shapes of ``self`` and ``divisor`` must be broadcastable.
+ :param divisor: The divisor. This may be either a number or a tensor.
+ :return: result tensor
+ """
+ if self.encrypted:
+ return NotImplemented
+ if not np.isscalar(divisor):
+ divisor = _ensure_tensorbase(divisor)
+ return TensorBase(np.remainder(self.data, divisor))
+
+ def remainder_(self, divisor):
+ """
+ Computes the element-wise remainder of division.
+ The divisor and dividend may contain both for integer and floating point numbers.
+ The remainder has the same sign as the divisor.
+ When ``divisor`` is a Tensor, the shapes of ``self`` and ``divisor`` must be broadcastable.
+ :param divisor: The divisor. This may be either a number or a tensor.
+ :return: self
+ """
+ if self.encrypted:
+ return NotImplemented
+ if not np.isscalar(divisor):
+ divisor = _ensure_tensorbase(divisor)
+ self.data = np.remainder(self.data, divisor)
+ return self
+
def index_select(self, dim, index):
"""
Returns a new Tensor which indexes the ``input`` Tensor along
| Implement Default remainder Functionality for Base Tensor Type
**User Story A:** As a Data Scientist using Syft's Base Tensor type, I want to leverage a default method for computing operations on a Tensor of arbitrary type. For this ticket to be complete, remainder() should return a new tensor, but remainder_() should operate inline. For a reference on the operation this performs check out [PyTorch](http://pytorch.org/docs/master/tensors.html)'s documentation.
**Acceptance Criteria:**
- If the Base Tensor type's attribute "encrypted" is set to True, it should return a NotImplemented error.
- a unit test demonstrating the correct operation on the Base Tensor type implemented over int and float Tensors.
- inline documentation in the python code. For inspiration on inline documentation, please check out [PyTorch](http://pytorch.org/docs/master/tensors.html)'s documentation for this operator. | OpenMined/PySyft | diff --git a/tests/test_tensor.py b/tests/test_tensor.py
index e8429080be..9eaf6c42c6 100644
--- a/tests/test_tensor.py
+++ b/tests/test_tensor.py
@@ -873,6 +873,23 @@ class scatterTests(unittest.TestCase):
t.scatter_(dim=dim, index=idx, src=src)
+class remainderTests(unittest.TestCase):
+ def testRemainder(self):
+ t = TensorBase([[-2, -3], [4, 1]])
+ result = t.remainder(1.5)
+ self.assertTrue(np.array_equal(result.data, np.array([[1, 0], [1, 1]])))
+
+ def testRemainder_broadcasting(self):
+ t = TensorBase([[-2, -3], [4, 1]])
+ result = t.remainder([2, -3])
+ self.assertTrue(np.array_equal(result.data, np.array([[0, 0], [0, -2]])))
+
+ def testRemainder_(self):
+ t = TensorBase([[-2, -3], [4, 1]])
+ t.remainder_(2)
+ self.assertTrue(np.array_equal(t.data, np.array([[0, 1], [0, 1]])))
+
+
class testMv(unittest.TestCase):
def mvTest(self):
mat = TensorBase([[1, 2, 3], [2, 3, 4], [4, 5, 6]])
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 1
} | PySyft/hydrogen | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-flake8"
],
"pre_install": [
"apt-get update",
"apt-get install -y musl-dev g++ libgmp3-dev libmpfr-dev ca-certificates"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | args==0.1.0
clint==0.5.1
exceptiongroup==1.2.2
flake8==7.2.0
iniconfig==2.1.0
line_profiler==4.2.0
mccabe==0.7.0
numpy==1.26.4
packaging==24.2
phe==1.5.0
pluggy==1.5.0
pycodestyle==2.13.0
pyflakes==3.3.2
pyRserve==1.0.4
pytest==8.3.5
pytest-flake8==1.3.0
scipy==1.13.1
-e git+https://github.com/OpenMined/PySyft.git@50c50da091ae6734237302efa5324d7dbed59164#egg=syft
tomli==2.2.1
| name: PySyft
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- args==0.1.0
- clint==0.5.1
- exceptiongroup==1.2.2
- flake8==7.2.0
- iniconfig==2.1.0
- line-profiler==4.2.0
- mccabe==0.7.0
- numpy==1.26.4
- packaging==24.2
- phe==1.5.0
- pluggy==1.5.0
- pycodestyle==2.13.0
- pyflakes==3.3.2
- pyrserve==1.0.4
- pytest==8.3.5
- pytest-flake8==1.3.0
- scipy==1.13.1
- tomli==2.2.1
prefix: /opt/conda/envs/PySyft
| [
"tests/test_tensor.py::remainderTests::testRemainder",
"tests/test_tensor.py::remainderTests::testRemainder_",
"tests/test_tensor.py::remainderTests::testRemainder_broadcasting"
] | [
"tests/test_tensor.py::scatterTests::testScatter_Numerical0",
"tests/test_tensor.py::scatterTests::testScatter_Numerical1",
"tests/test_tensor.py::scatterTests::testScatter_Numerical2",
"tests/test_tensor.py::scatterTests::testScatter_Numerical3",
"tests/test_tensor.py::scatterTests::testScatter_Numerical4",
"tests/test_tensor.py::scatterTests::testScatter_Numerical5",
"tests/test_tensor.py::scatterTests::testScatter_Numerical6"
] | [
"tests/test_tensor.py::DimTests::testAsView",
"tests/test_tensor.py::DimTests::testDimOne",
"tests/test_tensor.py::DimTests::testResize",
"tests/test_tensor.py::DimTests::testResizeAs",
"tests/test_tensor.py::DimTests::testSize",
"tests/test_tensor.py::DimTests::testView",
"tests/test_tensor.py::AddTests::testInplace",
"tests/test_tensor.py::AddTests::testScalar",
"tests/test_tensor.py::AddTests::testSimple",
"tests/test_tensor.py::CeilTests::testCeil",
"tests/test_tensor.py::CeilTests::testCeil_",
"tests/test_tensor.py::ZeroTests::testZero",
"tests/test_tensor.py::FloorTests::testFloor_",
"tests/test_tensor.py::SubTests::testInplace",
"tests/test_tensor.py::SubTests::testScalar",
"tests/test_tensor.py::SubTests::testSimple",
"tests/test_tensor.py::MaxTests::testAxis",
"tests/test_tensor.py::MaxTests::testNoDim",
"tests/test_tensor.py::MultTests::testInplace",
"tests/test_tensor.py::MultTests::testScalar",
"tests/test_tensor.py::MultTests::testSimple",
"tests/test_tensor.py::DivTests::testInplace",
"tests/test_tensor.py::DivTests::testScalar",
"tests/test_tensor.py::DivTests::testSimple",
"tests/test_tensor.py::AbsTests::testabs",
"tests/test_tensor.py::AbsTests::testabs_",
"tests/test_tensor.py::ShapeTests::testShape",
"tests/test_tensor.py::SqrtTests::testSqrt",
"tests/test_tensor.py::SqrtTests::testSqrt_",
"tests/test_tensor.py::SumTests::testDimIsNotNoneInt",
"tests/test_tensor.py::SumTests::testDimNoneInt",
"tests/test_tensor.py::EqualTests::testEqOp",
"tests/test_tensor.py::EqualTests::testEqual",
"tests/test_tensor.py::EqualTests::testIneqOp",
"tests/test_tensor.py::EqualTests::testNotEqual",
"tests/test_tensor.py::IndexTests::testIndexing",
"tests/test_tensor.py::sigmoidTests::testSigmoid",
"tests/test_tensor.py::addmm::testaddmm1d",
"tests/test_tensor.py::addmm::testaddmm2d",
"tests/test_tensor.py::addmm::testaddmm_1d",
"tests/test_tensor.py::addmm::testaddmm_2d",
"tests/test_tensor.py::addcmulTests::testaddcmul1d",
"tests/test_tensor.py::addcmulTests::testaddcmul2d",
"tests/test_tensor.py::addcmulTests::testaddcmul_1d",
"tests/test_tensor.py::addcmulTests::testaddcmul_2d",
"tests/test_tensor.py::addcdivTests::testaddcdiv1d",
"tests/test_tensor.py::addcdivTests::testaddcdiv2d",
"tests/test_tensor.py::addcdivTests::testaddcdiv_1d",
"tests/test_tensor.py::addcdivTests::testaddcdiv_2d",
"tests/test_tensor.py::addmvTests::testaddmv",
"tests/test_tensor.py::addmvTests::testaddmv_",
"tests/test_tensor.py::addbmmTests::testaddbmm",
"tests/test_tensor.py::addbmmTests::testaddbmm_",
"tests/test_tensor.py::baddbmmTests::testbaddbmm",
"tests/test_tensor.py::baddbmmTests::testbaddbmm_",
"tests/test_tensor.py::transposeTests::testT",
"tests/test_tensor.py::transposeTests::testT_",
"tests/test_tensor.py::transposeTests::testTranspose",
"tests/test_tensor.py::transposeTests::testTranspose_",
"tests/test_tensor.py::unsqueezeTests::testUnsqueeze",
"tests/test_tensor.py::unsqueezeTests::testUnsqueeze_",
"tests/test_tensor.py::expTests::testexp",
"tests/test_tensor.py::expTests::testexp_",
"tests/test_tensor.py::fracTests::testfrac",
"tests/test_tensor.py::fracTests::testfrac_",
"tests/test_tensor.py::rsqrtTests::testrsqrt",
"tests/test_tensor.py::rsqrtTests::testrsqrt_",
"tests/test_tensor.py::signTests::testsign",
"tests/test_tensor.py::signTests::testsign_",
"tests/test_tensor.py::numpyTests::testnumpy",
"tests/test_tensor.py::reciprocalTests::testreciprocal",
"tests/test_tensor.py::reciprocalTests::testrsqrt_",
"tests/test_tensor.py::logTests::testLog",
"tests/test_tensor.py::logTests::testLog1p",
"tests/test_tensor.py::logTests::testLog1p_",
"tests/test_tensor.py::logTests::testLog_",
"tests/test_tensor.py::clampTests::testClampFloat",
"tests/test_tensor.py::clampTests::testClampFloatInPlace",
"tests/test_tensor.py::clampTests::testClampInt",
"tests/test_tensor.py::clampTests::testClampIntInPlace",
"tests/test_tensor.py::cloneTests::testClone",
"tests/test_tensor.py::chunkTests::testChunk",
"tests/test_tensor.py::chunkTests::testChunkSameSize",
"tests/test_tensor.py::gtTests::testGtInPlaceWithNumber",
"tests/test_tensor.py::gtTests::testGtInPlaceWithTensor",
"tests/test_tensor.py::gtTests::testGtWithNumber",
"tests/test_tensor.py::gtTests::testGtWithTensor",
"tests/test_tensor.py::bernoulliTests::testBernoulli",
"tests/test_tensor.py::bernoulliTests::testBernoulli_",
"tests/test_tensor.py::uniformTests::testUniform",
"tests/test_tensor.py::uniformTests::testUniform_",
"tests/test_tensor.py::fillTests::testFill_",
"tests/test_tensor.py::topkTests::testTopK",
"tests/test_tensor.py::tolistTests::testToList",
"tests/test_tensor.py::traceTests::testTrace",
"tests/test_tensor.py::roundTests::testRound",
"tests/test_tensor.py::roundTests::testRound_",
"tests/test_tensor.py::repeatTests::testRepeat",
"tests/test_tensor.py::powTests::testPow",
"tests/test_tensor.py::powTests::testPow_",
"tests/test_tensor.py::prodTests::testProd",
"tests/test_tensor.py::randomTests::testRandom_",
"tests/test_tensor.py::nonzeroTests::testNonZero",
"tests/test_tensor.py::cumprodTest::testCumprod",
"tests/test_tensor.py::cumprodTest::testCumprod_",
"tests/test_tensor.py::splitTests::testSplit",
"tests/test_tensor.py::squeezeTests::testSqueeze",
"tests/test_tensor.py::expandAsTests::testExpandAs",
"tests/test_tensor.py::meanTests::testMean",
"tests/test_tensor.py::notEqualTests::testNe",
"tests/test_tensor.py::notEqualTests::testNe_",
"tests/test_tensor.py::index_selectTests::testIndex_select",
"tests/test_tensor.py::gatherTests::testGatherNumerical1",
"tests/test_tensor.py::gatherTests::testGatherNumerical2",
"tests/test_tensor.py::scatterTests::testScatter_DimOutOfRange",
"tests/test_tensor.py::scatterTests::testScatter_IndexOutOfRange",
"tests/test_tensor.py::scatterTests::testScatter_IndexType",
"tests/test_tensor.py::scatterTests::testScatter_index_src_dimension_mismatch",
"tests/test_tensor.py::masked_scatter_Tests::testMasked_scatter_1",
"tests/test_tensor.py::masked_scatter_Tests::testMasked_scatter_braodcasting1",
"tests/test_tensor.py::masked_scatter_Tests::testMasked_scatter_braodcasting2",
"tests/test_tensor.py::eqTests::testEqInPlaceWithNumber",
"tests/test_tensor.py::eqTests::testEqInPlaceWithTensor",
"tests/test_tensor.py::eqTests::testEqWithNumber",
"tests/test_tensor.py::eqTests::testEqWithTensor"
] | [] | Apache License 2.0 | 1,683 | 443 | [
"syft/tensor.py"
] |
|
CORE-GATECH-GROUP__serpent-tools-4 | 606a90d665a15a16c437dd1fb72ef014aa480142 | 2017-09-19 15:34:09 | 606a90d665a15a16c437dd1fb72ef014aa480142 | diff --git a/serpentTools/__init__.py b/serpentTools/__init__.py
index 8af8d0f..d38090d 100644
--- a/serpentTools/__init__.py
+++ b/serpentTools/__init__.py
@@ -1,7 +1,7 @@
from serpentTools import settings
from serpentTools import parsers
-__version__ = '0.1.1'
+__version__ = '0.1.2'
# List TODOS/feature requests here for now
# Messages/Errors
diff --git a/serpentTools/objects/__init__.py b/serpentTools/objects/__init__.py
index 42ee6c0..8f2e2cd 100644
--- a/serpentTools/objects/__init__.py
+++ b/serpentTools/objects/__init__.py
@@ -146,8 +146,18 @@ class DepletedMaterial(_SupportingObject):
AttributeError
If the names of the isotopes have not been obtained and specific
isotopes have been requested
+ KeyError
+ If at least one of the days requested is not present
"""
- returnX = timePoints is None
+ if timePoints is not None:
+ returnX = False
+ timeCheck = self._checkTimePoints(xUnits, timePoints)
+ if any(timeCheck):
+ raise KeyError('The following times were not present in file {}'
+ '\n{}'.format(self._container.filePath,
+ ', '.join(timeCheck)))
+ else:
+ returnX = True
if names and 'names' not in self._container.metadata:
raise AttributeError('Parser {} has not stored the isotope names.'
.format(self._container))
@@ -164,6 +174,12 @@ class DepletedMaterial(_SupportingObject):
return yVals, xVals
return yVals
+ def _checkTimePoints(self, xUnits, timePoints):
+ valid = self[xUnits]
+ badPoints = [str(time) for time in timePoints if time not in valid]
+ return badPoints
+
+
def _getXSlice(self, xUnits, timePoints):
allX = self[xUnits]
if timePoints is not None:
diff --git a/serpentTools/parsers/branching.py b/serpentTools/parsers/branching.py
index 6817810..8efd2f1 100644
--- a/serpentTools/parsers/branching.py
+++ b/serpentTools/parsers/branching.py
@@ -1,9 +1,9 @@
"""Parser responsible for reading the ``*coe.m`` files"""
-from serpentTools.parsers import _BaseReader
+from serpentTools.objects.readers import BaseReader
-class BranchingReader(_BaseReader):
+class BranchingReader(BaseReader):
"""
Parser responsible for reading and working with automated branching files.
diff --git a/serpentTools/parsers/bumat.py b/serpentTools/parsers/bumat.py
index 70ead0a..27c155b 100644
--- a/serpentTools/parsers/bumat.py
+++ b/serpentTools/parsers/bumat.py
@@ -1,9 +1,9 @@
"""Parser responsible for reading the ``*bumat<n>.m`` files"""
-from serpentTools.parsers import _MaterialReader
+from serpentTools.objects.readers import MaterialReader
-class BumatReader(_MaterialReader):
+class BumatReader(MaterialReader):
"""
Parser responsible for reading and working with burned material files.
diff --git a/serpentTools/parsers/detector.py b/serpentTools/parsers/detector.py
index ea40920..9c4c33c 100644
--- a/serpentTools/parsers/detector.py
+++ b/serpentTools/parsers/detector.py
@@ -1,9 +1,9 @@
"""Parser responsible for reading the ``*det<n>.m`` files"""
-from serpentTools.parsers import _BaseReader
+from serpentTools.objects.readers import BaseReader
-class DetectorReader(_BaseReader):
+class DetectorReader(BaseReader):
"""
Parser responsible for reading and working with detector files.
diff --git a/serpentTools/parsers/fissionMatrix.py b/serpentTools/parsers/fissionMatrix.py
index 58af81d..3923415 100644
--- a/serpentTools/parsers/fissionMatrix.py
+++ b/serpentTools/parsers/fissionMatrix.py
@@ -1,9 +1,9 @@
"""Parser responsible for reading the ``*fmtx<n>.m`` files"""
-from serpentTools.parsers import _BaseReader
+from serpentTools.objects.readers import BaseReader
-class FissionMatrixReader(_BaseReader):
+class FissionMatrixReader(BaseReader):
"""
Parser responsible for reading and working with fission matrix files.
diff --git a/serpentTools/parsers/results.py b/serpentTools/parsers/results.py
index f33b68c..ae4f1ef 100644
--- a/serpentTools/parsers/results.py
+++ b/serpentTools/parsers/results.py
@@ -1,9 +1,9 @@
"""Parser responsible for reading the ``*res.m`` files"""
-from serpentTools.parsers import _BaseReader
+from serpentTools.objects.readers import BaseReader
-class ResultsReader(_BaseReader):
+class ResultsReader(BaseReader):
"""
Parser responsible for reading and working with result files.
| `getXY` for `DepletedMaterial` raises unhelpful value error if days missing from object
If a depleted material is asked to get some quantity for days that are not present, the following error is raised:
```
File "C:\Users\ajohnson400\AppData\Local\Continuum\Anaconda3\lib\site-packages\serpenttools-0.1.1rc0-py3.5.egg\serpentTools\objects\__init__.py", line 162, in getXY
else allY[rowId][:])
ValueError: could not broadcast input array from shape (19) into shape (0)
```
The value of `colIndices` is an empty list for this case. | CORE-GATECH-GROUP/serpent-tools | diff --git a/serpentTools/tests/test_depletion.py b/serpentTools/tests/test_depletion.py
index 0574305..d7f6d52 100644
--- a/serpentTools/tests/test_depletion.py
+++ b/serpentTools/tests/test_depletion.py
@@ -156,6 +156,17 @@ class DepletedMaterialTester(_DepletionTestHelper):
names=['Xe135'])
numpy.testing.assert_equal(actualDays, self.reader.metadata['days'])
+ def test_getXY_raisesError_badTime(self):
+ """Verify that a ValueError is raised for non-present requested days."""
+ badDays = [-1, 0, 50]
+ with self.assertRaises(KeyError):
+ self.material.getXY('days', 'adens', timePoints=badDays)
+
+ def test_fetchData(self):
+ """Verify that key errors are raised when bad data are requested."""
+ with self.assertRaises(KeyError):
+ _ = self.material['fake units']
+
if __name__ == '__main__':
unittest.main()
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 7
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | contourpy==1.3.0
cycler==0.12.1
drewtils==0.1.9
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
fonttools==4.56.0
importlib_resources==6.5.2
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
kiwisolver==1.4.7
matplotlib==3.9.4
numpy==2.0.2
packaging @ file:///croot/packaging_1734472117206/work
pillow==11.1.0
pluggy @ file:///croot/pluggy_1733169602837/work
pyparsing==3.2.3
pytest @ file:///croot/pytest_1738938843180/work
python-dateutil==2.9.0.post0
-e git+https://github.com/CORE-GATECH-GROUP/serpent-tools.git@606a90d665a15a16c437dd1fb72ef014aa480142#egg=serpentTools
six==1.17.0
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
zipp==3.21.0
| name: serpent-tools
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- contourpy==1.3.0
- cycler==0.12.1
- drewtils==0.1.9
- fonttools==4.56.0
- importlib-resources==6.5.2
- kiwisolver==1.4.7
- matplotlib==3.9.4
- numpy==2.0.2
- pillow==11.1.0
- pyparsing==3.2.3
- python-dateutil==2.9.0.post0
- six==1.17.0
- zipp==3.21.0
prefix: /opt/conda/envs/serpent-tools
| [
"serpentTools/tests/test_depletion.py::DepletedMaterialTester::test_getXY_raisesError_badTime"
] | [] | [
"serpentTools/tests/test_depletion.py::DepletionTester::test_ReadMaterials",
"serpentTools/tests/test_depletion.py::DepletionTester::test_metadata",
"serpentTools/tests/test_depletion.py::DepletedMaterialTester::test_fetchData",
"serpentTools/tests/test_depletion.py::DepletedMaterialTester::test_getXY_adens",
"serpentTools/tests/test_depletion.py::DepletedMaterialTester::test_getXY_adensAndTime",
"serpentTools/tests/test_depletion.py::DepletedMaterialTester::test_getXY_burnup_full",
"serpentTools/tests/test_depletion.py::DepletedMaterialTester::test_getXY_burnup_slice",
"serpentTools/tests/test_depletion.py::DepletedMaterialTester::test_materials"
] | [] | MIT License | 1,688 | 1,274 | [
"serpentTools/__init__.py",
"serpentTools/objects/__init__.py",
"serpentTools/parsers/branching.py",
"serpentTools/parsers/bumat.py",
"serpentTools/parsers/detector.py",
"serpentTools/parsers/fissionMatrix.py",
"serpentTools/parsers/results.py"
] |
|
wookayin__gpustat-28 | a38bc5fd11add4a8ab805f5b327020196ce558d0 | 2017-09-19 17:20:15 | a38bc5fd11add4a8ab805f5b327020196ce558d0 | cjw85: Changed: `power.use` -> `power.draw`, and `power.limit` -> `enforced.power.limit` as given by `nvidia-smi --query`. Changed API properties to be consistent (whilst not being verbose for the second case).
wookayin: Between `power.limit` and `enforced.power.limit`, what is the quantity you are seeking to print out? I thought it was `power.limit`, but you do `enforced.power.limit`, right?
cjw85: I'm calling `N.nvmlDeviceGetEnforcedPowerLimit` so `enforced.power.limit` is the correct form. This is described as "the minimum of various power limiters.", so seems to be the most relevant to display in the simple interface (and provide in the simple API). | diff --git a/gpustat.py b/gpustat.py
index 4ec8a41..df936c4 100755
--- a/gpustat.py
+++ b/gpustat.py
@@ -113,6 +113,24 @@ class GPUStat(object):
v = self.entry['utilization.gpu']
return int(v) if v is not None else None
+ @property
+ def power_draw(self):
+ """
+ Returns the GPU power usage in Watts,
+ or None if the information is not available.
+ """
+ v = self.entry['power.draw']
+ return int(v) if v is not None else None
+
+ @property
+ def power_limit(self):
+ """
+ Returns the (enforced) GPU power limit in Watts,
+ or None if the information is not available.
+ """
+ v = self.entry['enforced.power.limit']
+ return int(v) if v is not None else None
+
@property
def processes(self):
"""
@@ -126,6 +144,7 @@ class GPUStat(object):
show_cmd=False,
show_user=False,
show_pid=False,
+ show_power=False,
gpuname_width=16,
term=Terminal(),
):
@@ -150,6 +169,8 @@ class GPUStat(object):
colors['CUser'] = term.bold_black # gray
colors['CUtil'] = _conditional(lambda: int(self.entry['utilization.gpu']) < 30,
term.green, term.bold_green)
+ colors['CPowU'] = term.bold_red
+ colors['CPowL'] = term.red
if not with_colors:
for k in list(colors.keys()):
@@ -160,10 +181,14 @@ class GPUStat(object):
else: return str(v)
# build one-line display information
- reps = ("%(C1)s[{entry[index]}]%(C0)s %(CName)s{entry[name]:{gpuname_width}}%(C0)s |" +
- "%(CTemp)s{entry[temperature.gpu]:>3}'C%(C0)s, %(CUtil)s{entry[utilization.gpu]:>3} %%%(C0)s | " +
- "%(C1)s%(CMemU)s{entry[memory.used]:>5}%(C0)s / %(CMemT)s{entry[memory.total]:>5}%(C0)s MB"
- ) % colors
+ # we want power use optional, but if deserves being grouped with temperature and utilization
+ reps = "%(C1)s[{entry[index]}]%(C0)s %(CName)s{entry[name]:{gpuname_width}}%(C0)s |" \
+ "%(CTemp)s{entry[temperature.gpu]:>3}'C%(C0)s, %(CUtil)s{entry[utilization.gpu]:>3} %%%(C0)s"
+
+ if show_power:
+ reps += ", %(CPowU)s{entry[power.draw]:>3}%(C0)s / %(CPowL)s{entry[enforced.power.limit]:>3}%(C0)s W"
+ reps += " | %(C1)s%(CMemU)s{entry[memory.used]:>5}%(C0)s / %(CMemT)s{entry[memory.total]:>5}%(C0)s MB"
+ reps = (reps) % colors
reps = reps.format(entry={k: _repr(v) for (k, v) in self.entry.items()},
gpuname_width=gpuname_width)
reps += " |"
@@ -252,6 +277,16 @@ class GPUStatCollection(object):
except N.NVMLError:
utilization = None # Not supported
+ try:
+ power = N.nvmlDeviceGetPowerUsage(handle)
+ except:
+ power = None
+
+ try:
+ power_limit = N.nvmlDeviceGetEnforcedPowerLimit(handle)
+ except:
+ power_limit = None
+
processes = []
try:
nv_comp_processes = N.nvmlDeviceGetComputeRunningProcesses(handle)
@@ -284,6 +319,8 @@ class GPUStatCollection(object):
'name': name,
'temperature.gpu': temperature,
'utilization.gpu': utilization.gpu if utilization else None,
+ 'power.draw': int(power / 1000) if power is not None else None,
+ 'enforced.power.limit': int(power_limit / 1000) if power is not None else None,
# Convert bytes into MBytes
'memory.used': int(memory.used / 1024 / 1024) if memory else None,
'memory.total': int(memory.total / 1024 / 1024) if memory else None,
@@ -323,7 +360,7 @@ class GPUStatCollection(object):
def print_formatted(self, fp=sys.stdout, force_color=False, no_color=False,
show_cmd=False, show_user=False, show_pid=False,
- gpuname_width=16,
+ show_power=False, gpuname_width=16,
):
# ANSI color configuration
if force_color and no_color:
@@ -355,6 +392,7 @@ class GPUStatCollection(object):
show_cmd=show_cmd,
show_user=show_user,
show_pid=show_pid,
+ show_power=show_power,
gpuname_width=gpuname_width,
term=t_color)
fp.write('\n')
@@ -430,6 +468,8 @@ def main():
help='Display username of running process')
parser.add_argument('-p', '--show-pid', action='store_true',
help='Display PID of running process')
+ parser.add_argument('-P', '--show-power', action='store_true',
+ help='Show GPU power usage (and limit)')
parser.add_argument('--gpuname-width', type=int, default=16,
help='The minimum column width of GPU names, defaults to 16')
parser.add_argument('--json', action='store_true', default=False,
| Power usage
Hi,
How to add power usage and efficiency information ? | wookayin/gpustat | diff --git a/test_gpustat.py b/test_gpustat.py
index 0ac0279..4b81978 100644
--- a/test_gpustat.py
+++ b/test_gpustat.py
@@ -72,6 +72,18 @@ def _configure_mock(N, Process,
mock_handles[2]: 71,
}.get(handle, RuntimeError))
+ N.nvmlDeviceGetPowerUsage = _raise_ex(lambda handle: {
+ mock_handles[0]: 125000,
+ mock_handles[1]: 100000,
+ mock_handles[2]: 250000,
+ }.get(handle, RuntimeError))
+
+ N.nvmlDeviceGetEnforcedPowerLimit = _raise_ex(lambda handle: {
+ mock_handles[0]: 250000,
+ mock_handles[1]: 250000,
+ mock_handles[2]: 250000,
+ }.get(handle, RuntimeError))
+
mock_memory_t = namedtuple("Memory_t", ['total', 'used'])
N.nvmlDeviceGetMemoryInfo.side_effect = _raise_ex(lambda handle: {
mock_handles[0]: mock_memory_t(total=12883853312, used=8000*MB),
@@ -147,7 +159,7 @@ class TestGPUStat(unittest.TestCase):
gpustats = gpustat.new_query()
fp = StringIO()
- gpustats.print_formatted(fp=fp, no_color=False, show_user=True, show_cmd=True, show_pid=True)
+ gpustats.print_formatted(fp=fp, no_color=False, show_user=True, show_cmd=True, show_pid=True, show_power=True)
result = fp.getvalue()
print(result)
@@ -157,9 +169,9 @@ class TestGPUStat(unittest.TestCase):
unescaped = '\n'.join(unescaped.split('\n')[1:])
expected = """\
-[0] GeForce GTX TITAN 0 | 80'C, 76 % | 8000 / 12287 MB | user1:python/48448(4000M) user2:python/153223(4000M)
-[1] GeForce GTX TITAN 1 | 36'C, 0 % | 9000 / 12189 MB | user1:torch/192453(3000M) user3:caffe/194826(6000M)
-[2] GeForce GTX TITAN 2 | 71'C, ?? % | 0 / 12189 MB | (Not Supported)
+[0] GeForce GTX TITAN 0 | 80'C, 76 %, 125 / 250 W | 8000 / 12287 MB | user1:python/48448(4000M) user2:python/153223(4000M)
+[1] GeForce GTX TITAN 1 | 36'C, 0 %, 100 / 250 W | 9000 / 12189 MB | user1:torch/192453(3000M) user3:caffe/194826(6000M)
+[2] GeForce GTX TITAN 2 | 71'C, ?? %, 250 / 250 W | 0 / 12189 MB | (Not Supported)
"""
self.maxDiff = 4096
self.assertEqual(unescaped, expected)
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 3,
"test_score": 2
},
"num_modified_files": 1
} | 0.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"mock"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
blessings==1.7
certifi==2021.5.30
-e git+https://github.com/wookayin/gpustat.git@a38bc5fd11add4a8ab805f5b327020196ce558d0#egg=gpustat
importlib-metadata==4.8.3
iniconfig==1.1.1
mock==5.2.0
nvidia-ml-py3==7.352.0
packaging==21.3
pluggy==1.0.0
psutil==7.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
six==1.17.0
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: gpustat
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- blessings==1.7
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- mock==5.2.0
- nvidia-ml-py3==7.352.0
- packaging==21.3
- pluggy==1.0.0
- psutil==7.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- six==1.17.0
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/gpustat
| [
"test_gpustat.py::TestGPUStat::test_new_query_mocked"
] | [] | [
"test_gpustat.py::TestGPUStat::test_attributes_and_items",
"test_gpustat.py::TestGPUStat::test_new_query_mocked_nonexistent_pid"
] | [] | MIT License | 1,689 | 1,395 | [
"gpustat.py"
] |
tornadoweb__tornado-2157 | 34c43f4775971ab9b2b8ed43356f218add6387b2 | 2017-09-21 18:10:28 | 03f13800e854a6fc9e6efa2168e694d9599348bd | diff --git a/tornado/websocket.py b/tornado/websocket.py
index d5a7fa89..c6804ca0 100644
--- a/tornado/websocket.py
+++ b/tornado/websocket.py
@@ -616,6 +616,14 @@ class WebSocketProtocol13(WebSocketProtocol):
def accept_connection(self):
try:
self._handle_websocket_headers()
+ except ValueError:
+ self.handler.set_status(400)
+ log_msg = "Missing/Invalid WebSocket headers"
+ self.handler.finish(log_msg)
+ gen_log.debug(log_msg)
+ return
+
+ try:
self._accept_connection()
except ValueError:
gen_log.debug("Malformed WebSocket request received",
| AttributeError if Websocket client misses required header
If the client misses required header for websocket handshake, the server raises AttributeError.
Minimal code for reproduce
### Client
```python
import socket
REQ_1 = ('GET /ws HTTP/1.1\r\n'
'Host: example.com:9221\r\n'
'Upgrade: websocket\r\n'
'Connection: Upgrade\r\n'
# 'Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ==\r\n'
'Sec-WebSocket-Version: 13\r\n'
'\r\n')
conn = socket.create_connection(('127.0.0.1', 9221))
conn.send(REQ_1.encode('utf-8'))
resp_1 = conn.recv(10 * 1024)
```
### Server
```python
import tornado.ioloop
import tornado.web
import tornado.websocket
class WsHandler(tornado.websocket.WebSocketHandler):
pass
def make_app():
return tornado.web.Application([
(r'/ws', WsHandler),
])
if __name__ == "__main__":
app = make_app()
app.listen(9221)
tornado.ioloop.IOLoop.current().start()
```
### Traceback
```
ERROR:tornado.application:Uncaught exception GET /ws (127.0.0.1)
HTTPServerRequest(protocol='http', host='example.com:8000', method='GET', uri='/ws', version='HTTP/1.1', remote_ip='127.0.0.1', headers={'Host': 'example.com:8000', 'Upgrade': 'websocket', 'Connection': 'Upgrade', 'Sec-Websocket-Version': '13'})
Traceback (most recent call last):
File "/home/pjknkda/test/ws-invalid/python-env/lib/python3.6/site-packages/tornado/websocket.py", line 618, in accept_connection
self._handle_websocket_headers()
File "/home/pjknkda/test/ws-invalid/python-env/lib/python3.6/site-packages/tornado/websocket.py", line 634, in _handle_websocket_headers
raise ValueError("Missing/Invalid WebSocket headers")
ValueError: Missing/Invalid WebSocket headers
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/pjknkda/test/ws-invalid/python-env/lib/python3.6/site-packages/tornado/web.py", line 1467, in _stack_context_handle_exception
raise_exc_info((type, value, traceback))
File "<string>", line 4, in raise_exc_info
File "/home/pjknkda/test/ws-invalid/python-env/lib/python3.6/site-packages/tornado/web.py", line 1669, in wrapper
result = method(self, *args, **kwargs)
File "/home/pjknkda/test/ws-invalid/python-env/lib/python3.6/site-packages/tornado/websocket.py", line 196, in get
self.ws_connection.accept_connection()
File "/home/pjknkda/test/ws-invalid/python-env/lib/python3.6/site-packages/tornado/websocket.py", line 623, in accept_connection
self._abort()
File "/home/pjknkda/test/ws-invalid/python-env/lib/python3.6/site-packages/tornado/websocket.py", line 512, in _abort
self.stream.close() # forcibly tear down the connection
AttributeError: 'NoneType' object has no attribute 'close'
ERROR:tornado.access:500 GET /ws (127.0.0.1) 4.13ms
```
It seems that `WebSocketProtocol13.accept_connection` calls `WebSocketProtocol._abort` immediately if there is missing required headers, however, it is before the handshake, thus there is yet no `self.stream` whereas the _abort function tries to `self.stream.close()`. Also, the _abort function calls `self.close()` and there is also the same buggy code which calls `self.stream.close()` without checking the nullity of `self.stream`. | tornadoweb/tornado | diff --git a/tornado/test/websocket_test.py b/tornado/test/websocket_test.py
index 5a2a6577..54734d81 100644
--- a/tornado/test/websocket_test.py
+++ b/tornado/test/websocket_test.py
@@ -193,6 +193,13 @@ class WebSocketTest(WebSocketBaseTestCase):
response = self.fetch('/echo')
self.assertEqual(response.code, 400)
+ def test_missing_websocket_key(self):
+ response = self.fetch('/echo',
+ headers={'Connection': 'Upgrade',
+ 'Upgrade': 'WebSocket',
+ 'Sec-WebSocket-Version': '13'})
+ self.assertEqual(response.code, 400)
+
def test_bad_websocket_version(self):
response = self.fetch('/echo',
headers={'Connection': 'Upgrade',
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 4.5 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"sphinx",
"sphinx_rtd_theme",
"codecov",
"virtualenv",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
attrs==22.2.0
Babel==2.11.0
certifi==2021.5.30
charset-normalizer==2.0.12
codecov==2.1.13
coverage==6.2
distlib==0.3.9
docutils==0.18.1
filelock==3.4.1
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig==1.1.1
Jinja2==3.0.3
MarkupSafe==2.0.1
packaging==21.3
platformdirs==2.4.0
pluggy==1.0.0
py==1.11.0
Pygments==2.14.0
pyparsing==3.1.4
pytest==7.0.1
pytz==2025.2
requests==2.27.1
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinx-rtd-theme==2.0.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jquery==4.1
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
tomli==1.2.3
-e git+https://github.com/tornadoweb/tornado.git@34c43f4775971ab9b2b8ed43356f218add6387b2#egg=tornado
typing_extensions==4.1.1
urllib3==1.26.20
virtualenv==20.17.1
zipp==3.6.0
| name: tornado
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- attrs==22.2.0
- babel==2.11.0
- charset-normalizer==2.0.12
- codecov==2.1.13
- coverage==6.2
- distlib==0.3.9
- docutils==0.18.1
- filelock==3.4.1
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- iniconfig==1.1.1
- jinja2==3.0.3
- markupsafe==2.0.1
- packaging==21.3
- platformdirs==2.4.0
- pluggy==1.0.0
- py==1.11.0
- pygments==2.14.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytz==2025.2
- requests==2.27.1
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinx-rtd-theme==2.0.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jquery==4.1
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- virtualenv==20.17.1
- zipp==3.6.0
prefix: /opt/conda/envs/tornado
| [
"tornado/test/websocket_test.py::WebSocketTest::test_missing_websocket_key"
] | [] | [
"tornado/test/websocket_test.py::WebSocketTest::test_async_prepare",
"tornado/test/websocket_test.py::WebSocketTest::test_bad_websocket_version",
"tornado/test/websocket_test.py::WebSocketTest::test_binary_message",
"tornado/test/websocket_test.py::WebSocketTest::test_check_origin_invalid",
"tornado/test/websocket_test.py::WebSocketTest::test_check_origin_invalid_partial_url",
"tornado/test/websocket_test.py::WebSocketTest::test_check_origin_invalid_subdomains",
"tornado/test/websocket_test.py::WebSocketTest::test_check_origin_valid_no_path",
"tornado/test/websocket_test.py::WebSocketTest::test_check_origin_valid_with_path",
"tornado/test/websocket_test.py::WebSocketTest::test_client_close_reason",
"tornado/test/websocket_test.py::WebSocketTest::test_coroutine",
"tornado/test/websocket_test.py::WebSocketTest::test_error_in_on_message",
"tornado/test/websocket_test.py::WebSocketTest::test_http_request",
"tornado/test/websocket_test.py::WebSocketTest::test_path_args",
"tornado/test/websocket_test.py::WebSocketTest::test_render_message",
"tornado/test/websocket_test.py::WebSocketTest::test_server_close_reason",
"tornado/test/websocket_test.py::WebSocketTest::test_unicode_message",
"tornado/test/websocket_test.py::WebSocketTest::test_websocket_callbacks",
"tornado/test/websocket_test.py::WebSocketTest::test_websocket_close_buffered_data",
"tornado/test/websocket_test.py::WebSocketTest::test_websocket_gen",
"tornado/test/websocket_test.py::WebSocketTest::test_websocket_header_echo",
"tornado/test/websocket_test.py::WebSocketTest::test_websocket_headers",
"tornado/test/websocket_test.py::WebSocketTest::test_websocket_http_fail",
"tornado/test/websocket_test.py::WebSocketTest::test_websocket_http_success",
"tornado/test/websocket_test.py::WebSocketTest::test_websocket_network_fail",
"tornado/test/websocket_test.py::WebSocketTest::test_write_after_close",
"tornado/test/websocket_test.py::WebSocketNativeCoroutineTest::test_native_coroutine",
"tornado/test/websocket_test.py::NoCompressionTest::test_message_sizes",
"tornado/test/websocket_test.py::ServerOnlyCompressionTest::test_message_sizes",
"tornado/test/websocket_test.py::ClientOnlyCompressionTest::test_message_sizes",
"tornado/test/websocket_test.py::DefaultCompressionTest::test_message_sizes",
"tornado/test/websocket_test.py::PythonMaskFunctionTest::test_mask",
"tornado/test/websocket_test.py::CythonMaskFunctionTest::test_mask",
"tornado/test/websocket_test.py::ServerPeriodicPingTest::test_server_ping",
"tornado/test/websocket_test.py::ClientPeriodicPingTest::test_client_ping",
"tornado/test/websocket_test.py::MaxMessageSizeTest::test_large_message"
] | [] | Apache License 2.0 | 1,693 | 171 | [
"tornado/websocket.py"
] |
|
OpenMined__PySyft-250 | 6d94259b46fb6516ab20773cbbb9bb6ddac3d3cd | 2017-09-23 09:32:13 | 06ce023225dd613d8fb14ab2046135b93ab22376 | diff --git a/syft/tensor.py b/syft/tensor.py
index 02a63a36d7..553420c417 100644
--- a/syft/tensor.py
+++ b/syft/tensor.py
@@ -1200,6 +1200,21 @@ class TensorBase(object):
out_flat = [s if m == 0 else source_iter.__next__().item() for m, s in mask_self_iter]
self.data = np.reshape(out_flat, self.data.shape)
+ def eq(self, t):
+ """Returns a new Tensor having boolean True values where an element of the calling tensor is equal to the second Tensor, False otherwise.
+ The second Tensor can be a number or a tensor whose shape is broadcastable with the calling Tensor."""
+ if self.encrypted:
+ return NotImplemented
+ return TensorBase(np.equal(self.data, _ensure_tensorbase(t).data))
+
+ def eq_(self, t):
+ """Writes in-place, boolean True values where an element of the calling tensor is equal to the second Tensor, False otherwise.
+ The second Tensor can be a number or a tensor whose shape is broadcastable with the calling Tensor."""
+ if self.encrypted:
+ return NotImplemented
+ self.data = np.equal(self.data, _ensure_tensorbase(t).data)
+ return self
+
def mv(tensormat, tensorvector):
""" matrix and vector multiplication """
| Implement Default eq Functionality for Base Tensor Type
**User Story A:** As a Data Scientist using Syft's Base Tensor type, we want to implement a default method for computing operations on a Tensor of arbitrary type. For this ticket to be complete, eq() should return a new tensor and eq_() should perform the operation inline. For a reference on the operation this performs check out [PyTorch](http://pytorch.org/docs/master/tensors.html)'s documentation.
**Acceptance Criteria:**
- If the Base Tensor type's attribute "encrypted" is set to True, it should return a NotImplemented error.
- a unit test demonstrating the correct operation on the Base Tensor type implemented over int and float Tensors.
- inline documentation in the python code. For inspiration on inline documentation, please check out [PyTorch](http://pytorch.org/docs/master/tensors.html)'s documentation for this operator. | OpenMined/PySyft | diff --git a/tests/test_tensor.py b/tests/test_tensor.py
index 3ca9315457..e8429080be 100644
--- a/tests/test_tensor.py
+++ b/tests/test_tensor.py
@@ -908,5 +908,29 @@ class masked_scatter_Tests(unittest.TestCase):
self.assertTrue(np.array_equal(t, TensorBase([[1, 2, 3], [1, 1, 1]])))
+class eqTests(unittest.TestCase):
+ def testEqWithTensor(self):
+ t1 = TensorBase(np.arange(5))
+ t2 = TensorBase(np.arange(5)[-1::-1])
+ truth_values = t1.eq(t2)
+ self.assertEqual(truth_values, [False, False, True, False, False])
+
+ def testEqWithNumber(self):
+ t1 = TensorBase(np.arange(5))
+ truth_values = t1.eq(1)
+ self.assertEqual(truth_values, [False, True, False, False, False])
+
+ def testEqInPlaceWithTensor(self):
+ t1 = TensorBase(np.arange(5))
+ t2 = TensorBase(np.arange(5)[-1::-1])
+ t1.eq_(t2)
+ self.assertEqual(t1, [False, False, True, False, False])
+
+ def testEqInPlaceWithNumber(self):
+ t1 = TensorBase(np.arange(5))
+ t1.eq_(1)
+ self.assertEqual(t1, [False, True, False, False, False])
+
+
if __name__ == "__main__":
unittest.main()
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 1
} | PySyft/hydrogen | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-flake8"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc musl-dev g++ libgmp3-dev libmpfr-dev ca-certificates libmpc-dev"
],
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | args==0.1.0
attrs==22.2.0
certifi==2021.5.30
clint==0.5.1
flake8==5.0.4
importlib-metadata==4.2.0
iniconfig==1.1.1
line-profiler==4.1.3
mccabe==0.7.0
numpy==1.19.5
packaging==21.3
phe==1.5.0
pluggy==1.0.0
py==1.11.0
pycodestyle==2.9.1
pyflakes==2.5.0
pyparsing==3.1.4
pyRserve==1.0.4
pytest==7.0.1
pytest-flake8==1.1.1
scipy==1.5.4
-e git+https://github.com/OpenMined/PySyft.git@6d94259b46fb6516ab20773cbbb9bb6ddac3d3cd#egg=syft
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: PySyft
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- args==0.1.0
- attrs==22.2.0
- clint==0.5.1
- flake8==5.0.4
- importlib-metadata==4.2.0
- iniconfig==1.1.1
- line-profiler==4.1.3
- mccabe==0.7.0
- numpy==1.19.5
- packaging==21.3
- phe==1.5.0
- pluggy==1.0.0
- py==1.11.0
- pycodestyle==2.9.1
- pyflakes==2.5.0
- pyparsing==3.1.4
- pyrserve==1.0.4
- pytest==7.0.1
- pytest-flake8==1.1.1
- scipy==1.5.4
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/PySyft
| [
"tests/test_tensor.py::eqTests::testEqInPlaceWithNumber",
"tests/test_tensor.py::eqTests::testEqInPlaceWithTensor",
"tests/test_tensor.py::eqTests::testEqWithNumber",
"tests/test_tensor.py::eqTests::testEqWithTensor"
] | [] | [
"tests/test_tensor.py::DimTests::testAsView",
"tests/test_tensor.py::DimTests::testDimOne",
"tests/test_tensor.py::DimTests::testResize",
"tests/test_tensor.py::DimTests::testResizeAs",
"tests/test_tensor.py::DimTests::testSize",
"tests/test_tensor.py::DimTests::testView",
"tests/test_tensor.py::AddTests::testInplace",
"tests/test_tensor.py::AddTests::testScalar",
"tests/test_tensor.py::AddTests::testSimple",
"tests/test_tensor.py::CeilTests::testCeil",
"tests/test_tensor.py::CeilTests::testCeil_",
"tests/test_tensor.py::ZeroTests::testZero",
"tests/test_tensor.py::FloorTests::testFloor_",
"tests/test_tensor.py::SubTests::testInplace",
"tests/test_tensor.py::SubTests::testScalar",
"tests/test_tensor.py::SubTests::testSimple",
"tests/test_tensor.py::MaxTests::testAxis",
"tests/test_tensor.py::MaxTests::testNoDim",
"tests/test_tensor.py::MultTests::testInplace",
"tests/test_tensor.py::MultTests::testScalar",
"tests/test_tensor.py::MultTests::testSimple",
"tests/test_tensor.py::DivTests::testInplace",
"tests/test_tensor.py::DivTests::testScalar",
"tests/test_tensor.py::DivTests::testSimple",
"tests/test_tensor.py::AbsTests::testabs",
"tests/test_tensor.py::AbsTests::testabs_",
"tests/test_tensor.py::ShapeTests::testShape",
"tests/test_tensor.py::SqrtTests::testSqrt",
"tests/test_tensor.py::SqrtTests::testSqrt_",
"tests/test_tensor.py::SumTests::testDimIsNotNoneInt",
"tests/test_tensor.py::SumTests::testDimNoneInt",
"tests/test_tensor.py::EqualTests::testEqOp",
"tests/test_tensor.py::EqualTests::testEqual",
"tests/test_tensor.py::EqualTests::testIneqOp",
"tests/test_tensor.py::EqualTests::testNotEqual",
"tests/test_tensor.py::IndexTests::testIndexing",
"tests/test_tensor.py::sigmoidTests::testSigmoid",
"tests/test_tensor.py::addmm::testaddmm1d",
"tests/test_tensor.py::addmm::testaddmm2d",
"tests/test_tensor.py::addmm::testaddmm_1d",
"tests/test_tensor.py::addmm::testaddmm_2d",
"tests/test_tensor.py::addcmulTests::testaddcmul1d",
"tests/test_tensor.py::addcmulTests::testaddcmul2d",
"tests/test_tensor.py::addcmulTests::testaddcmul_1d",
"tests/test_tensor.py::addcmulTests::testaddcmul_2d",
"tests/test_tensor.py::addcdivTests::testaddcdiv1d",
"tests/test_tensor.py::addcdivTests::testaddcdiv2d",
"tests/test_tensor.py::addcdivTests::testaddcdiv_1d",
"tests/test_tensor.py::addcdivTests::testaddcdiv_2d",
"tests/test_tensor.py::addmvTests::testaddmv",
"tests/test_tensor.py::addmvTests::testaddmv_",
"tests/test_tensor.py::addbmmTests::testaddbmm",
"tests/test_tensor.py::addbmmTests::testaddbmm_",
"tests/test_tensor.py::baddbmmTests::testbaddbmm",
"tests/test_tensor.py::baddbmmTests::testbaddbmm_",
"tests/test_tensor.py::transposeTests::testT",
"tests/test_tensor.py::transposeTests::testT_",
"tests/test_tensor.py::transposeTests::testTranspose",
"tests/test_tensor.py::transposeTests::testTranspose_",
"tests/test_tensor.py::unsqueezeTests::testUnsqueeze",
"tests/test_tensor.py::unsqueezeTests::testUnsqueeze_",
"tests/test_tensor.py::expTests::testexp",
"tests/test_tensor.py::expTests::testexp_",
"tests/test_tensor.py::fracTests::testfrac",
"tests/test_tensor.py::fracTests::testfrac_",
"tests/test_tensor.py::rsqrtTests::testrsqrt",
"tests/test_tensor.py::rsqrtTests::testrsqrt_",
"tests/test_tensor.py::signTests::testsign",
"tests/test_tensor.py::signTests::testsign_",
"tests/test_tensor.py::numpyTests::testnumpy",
"tests/test_tensor.py::reciprocalTests::testreciprocal",
"tests/test_tensor.py::reciprocalTests::testrsqrt_",
"tests/test_tensor.py::logTests::testLog",
"tests/test_tensor.py::logTests::testLog1p",
"tests/test_tensor.py::logTests::testLog1p_",
"tests/test_tensor.py::logTests::testLog_",
"tests/test_tensor.py::clampTests::testClampFloat",
"tests/test_tensor.py::clampTests::testClampFloatInPlace",
"tests/test_tensor.py::clampTests::testClampInt",
"tests/test_tensor.py::clampTests::testClampIntInPlace",
"tests/test_tensor.py::cloneTests::testClone",
"tests/test_tensor.py::chunkTests::testChunk",
"tests/test_tensor.py::chunkTests::testChunkSameSize",
"tests/test_tensor.py::gtTests::testGtInPlaceWithNumber",
"tests/test_tensor.py::gtTests::testGtInPlaceWithTensor",
"tests/test_tensor.py::gtTests::testGtWithNumber",
"tests/test_tensor.py::gtTests::testGtWithTensor",
"tests/test_tensor.py::bernoulliTests::testBernoulli",
"tests/test_tensor.py::bernoulliTests::testBernoulli_",
"tests/test_tensor.py::uniformTests::testUniform",
"tests/test_tensor.py::uniformTests::testUniform_",
"tests/test_tensor.py::fillTests::testFill_",
"tests/test_tensor.py::topkTests::testTopK",
"tests/test_tensor.py::tolistTests::testToList",
"tests/test_tensor.py::traceTests::testTrace",
"tests/test_tensor.py::roundTests::testRound",
"tests/test_tensor.py::roundTests::testRound_",
"tests/test_tensor.py::repeatTests::testRepeat",
"tests/test_tensor.py::powTests::testPow",
"tests/test_tensor.py::powTests::testPow_",
"tests/test_tensor.py::prodTests::testProd",
"tests/test_tensor.py::randomTests::testRandom_",
"tests/test_tensor.py::nonzeroTests::testNonZero",
"tests/test_tensor.py::cumprodTest::testCumprod",
"tests/test_tensor.py::cumprodTest::testCumprod_",
"tests/test_tensor.py::splitTests::testSplit",
"tests/test_tensor.py::squeezeTests::testSqueeze",
"tests/test_tensor.py::expandAsTests::testExpandAs",
"tests/test_tensor.py::meanTests::testMean",
"tests/test_tensor.py::notEqualTests::testNe",
"tests/test_tensor.py::notEqualTests::testNe_",
"tests/test_tensor.py::index_selectTests::testIndex_select",
"tests/test_tensor.py::gatherTests::testGatherNumerical1",
"tests/test_tensor.py::gatherTests::testGatherNumerical2",
"tests/test_tensor.py::scatterTests::testScatter_DimOutOfRange",
"tests/test_tensor.py::scatterTests::testScatter_IndexOutOfRange",
"tests/test_tensor.py::scatterTests::testScatter_IndexType",
"tests/test_tensor.py::scatterTests::testScatter_Numerical0",
"tests/test_tensor.py::scatterTests::testScatter_Numerical1",
"tests/test_tensor.py::scatterTests::testScatter_Numerical2",
"tests/test_tensor.py::scatterTests::testScatter_Numerical3",
"tests/test_tensor.py::scatterTests::testScatter_Numerical4",
"tests/test_tensor.py::scatterTests::testScatter_Numerical5",
"tests/test_tensor.py::scatterTests::testScatter_Numerical6",
"tests/test_tensor.py::scatterTests::testScatter_index_src_dimension_mismatch",
"tests/test_tensor.py::masked_scatter_Tests::testMasked_scatter_1",
"tests/test_tensor.py::masked_scatter_Tests::testMasked_scatter_braodcasting1",
"tests/test_tensor.py::masked_scatter_Tests::testMasked_scatter_braodcasting2"
] | [] | Apache License 2.0 | 1,696 | 333 | [
"syft/tensor.py"
] |
|
witchard__grole-9 | 58338694c664442a76e3c64e277c1873d13e2420 | 2017-09-23 16:40:17 | 58338694c664442a76e3c64e277c1873d13e2420 | diff --git a/grole.py b/grole.py
index be7559d..d1443fd 100755
--- a/grole.py
+++ b/grole.py
@@ -15,6 +15,7 @@ import pathlib
import html
import sys
import argparse
+import logging
from collections import defaultdict
__author__ = 'witchard'
@@ -310,6 +311,7 @@ class Grole:
self._handlers = defaultdict(list)
self.env = {'doc': []}
self.env.update(env)
+ self._logger = logging.getLogger('grole')
def route(self, path_regex, methods=['GET'], doc=True):
"""
@@ -338,7 +340,7 @@ class Grole:
Parses requests, finds appropriate handlers and returns responses
"""
peer = writer.get_extra_info('peername')
- print('New connection from {}'.format(peer))
+ self._logger.debug('New connection from {}'.format(peer))
try:
# Loop handling requests
while True:
@@ -361,7 +363,7 @@ class Grole:
res = Response(data=res)
except:
# Error - log it and return 500
- traceback.print_exc()
+ self._logger.error(traceback.format_exc())
res = Response(code=500, reason='Internal Server Error')
break
@@ -371,9 +373,9 @@ class Grole:
# Respond
await res._write(writer)
- print('{}: {} -> {}'.format(peer, req.path, res.code))
+ self._logger.info('{}: {} -> {}'.format(peer, req.path, res.code))
except EOFError:
- print('Connection closed from {}'.format(peer))
+ self._logger.debug('Connection closed from {}'.format(peer))
def run(self, host='localhost', port=1234):
"""
@@ -390,7 +392,7 @@ class Grole:
server = loop.run_until_complete(coro)
# Run the server
- print('Serving on {}'.format(server.sockets[0].getsockname()))
+ self._logger.info('Serving on {}'.format(server.sockets[0].getsockname()))
try:
loop.run_forever()
except KeyboardInterrupt:
@@ -414,6 +416,11 @@ def parse_args(args=sys.argv[1:]):
default='.')
parser.add_argument('-n', '--noindex', help='do not show directory indexes',
default=False, action='store_true')
+ loglevel = parser.add_mutually_exclusive_group()
+ loglevel.add_argument('-v', '--verbose', help='verbose logging',
+ default=False, action='store_true')
+ loglevel.add_argument('-q', '--quiet', help='quiet logging',
+ default=False, action='store_true')
return parser.parse_args(args)
def main(args=sys.argv[1:]):
@@ -421,6 +428,12 @@ def main(args=sys.argv[1:]):
Run Grole static file server
"""
args = parse_args(args)
+ if args.verbose:
+ logging.basicConfig(level=logging.DEBUG)
+ elif args.quiet:
+ logging.basicConfig(level=logging.ERROR)
+ else:
+ logging.basicConfig(level=logging.INFO)
app = Grole()
serve_static(app, '', args.directory, not args.noindex)
app.run(args.address, args.port)
| Use logging library instead of print | witchard/grole | diff --git a/test/test_args.py b/test/test_args.py
index 5c50383..97a2df6 100644
--- a/test/test_args.py
+++ b/test/test_args.py
@@ -10,11 +10,21 @@ class TestArgs(unittest.TestCase):
self.assertEqual(args.port, 1234)
self.assertEqual(args.directory, '.')
self.assertEqual(args.noindex, False)
+ self.assertEqual(args.verbose, False)
+ self.assertEqual(args.quiet, False)
def test_override(self):
- args = grole.parse_args(['-a', 'foo', '-p', '27', '-d', 'bar', '-n'])
+ args = grole.parse_args(['-a', 'foo', '-p', '27', '-d', 'bar', '-n', '-v'])
self.assertEqual(args.address, 'foo')
self.assertEqual(args.port, 27)
self.assertEqual(args.directory, 'bar')
self.assertEqual(args.noindex, True)
+ self.assertEqual(args.verbose, True)
+ self.assertEqual(args.quiet, False)
+ def test_error(self):
+ try:
+ grole.parse_args(['-q', '-v'])
+ except SystemExit:
+ return
+ self.fail('Did not error on mutually exclusive args')
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 1
} | 0.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
-e git+https://github.com/witchard/grole.git@58338694c664442a76e3c64e277c1873d13e2420#egg=grole
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
packaging @ file:///croot/packaging_1734472117206/work
pluggy @ file:///croot/pluggy_1733169602837/work
pytest @ file:///croot/pytest_1738938843180/work
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
| name: grole
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
prefix: /opt/conda/envs/grole
| [
"test/test_args.py::TestArgs::test_defaults",
"test/test_args.py::TestArgs::test_override"
] | [] | [
"test/test_args.py::TestArgs::test_error"
] | [] | MIT License | 1,697 | 774 | [
"grole.py"
] |
|
ARMmbed__greentea-237 | 86f5ec3211a8f7f324bcdd3201012945ee0534ac | 2017-09-25 13:51:40 | 86f5ec3211a8f7f324bcdd3201012945ee0534ac | diff --git a/mbed_greentea/mbed_report_api.py b/mbed_greentea/mbed_report_api.py
index da3f0d9..82acb5c 100644
--- a/mbed_greentea/mbed_report_api.py
+++ b/mbed_greentea/mbed_report_api.py
@@ -38,6 +38,13 @@ def exporter_json(test_result_ext, test_suite_properties=None):
@details This is a machine friendly format
"""
import json
+ for target in test_result_ext.values():
+ for suite in target.values():
+ try:
+ suite["single_test_output"] = suite["single_test_output"]\
+ .decode("unicode_escape")
+ except KeyError:
+ pass
return json.dumps(test_result_ext, indent=4)
@@ -211,7 +218,10 @@ def exporter_testcase_junit(test_result_ext, test_suite_properties=None):
test_cases.append(tc)
ts_name = target_name
- test_build_properties = test_suite_properties[target_name] if target_name in test_suite_properties else None
+ if test_suite_properties and target_name in test_suite_properties:
+ test_build_properties = test_suite_properties[target_name]
+ else:
+ test_build_properties = None
ts = TestSuite(ts_name, test_cases, properties=test_build_properties)
test_suites.append(ts)
@@ -584,7 +594,9 @@ def get_result_overlay_dropdowns(result_div_id, test_results):
result_output_div_id = "%s_output" % result_div_id
result_output_dropdown = get_dropdown_html(result_output_div_id,
"Test Output",
- test_results['single_test_output'].rstrip("\n"),
+ test_results['single_test_output']
+ .decode("unicode-escape")
+ .rstrip("\n"),
output_text=True)
# Add a dropdown for the testcases if they are present
@@ -740,10 +752,14 @@ def exporter_html(test_result_ext, test_suite_properties=None):
test_results['single_test_count'] += 1
result_class = get_result_colour_class(test_results['single_test_result'])
+ try:
+ percent_pass = int((test_results['single_test_passes']*100.0)/test_results['single_test_count'])
+ except ZeroDivisionError:
+ percent_pass = 100
this_row += result_cell_template % (result_class,
result_div_id,
test_results['single_test_result'],
- int((test_results['single_test_passes']*100.0)/test_results['single_test_count']),
+ percent_pass,
test_results['single_test_passes'],
test_results['single_test_count'],
result_overlay)
| mbedgt crash with float division by zero
Hi
Here is my command:
mbedgt -V -v -t NUCLEO_F401RE-ARM,NUCLEO_F401RE-GCC_ARM,NUCLEO_F401RE-IAR,NUCLEO_F410RB-ARM,NUCLEO_F410RB-GCC_ARM,NUCLEO_F410RB-IAR,NUCLEO_F411RE-ARM,NUCLEO_F411RE-GCC_ARM,NUCLEO_F411RE-IAR --report-html=/c/xxx.html
It has crashed:
...
mbedgt: all tests finished!
mbedgt: shuffle seed: 0.3680156551
mbedgt: exporting to HTML file
mbedgt: unexpected error:
float division by zero
Traceback (most recent call last):
File "C:\Python27\Scripts\mbedgt-script.py", line 11, in <module>
load_entry_point('mbed-greentea==1.2.6', 'console_scripts', 'mbedgt')()
File "c:\python27\lib\site-packages\mbed_greentea\mbed_greentea_cli.py", line 401, in main
cli_ret = main_cli(opts, args)
File "c:\python27\lib\site-packages\mbed_greentea\mbed_greentea_cli.py", line 1050, in main_cli
html_report = exporter_html(test_report)
File "c:\python27\lib\site-packages\mbed_greentea\mbed_report_api.py", line 747, in exporter_html
int((test_results['single_test_passes']*100.0)/test_results['single_test_count']),
ZeroDivisionError: float division by zero
| ARMmbed/greentea | diff --git a/test/report_api.py b/test/report_api.py
new file mode 100644
index 0000000..122e26e
--- /dev/null
+++ b/test/report_api.py
@@ -0,0 +1,56 @@
+#!/usr/bin/env python
+"""
+mbed SDK
+Copyright (c) 2017 ARM Limited
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import unittest
+from mock import patch
+
+from mbed_greentea.mbed_report_api import exporter_html, \
+ exporter_memory_metrics_csv, exporter_testcase_junit, \
+ exporter_testcase_text, exporter_text, exporter_json
+
+
+class ReportEmitting(unittest.TestCase):
+
+
+ report_fns = [exporter_html, exporter_memory_metrics_csv,
+ exporter_testcase_junit, exporter_testcase_text,
+ exporter_text, exporter_json]
+ def test_report_zero_tests(self):
+ test_data = {}
+ for report_fn in self.report_fns:
+ report_fn(test_data)
+
+ def test_report_zero_testcases(self):
+ test_data = {
+ 'k64f-gcc_arm': {
+ 'garbage_test_suite' :{
+ u'single_test_result': u'NOT_RAN',
+ u'elapsed_time': 0.0,
+ u'build_path': u'N/A',
+ u'build_path_abs': u'N/A',
+ u'copy_method': u'N/A',
+ u'image_path': u'N/A',
+ u'single_test_output': b'N/A',
+ u'platform_name': u'k64f',
+ u'test_bin_name': u'N/A',
+ u'testcase_result': {},
+ }
+ }
+ }
+ for report_fn in self.report_fns:
+ report_fn(test_data)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 1
},
"num_modified_files": 1
} | 1.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | appdirs==1.4.4
beautifulsoup4==4.13.3
certifi==2025.1.31
charset-normalizer==3.4.1
colorama==0.3.9
coverage==7.8.0
exceptiongroup==1.2.2
execnet==2.1.1
fasteners==0.19
future==1.0.0
idna==3.10
iniconfig==2.1.0
intelhex==2.3.0
junit-xml==1.9
lockfile==0.12.2
-e git+https://github.com/ARMmbed/greentea.git@86f5ec3211a8f7f324bcdd3201012945ee0534ac#egg=mbed_greentea
mbed-host-tests==1.8.15
mbed-ls==1.8.15
mbed-os-tools==1.8.15
mock==5.2.0
packaging==24.2
pluggy==1.5.0
prettytable==2.5.0
pyserial==3.5
pytest==8.3.5
pytest-asyncio==0.26.0
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-xdist==3.6.1
requests==2.32.3
six==1.17.0
soupsieve==2.6
tomli==2.2.1
typing_extensions==4.13.0
urllib3==2.3.0
wcwidth==0.2.13
| name: greentea
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- appdirs==1.4.4
- beautifulsoup4==4.13.3
- certifi==2025.1.31
- charset-normalizer==3.4.1
- colorama==0.3.9
- coverage==7.8.0
- exceptiongroup==1.2.2
- execnet==2.1.1
- fasteners==0.19
- future==1.0.0
- idna==3.10
- iniconfig==2.1.0
- intelhex==2.3.0
- junit-xml==1.9
- lockfile==0.12.2
- mbed-host-tests==1.8.15
- mbed-ls==1.8.15
- mbed-os-tools==1.8.15
- mock==5.2.0
- packaging==24.2
- pluggy==1.5.0
- prettytable==2.5.0
- pyserial==3.5
- pytest==8.3.5
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-xdist==3.6.1
- requests==2.32.3
- six==1.17.0
- soupsieve==2.6
- tomli==2.2.1
- typing-extensions==4.13.0
- urllib3==2.3.0
- wcwidth==0.2.13
prefix: /opt/conda/envs/greentea
| [
"test/report_api.py::ReportEmitting::test_report_zero_testcases"
] | [] | [
"test/report_api.py::ReportEmitting::test_report_zero_tests"
] | [] | Apache License 2.0 | 1,701 | 614 | [
"mbed_greentea/mbed_report_api.py"
] |
|
OpenMined__PySyft-254 | 6c84afb0d4d541039bdcad4357cc7b62a3d24084 | 2017-09-26 21:01:03 | 06ce023225dd613d8fb14ab2046135b93ab22376 | diff --git a/syft/tensor.py b/syft/tensor.py
index 2bb335111e..a5f21740c9 100644
--- a/syft/tensor.py
+++ b/syft/tensor.py
@@ -725,19 +725,72 @@ class TensorBase(object):
else:
return [TensorBase(x) for x in np.array_split(self.data, n, dim)]
- def gt(self, t):
+ def gt(self, other):
"""Returns a new Tensor having boolean True values where an element of the calling tensor is greater than the second Tensor, False otherwise.
The second Tensor can be a number or a tensor whose shape is broadcastable with the calling Tensor."""
- if self.encrypted:
+ other = _ensure_tensorbase(other)
+ if self.encrypted or other.encrypted:
return NotImplemented
- return TensorBase(np.greater(self.data, _ensure_tensorbase(t).data))
+ return TensorBase(np.greater(self.data, other.data))
- def gt_(self, t):
+ def gt_(self, other):
"""Writes in-place, boolean True values where an element of the calling tensor is greater than the second Tensor, False otherwise.
The second Tensor can be a number or a tensor whose shape is broadcastable with the calling Tensor."""
- if self.encrypted:
+ other = _ensure_tensorbase(other)
+ if self.encrypted or other.encrypted:
+ return NotImplemented
+ self.data = np.greater(self.data, other.data)
+ return self
+
+ def lt(self, other):
+ """Returns a new Tensor having boolean True values where an element of the calling tensor is less than the second Tensor, False otherwise.
+ The second Tensor can be a number or a tensor whose shape is broadcastable with the calling Tensor."""
+ other = _ensure_tensorbase(other)
+ if self.encrypted or other.encrypted:
+ return NotImplemented
+ return TensorBase(np.less(self.data, other.data))
+
+ def lt_(self, other):
+ """Writes in-place, boolean True values where an element of the calling tensor is less than the second Tensor, False otherwise.
+ The second Tensor can be a number or a tensor whose shape is broadcastable with the calling Tensor."""
+ other = _ensure_tensorbase(other)
+ if self.encrypted or other.encrypted:
+ return NotImplemented
+ self.data = np.less(self.data, other.data)
+ return self
+
+ def ge(self, other):
+ """Returns a new Tensor having boolean True values where an element of the calling tensor is greater or equal than the second Tensor, False otherwise.
+ The second Tensor can be a number or a tensor whose shape is broadcastable with the calling Tensor."""
+ other = _ensure_tensorbase(other)
+ if self.encrypted or other.encrypted:
+ return NotImplemented
+ return TensorBase(np.greater_equal(self.data, other.data))
+
+ def ge_(self, other):
+ """Writes in-place, boolean True values where an element of the calling tensor is greater or equal than the second Tensor, False otherwise.
+ The second Tensor can be a number or a tensor whose shape is broadcastable with the calling Tensor."""
+ other = _ensure_tensorbase(other)
+ if self.encrypted or other.encrypted:
+ return NotImplemented
+ self.data = np.greater_equal(self.data, other.data)
+ return self
+
+ def le(self, other):
+ """Returns a new Tensor having boolean True values where an element of the calling tensor is less or equal than the second Tensor, False otherwise.
+ The second Tensor can be a number or a tensor whose shape is broadcastable with the calling Tensor."""
+ other = _ensure_tensorbase(other)
+ if self.encrypted or other.encrypted:
+ return NotImplemented
+ return TensorBase(np.less_equal(self.data, other.data))
+
+ def le_(self, other):
+ """Writes in-place, boolean True values where an element of the calling tensor is less or equal than the second Tensor, False otherwise.
+ The second Tensor can be a number or a tensor whose shape is broadcastable with the calling Tensor."""
+ other = _ensure_tensorbase(other)
+ if self.encrypted or other.encrypted:
return NotImplemented
- self.data = np.greater(self.data, _ensure_tensorbase(t).data)
+ self.data = np.less_equal(self.data, other.data)
return self
def bernoulli(self, p):
| Implement Default ge Functionality for Base Tensor Type
**User Story A:** As a Data Scientist using Syft's Base Tensor type, we want to implement a default method for computing operations on a Tensor of arbitrary type. For this ticket to be complete, ge() should return a new tensor and ge_() should perform the operation inline. For a reference on the operation this performs check out [PyTorch](http://pytorch.org/docs/master/tensors.html)'s documentation.
**Acceptance Criteria:**
- If the Base Tensor type's attribute "encrypted" is set to True, it should return a NotImplemented error.
- a unit test demonstrating the correct operation on the Base Tensor type implemented over int and float Tensors.
- inline documentation in the python code. For inspiration on inline documentation, please check out [PyTorch](http://pytorch.org/docs/master/tensors.html)'s documentation for this operator. | OpenMined/PySyft | diff --git a/tests/test_tensor.py b/tests/test_tensor.py
index 62fc6336bb..55a16ceab9 100644
--- a/tests/test_tensor.py
+++ b/tests/test_tensor.py
@@ -560,28 +560,107 @@ class chunkTests(unittest.TestCase):
self.assertEqual(t2.shape(), t3.shape())
-class gtTests(unittest.TestCase):
+class inequalityTest(unittest.TestCase):
+ def setUp(self):
+ self.a1 = np.array([-2, -1, 0, 1, 2])
+ self.a2 = np.array([-4, -1, 5, 2, 2])
+
+ self.t1 = TensorBase(self.a1)
+ self.t2 = TensorBase(self.a2)
+
+ self.enc = TensorBase(self.a1, encrypted=True)
+
+
+class gtTests(inequalityTest):
def testGtWithTensor(self):
- t1 = TensorBase(np.arange(10))
- t2 = TensorBase(np.arange(10)[-1::-1])
- truth_values = t1.gt(t2)
- self.assertEqual(truth_values, [False, False, False, False, False, True, True, True, True, True])
+ self.assertEqual(self.t1.gt(self.t2), self.a1 > self.a2)
def testGtWithNumber(self):
- t1 = TensorBase(np.arange(10))
- truth_values = t1.gt(-1)
- self.assertEqual(truth_values, [True] * 10)
+ self.assertEqual(self.t1.gt(1), self.a1 > 1)
def testGtInPlaceWithTensor(self):
- t1 = TensorBase(np.arange(10))
- t2 = TensorBase(np.arange(10)[-1::-1])
- t1.gt_(t2)
- self.assertEqual(t1, [False, False, False, False, False, True, True, True, True, True])
+ self.t1.gt_(self.t2)
+ self.assertEqual(self.t1, self.a1 > self.a2)
def testGtInPlaceWithNumber(self):
- t1 = TensorBase(np.arange(10))
- t1.gt_(-1)
- self.assertEqual(t1, [True] * 10)
+ self.t1.gt_(1)
+ self.assertEqual(self.t1, self.a1 > 1)
+
+ def testWithEncrypted(self):
+ res = self.t1.gt(self.enc)
+ self.assertEqual(res, NotImplemented)
+
+ res = self.enc.gt(self.t1)
+ self.assertEqual(res, NotImplemented)
+
+
+class geTests(inequalityTest):
+ def testGeWithTensor(self):
+ self.assertEqual(self.t1.ge(self.t2), self.a1 >= self.a2)
+
+ def testGeWithNumber(self):
+ self.assertEqual(self.t1.ge(1), self.a1 >= 1)
+
+ def testGeInPlaceWithTensor(self):
+ self.t1.ge_(self.t2)
+ self.assertEqual(self.t1, self.a1 >= self.a2)
+
+ def testGeInPlaceWithNumber(self):
+ self.t1.ge_(1)
+ self.assertEqual(self.t1, self.a1 >= 1)
+
+ def testWithEncrypted(self):
+ res = self.t1.ge(self.enc)
+ self.assertEqual(res, NotImplemented)
+
+ res = self.enc.ge(self.t1)
+ self.assertEqual(res, NotImplemented)
+
+
+class ltTests(inequalityTest):
+ def testLtWithTensor(self):
+ self.assertEqual(self.t1.lt(self.t2), self.a1 < self.a2)
+
+ def testLtWithNumber(self):
+ self.assertEqual(self.t1.lt(1), self.a1 < 1)
+
+ def testLtInPlaceWithTensor(self):
+ self.t1.lt_(self.t2)
+ self.assertEqual(self.t1, self.a1 < self.a2)
+
+ def testLtInPlaceWithNumber(self):
+ self.t1.lt_(1)
+ self.assertEqual(self.t1, self.a1 < 1)
+
+ def testWithEncrypted(self):
+ res = self.t1.lt(self.enc)
+ self.assertEqual(res, NotImplemented)
+
+ res = self.enc.lt(self.t1)
+ self.assertEqual(res, NotImplemented)
+
+
+class leTests(inequalityTest):
+ def testLeWithTensor(self):
+ self.assertEqual(self.t1.le(self.t2), self.a1 <= self.a2)
+
+ def testLeWithNumber(self):
+ self.assertEqual(self.t1.le(1), self.a1 <= 1)
+
+ def testLeInPlaceWithTensor(self):
+ self.t1.le_(self.t2)
+ self.assertEqual(self.t1, self.a1 <= self.a2)
+
+ def testLeInPlaceWithNumber(self):
+ self.t1.le_(1)
+ self.assertEqual(self.t1, self.a1 <= 1)
+
+ def testWithEncrypted(self):
+ res = self.t1.le(self.enc)
+ self.assertEqual(res, NotImplemented)
+
+ res = self.enc.le(self.t1)
+ self.assertEqual(res, NotImplemented)
class bernoulliTests(unittest.TestCase):
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 1
} | PySyft/hydrogen | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"line_profiler",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y musl-dev g++ libgmp3-dev libmpfr-dev ca-certificates libmpc-dev"
],
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | args==0.1.0
attrs==22.2.0
certifi==2021.5.30
clint==0.5.1
flake8==5.0.4
importlib-metadata==4.2.0
iniconfig==1.1.1
line-profiler==4.1.3
mccabe==0.7.0
numpy==1.19.5
packaging==21.3
phe==1.5.0
pluggy==1.0.0
py==1.11.0
pycodestyle==2.9.1
pyflakes==2.5.0
pyparsing==3.1.4
pyRserve==1.0.4
pytest==7.0.1
pytest-flake8==1.1.1
scipy==1.5.4
-e git+https://github.com/OpenMined/PySyft.git@6c84afb0d4d541039bdcad4357cc7b62a3d24084#egg=syft
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: PySyft
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- args==0.1.0
- attrs==22.2.0
- clint==0.5.1
- flake8==5.0.4
- importlib-metadata==4.2.0
- iniconfig==1.1.1
- line-profiler==4.1.3
- mccabe==0.7.0
- numpy==1.19.5
- packaging==21.3
- phe==1.5.0
- pluggy==1.0.0
- py==1.11.0
- pycodestyle==2.9.1
- pyflakes==2.5.0
- pyparsing==3.1.4
- pyrserve==1.0.4
- pytest==7.0.1
- pytest-flake8==1.1.1
- scipy==1.5.4
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/PySyft
| [
"tests/test_tensor.py::gtTests::testWithEncrypted",
"tests/test_tensor.py::geTests::testGeInPlaceWithNumber",
"tests/test_tensor.py::geTests::testGeInPlaceWithTensor",
"tests/test_tensor.py::geTests::testGeWithNumber",
"tests/test_tensor.py::geTests::testGeWithTensor",
"tests/test_tensor.py::geTests::testWithEncrypted",
"tests/test_tensor.py::ltTests::testLtInPlaceWithNumber",
"tests/test_tensor.py::ltTests::testLtInPlaceWithTensor",
"tests/test_tensor.py::ltTests::testLtWithNumber",
"tests/test_tensor.py::ltTests::testLtWithTensor",
"tests/test_tensor.py::ltTests::testWithEncrypted",
"tests/test_tensor.py::leTests::testLeInPlaceWithNumber",
"tests/test_tensor.py::leTests::testLeInPlaceWithTensor",
"tests/test_tensor.py::leTests::testLeWithNumber",
"tests/test_tensor.py::leTests::testLeWithTensor",
"tests/test_tensor.py::leTests::testWithEncrypted"
] | [] | [
"tests/test_tensor.py::DimTests::testAsView",
"tests/test_tensor.py::DimTests::testDimOne",
"tests/test_tensor.py::DimTests::testResize",
"tests/test_tensor.py::DimTests::testResizeAs",
"tests/test_tensor.py::DimTests::testSize",
"tests/test_tensor.py::DimTests::testView",
"tests/test_tensor.py::AddTests::testInplace",
"tests/test_tensor.py::AddTests::testScalar",
"tests/test_tensor.py::AddTests::testSimple",
"tests/test_tensor.py::CeilTests::testCeil",
"tests/test_tensor.py::CeilTests::testCeil_",
"tests/test_tensor.py::ZeroTests::testZero",
"tests/test_tensor.py::FloorTests::testFloor_",
"tests/test_tensor.py::SubTests::testInplace",
"tests/test_tensor.py::SubTests::testScalar",
"tests/test_tensor.py::SubTests::testSimple",
"tests/test_tensor.py::MaxTests::testAxis",
"tests/test_tensor.py::MaxTests::testNoDim",
"tests/test_tensor.py::MultTests::testInplace",
"tests/test_tensor.py::MultTests::testScalar",
"tests/test_tensor.py::MultTests::testSimple",
"tests/test_tensor.py::DivTests::testInplace",
"tests/test_tensor.py::DivTests::testScalar",
"tests/test_tensor.py::DivTests::testSimple",
"tests/test_tensor.py::AbsTests::testabs",
"tests/test_tensor.py::AbsTests::testabs_",
"tests/test_tensor.py::ShapeTests::testShape",
"tests/test_tensor.py::SqrtTests::testSqrt",
"tests/test_tensor.py::SqrtTests::testSqrt_",
"tests/test_tensor.py::SumTests::testDimIsNotNoneInt",
"tests/test_tensor.py::SumTests::testDimNoneInt",
"tests/test_tensor.py::EqualTests::testEqOp",
"tests/test_tensor.py::EqualTests::testEqual",
"tests/test_tensor.py::EqualTests::testIneqOp",
"tests/test_tensor.py::EqualTests::testNotEqual",
"tests/test_tensor.py::IndexTests::testIndexing",
"tests/test_tensor.py::sigmoidTests::testSigmoid",
"tests/test_tensor.py::addmm::testaddmm1d",
"tests/test_tensor.py::addmm::testaddmm2d",
"tests/test_tensor.py::addmm::testaddmm_1d",
"tests/test_tensor.py::addmm::testaddmm_2d",
"tests/test_tensor.py::addcmulTests::testaddcmul1d",
"tests/test_tensor.py::addcmulTests::testaddcmul2d",
"tests/test_tensor.py::addcmulTests::testaddcmul_1d",
"tests/test_tensor.py::addcmulTests::testaddcmul_2d",
"tests/test_tensor.py::addcdivTests::testaddcdiv1d",
"tests/test_tensor.py::addcdivTests::testaddcdiv2d",
"tests/test_tensor.py::addcdivTests::testaddcdiv_1d",
"tests/test_tensor.py::addcdivTests::testaddcdiv_2d",
"tests/test_tensor.py::addmvTests::testaddmv",
"tests/test_tensor.py::addmvTests::testaddmv_",
"tests/test_tensor.py::addbmmTests::testaddbmm",
"tests/test_tensor.py::addbmmTests::testaddbmm_",
"tests/test_tensor.py::baddbmmTests::testbaddbmm",
"tests/test_tensor.py::baddbmmTests::testbaddbmm_",
"tests/test_tensor.py::transposeTests::testT",
"tests/test_tensor.py::transposeTests::testT_",
"tests/test_tensor.py::transposeTests::testTranspose",
"tests/test_tensor.py::transposeTests::testTranspose_",
"tests/test_tensor.py::unsqueezeTests::testUnsqueeze",
"tests/test_tensor.py::unsqueezeTests::testUnsqueeze_",
"tests/test_tensor.py::expTests::testexp",
"tests/test_tensor.py::expTests::testexp_",
"tests/test_tensor.py::fracTests::testfrac",
"tests/test_tensor.py::fracTests::testfrac_",
"tests/test_tensor.py::rsqrtTests::testrsqrt",
"tests/test_tensor.py::rsqrtTests::testrsqrt_",
"tests/test_tensor.py::signTests::testsign",
"tests/test_tensor.py::signTests::testsign_",
"tests/test_tensor.py::numpyTests::testnumpy",
"tests/test_tensor.py::reciprocalTests::testreciprocal",
"tests/test_tensor.py::reciprocalTests::testrsqrt_",
"tests/test_tensor.py::logTests::testLog",
"tests/test_tensor.py::logTests::testLog1p",
"tests/test_tensor.py::logTests::testLog1p_",
"tests/test_tensor.py::logTests::testLog_",
"tests/test_tensor.py::clampTests::testClampFloat",
"tests/test_tensor.py::clampTests::testClampFloatInPlace",
"tests/test_tensor.py::clampTests::testClampInt",
"tests/test_tensor.py::clampTests::testClampIntInPlace",
"tests/test_tensor.py::cloneTests::testClone",
"tests/test_tensor.py::chunkTests::testChunk",
"tests/test_tensor.py::chunkTests::testChunkSameSize",
"tests/test_tensor.py::gtTests::testGtInPlaceWithNumber",
"tests/test_tensor.py::gtTests::testGtInPlaceWithTensor",
"tests/test_tensor.py::gtTests::testGtWithNumber",
"tests/test_tensor.py::gtTests::testGtWithTensor",
"tests/test_tensor.py::bernoulliTests::testBernoulli",
"tests/test_tensor.py::bernoulliTests::testBernoulli_",
"tests/test_tensor.py::uniformTests::testUniform",
"tests/test_tensor.py::uniformTests::testUniform_",
"tests/test_tensor.py::fillTests::testFill_",
"tests/test_tensor.py::topkTests::testTopK",
"tests/test_tensor.py::tolistTests::testToList",
"tests/test_tensor.py::traceTests::testTrace",
"tests/test_tensor.py::roundTests::testRound",
"tests/test_tensor.py::roundTests::testRound_",
"tests/test_tensor.py::repeatTests::testRepeat",
"tests/test_tensor.py::powTests::testPow",
"tests/test_tensor.py::powTests::testPow_",
"tests/test_tensor.py::prodTests::testProd",
"tests/test_tensor.py::randomTests::testRandom_",
"tests/test_tensor.py::nonzeroTests::testNonZero",
"tests/test_tensor.py::cumprodTest::testCumprod",
"tests/test_tensor.py::cumprodTest::testCumprod_",
"tests/test_tensor.py::splitTests::testSplit",
"tests/test_tensor.py::squeezeTests::testSqueeze",
"tests/test_tensor.py::expandAsTests::testExpandAs",
"tests/test_tensor.py::meanTests::testMean",
"tests/test_tensor.py::notEqualTests::testNe",
"tests/test_tensor.py::notEqualTests::testNe_",
"tests/test_tensor.py::index_selectTests::testIndex_select",
"tests/test_tensor.py::gatherTests::testGatherNumerical1",
"tests/test_tensor.py::gatherTests::testGatherNumerical2",
"tests/test_tensor.py::scatterTests::testScatter_DimOutOfRange",
"tests/test_tensor.py::scatterTests::testScatter_IndexOutOfRange",
"tests/test_tensor.py::scatterTests::testScatter_IndexType",
"tests/test_tensor.py::scatterTests::testScatter_Numerical0",
"tests/test_tensor.py::scatterTests::testScatter_Numerical1",
"tests/test_tensor.py::scatterTests::testScatter_Numerical2",
"tests/test_tensor.py::scatterTests::testScatter_Numerical3",
"tests/test_tensor.py::scatterTests::testScatter_Numerical4",
"tests/test_tensor.py::scatterTests::testScatter_Numerical5",
"tests/test_tensor.py::scatterTests::testScatter_Numerical6",
"tests/test_tensor.py::scatterTests::testScatter_index_src_dimension_mismatch",
"tests/test_tensor.py::remainderTests::testRemainder",
"tests/test_tensor.py::remainderTests::testRemainder_",
"tests/test_tensor.py::remainderTests::testRemainder_broadcasting",
"tests/test_tensor.py::masked_scatter_Tests::testMasked_scatter_1",
"tests/test_tensor.py::masked_scatter_Tests::testMasked_scatter_braodcasting1",
"tests/test_tensor.py::masked_scatter_Tests::testMasked_scatter_braodcasting2",
"tests/test_tensor.py::masked_fill_Tests::testMasked_fill_",
"tests/test_tensor.py::masked_fill_Tests::testMasked_fill_broadcasting",
"tests/test_tensor.py::eqTests::testEqInPlaceWithNumber",
"tests/test_tensor.py::eqTests::testEqInPlaceWithTensor",
"tests/test_tensor.py::eqTests::testEqWithNumber",
"tests/test_tensor.py::eqTests::testEqWithTensor"
] | [] | Apache License 2.0 | 1,703 | 1,001 | [
"syft/tensor.py"
] |
|
mkdocs__mkdocs-1294 | b8123ffb86da6c2eef114db0c2eea7c6315a281a | 2017-09-27 18:07:01 | 84906a7a6c936719539339b2f46658c1a561527f | diff --git a/mkdocs/utils/__init__.py b/mkdocs/utils/__init__.py
index eb9f7a42..2b335111 100644
--- a/mkdocs/utils/__init__.py
+++ b/mkdocs/utils/__init__.py
@@ -103,12 +103,15 @@ def reduce_list(data_set):
def copy_file(source_path, output_path):
"""
Copy source_path to output_path, making sure any parent directories exist.
- """
+ The output_path may be a directory.
+ """
output_dir = os.path.dirname(output_path)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
- shutil.copy(source_path, output_path)
+ if os.path.isdir(output_path):
+ output_path = os.path.join(output_path, os.path.basename(source_path))
+ shutil.copyfile(source_path, output_path)
def write_file(content, output_path):
| Permission denied when copying a template from a read-only path
There are two things uncommon about my setup that cause the error.
1. I have installed mkdocs using [Nix](https://nixos.org/nix/). Long story short, all the mkdocs files (the ones in `lib/python2.7/site-packages/mkdocs`) have mode 0444 (that is, read-only).
2. I have `theme_dir` set in `mkdocs.yml` and I use it to overwrite one of the theme files, namely `js/highlight.pack.js`.
This is what I get:
~~~~
$ mkdocs build
WARNING - Config value: 'extra_javascript'. Warning: The following files have been automatically included in the documentation build and will be added to the HTML: highlight/theme/js/highlight.pack.js. This behavior is deprecated. In version 1.0 and later they will need to be explicitly listed in the 'extra_javascript' config setting.
INFO - Cleaning site directory
INFO - Building documentation to directory: <project path>/build/site
Traceback (most recent call last):
File "/nix/store/2zkwsgan90gl63pqnq01vrdrpf11fm1m-mkdocs-0.16.3/bin/.mkdocs-wrapped", line 12, in <module>
sys.exit(cli())
File "/nix/store/cjhms7xja78pbh5gnh9ii7hlxizq2iy7-python2.7-click-6.7/lib/python2.7/site-packages/click/core.py", line 722, in __call__
return self.main(*args, **kwargs)
File "/nix/store/cjhms7xja78pbh5gnh9ii7hlxizq2iy7-python2.7-click-6.7/lib/python2.7/site-packages/click/core.py", line 697, in main
rv = self.invoke(ctx)
File "/nix/store/cjhms7xja78pbh5gnh9ii7hlxizq2iy7-python2.7-click-6.7/lib/python2.7/site-packages/click/core.py", line 1066, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/nix/store/cjhms7xja78pbh5gnh9ii7hlxizq2iy7-python2.7-click-6.7/lib/python2.7/site-packages/click/core.py", line 895, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/nix/store/cjhms7xja78pbh5gnh9ii7hlxizq2iy7-python2.7-click-6.7/lib/python2.7/site-packages/click/core.py", line 535, in invoke
return callback(*args, **kwargs)
File "/nix/store/2zkwsgan90gl63pqnq01vrdrpf11fm1m-mkdocs-0.16.3/lib/python2.7/site-packages/mkdocs/__main__.py", line 156, in build_command
), dirty=not clean)
File "/nix/store/2zkwsgan90gl63pqnq01vrdrpf11fm1m-mkdocs-0.16.3/lib/python2.7/site-packages/mkdocs/commands/build.py", line 373, in build
theme_dir, config['site_dir'], exclude=['*.py', '*.pyc', '*.html'], dirty=dirty
File "/nix/store/2zkwsgan90gl63pqnq01vrdrpf11fm1m-mkdocs-0.16.3/lib/python2.7/site-packages/mkdocs/utils/__init__.py", line 175, in copy_media_files
copy_file(source_path, output_path)
File "/nix/store/2zkwsgan90gl63pqnq01vrdrpf11fm1m-mkdocs-0.16.3/lib/python2.7/site-packages/mkdocs/utils/__init__.py", line 110, in copy_file
shutil.copy(source_path, output_path)
File "/nix/store/w8zld7z4gq4b36z0szgrh6yv5zi30915-python-2.7.13/lib/python2.7/shutil.py", line 119, in copy
copyfile(src, dst)
File "/nix/store/w8zld7z4gq4b36z0szgrh6yv5zi30915-python-2.7.13/lib/python2.7/shutil.py", line 83, in copyfile
with open(dst, 'wb') as fdst:
IOError: [Errno 13] Permission denied: u'<project path>/build/site/js/highlight.pack.js'
$ ls -l build/site/js/
total 396
-r--r--r-- 1 kirelagin staff 300764 Sep 26 16:03 highlight.pack.js
-r--r--r-- 1 kirelagin staff 84245 Sep 26 16:03 jquery-2.1.1.min.js
-r--r--r-- 1 kirelagin staff 11084 Sep 26 16:03 modernizr-2.8.3.min.js
-r--r--r-- 1 kirelagin staff 2676 Sep 26 16:03 theme.js
$ ls -ld build/site/js/
drwxr-xr-x 6 kirelagin staff 204 Sep 26 16:03 build/site/js/
~~~~
What happens is, the built-in theme files get copied with their permissions preserved, so `site/js/highlight.pack.js` ends up having mode 0444. Next mkdocs tries to overwrite this file with the one from the `theme_dir` and at this point `shutil.copyfile` fails, because that’s how it works.
I’m not really sure what to do with that. Probably, catching the exception and adjusting the permissions would make sense. | mkdocs/mkdocs | diff --git a/mkdocs/tests/utils/utils_tests.py b/mkdocs/tests/utils/utils_tests.py
index 5ffe24fd..fec697f7 100644
--- a/mkdocs/tests/utils/utils_tests.py
+++ b/mkdocs/tests/utils/utils_tests.py
@@ -6,6 +6,9 @@ from __future__ import unicode_literals
import mock
import os
import unittest
+import tempfile
+import shutil
+import stat
from mkdocs import nav, utils, exceptions
from mkdocs.tests.base import dedent
@@ -248,3 +251,71 @@ class UtilsTests(unittest.TestCase):
config = utils.yaml_load(yaml_src)
self.assertTrue(isinstance(config['key'], utils.text_type))
self.assertTrue(isinstance(config['key2'][0], utils.text_type))
+
+ def test_copy_files(self):
+ src_paths = [
+ 'foo.txt',
+ 'bar.txt',
+ 'baz.txt',
+ ]
+ dst_paths = [
+ 'foo.txt',
+ 'foo/', # ensure src filename is appended
+ 'foo/bar/baz.txt' # ensure missing dirs are created
+ ]
+ expected = [
+ 'foo.txt',
+ 'foo/bar.txt',
+ 'foo/bar/baz.txt',
+ ]
+
+ src_dir = tempfile.mkdtemp()
+ dst_dir = tempfile.mkdtemp()
+
+ try:
+ for i, src in enumerate(src_paths):
+ src = os.path.join(src_dir, src)
+ with open(src, 'w') as f:
+ f.write('content')
+ dst = os.path.join(dst_dir, dst_paths[i])
+ utils.copy_file(src, dst)
+ self.assertTrue(os.path.isfile(os.path.join(dst_dir, expected[i])))
+ finally:
+ shutil.rmtree(src_dir)
+ shutil.rmtree(dst_dir)
+
+ def test_copy_files_without_permissions(self):
+ src_paths = [
+ 'foo.txt',
+ 'bar.txt',
+ 'baz.txt',
+ ]
+ expected = [
+ 'foo.txt',
+ 'bar.txt',
+ 'baz.txt',
+ ]
+
+ src_dir = tempfile.mkdtemp()
+ dst_dir = tempfile.mkdtemp()
+
+ try:
+ for i, src in enumerate(src_paths):
+ src = os.path.join(src_dir, src)
+ with open(src, 'w') as f:
+ f.write('content')
+ # Set src file to read-only
+ os.chmod(src, stat.S_IRUSR)
+ utils.copy_file(src, dst_dir)
+ self.assertTrue(os.path.isfile(os.path.join(dst_dir, expected[i])))
+ self.assertNotEqual(os.stat(src).st_mode, os.stat(os.path.join(dst_dir, expected[i])).st_mode)
+ # While src was read-only, dst must remain writable
+ self.assertTrue(os.access(os.path.join(dst_dir, expected[i]), os.W_OK))
+ finally:
+ for src in src_paths:
+ # Undo read-only so we can delete temp files
+ src = os.path.join(src_dir, src)
+ if os.path.exists(src):
+ os.chmod(src, stat.S_IRUSR | stat.S_IWUSR)
+ shutil.rmtree(src_dir)
+ shutil.rmtree(dst_dir)
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 1
} | 0.16 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"mock"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements/project.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | click==8.1.8
coverage==7.8.0
exceptiongroup==1.2.2
importlib_metadata==8.6.1
iniconfig==2.1.0
Jinja2==3.1.6
livereload==2.7.1
Markdown==3.7
MarkupSafe==3.0.2
-e git+https://github.com/mkdocs/mkdocs.git@b8123ffb86da6c2eef114db0c2eea7c6315a281a#egg=mkdocs
mock==5.2.0
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
pytest-cov==6.0.0
PyYAML==6.0.2
tomli==2.2.1
tornado==6.4.2
zipp==3.21.0
| name: mkdocs
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- click==8.1.8
- coverage==7.8.0
- exceptiongroup==1.2.2
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- jinja2==3.1.6
- livereload==2.7.1
- markdown==3.7
- markupsafe==3.0.2
- mock==5.2.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- pytest-cov==6.0.0
- pyyaml==6.0.2
- tomli==2.2.1
- tornado==6.4.2
- zipp==3.21.0
prefix: /opt/conda/envs/mkdocs
| [
"mkdocs/tests/utils/utils_tests.py::UtilsTests::test_copy_files_without_permissions"
] | [
"mkdocs/tests/utils/utils_tests.py::UtilsTests::test_create_media_urls",
"mkdocs/tests/utils/utils_tests.py::UtilsTests::test_create_relative_media_url_sub_index",
"mkdocs/tests/utils/utils_tests.py::UtilsTests::test_create_relative_media_url_sub_index_windows"
] | [
"mkdocs/tests/utils/utils_tests.py::UtilsTests::test_copy_files",
"mkdocs/tests/utils/utils_tests.py::UtilsTests::test_get_theme_dir",
"mkdocs/tests/utils/utils_tests.py::UtilsTests::test_get_theme_dir_importerror",
"mkdocs/tests/utils/utils_tests.py::UtilsTests::test_get_theme_dir_keyerror",
"mkdocs/tests/utils/utils_tests.py::UtilsTests::test_get_themes",
"mkdocs/tests/utils/utils_tests.py::UtilsTests::test_get_themes_error",
"mkdocs/tests/utils/utils_tests.py::UtilsTests::test_get_themes_warning",
"mkdocs/tests/utils/utils_tests.py::UtilsTests::test_html_path",
"mkdocs/tests/utils/utils_tests.py::UtilsTests::test_is_html_file",
"mkdocs/tests/utils/utils_tests.py::UtilsTests::test_is_markdown_file",
"mkdocs/tests/utils/utils_tests.py::UtilsTests::test_nest_paths",
"mkdocs/tests/utils/utils_tests.py::UtilsTests::test_reduce_list",
"mkdocs/tests/utils/utils_tests.py::UtilsTests::test_unicode_yaml",
"mkdocs/tests/utils/utils_tests.py::UtilsTests::test_url_path"
] | [] | BSD 2-Clause "Simplified" License | 1,705 | 215 | [
"mkdocs/utils/__init__.py"
] |
|
OpenMined__PySyft-257 | b0d646922b7529f5198dcc21fc856c7e9e598976 | 2017-09-28 06:36:51 | 06ce023225dd613d8fb14ab2046135b93ab22376 | diff --git a/syft/__init__.py b/syft/__init__.py
index a172d04425..39d8074ab4 100644
--- a/syft/__init__.py
+++ b/syft/__init__.py
@@ -6,7 +6,7 @@ from syft import test
from syft.tensor import equal, TensorBase
from syft.math import cumprod, cumsum, ceil, dot, matmul, addmm, addcmul
from syft.math import addcdiv, addmv, addbmm, baddbmm, transpose
-from syft.math import unsqueeze, zeros, ones, rand, randn
+from syft.math import unsqueeze, zeros, ones, rand, randn, mm
s = str(he)
s += str(nn)
@@ -17,3 +17,4 @@ s += str(dot) + str(matmul) + str(addmm) + str(addcmul) + str(addcdiv)
s += str(addmv) + str(addbmm) + str(baddbmm)
s += str(transpose) + str(rand) + str(randn) + str(ones) + str(zeros)
s += str(unsqueeze)
+s += str(mm)
diff --git a/syft/math.py b/syft/math.py
index 4969a9a256..99e36958f3 100644
--- a/syft/math.py
+++ b/syft/math.py
@@ -9,7 +9,7 @@ from .tensor import _ensure_tensorbase
__all__ = [
'cumprod', 'cumsum', 'ceil', 'dot', 'floor', 'matmul', 'addmm', 'addcmul',
'addcdiv', 'addmv', 'addbmm', 'baddbmm', 'sigmoid', 'unsqueeze', 'tanh', 'relu',
- 'zeros', 'ones', 'rand', 'randn'
+ 'zeros', 'ones', 'rand', 'randn', 'mm'
]
@@ -341,3 +341,23 @@ def unsqueeze(tensor1, dim):
raise NotImplemented
else:
return TensorBase(np.expand_dims(tensor1.data, dim))
+
+
+def mm(tensor1, tensor2):
+ """
+ Performs a matrix multiplication of :attr:`tensor1` and :attr:`tensor2`.
+
+ If :attr:`tensor1` is a `n x m` Tensor, :attr:`tensor2` is a `m x p` Tensor,
+ output will be a `n x p` Tensor.
+
+ Args:
+ tensor1 (Tensor): First Tensor to be multiplied
+ tensor2 (Tensor): Second Tensor to be multiplied"""
+
+ _ensure_tensorbase(tensor1)
+ _ensure_tensorbase(tensor2)
+
+ if tensor1.encrypted or tensor2.encrypted:
+ return NotImplemented
+ else:
+ return TensorBase(np.array(np.matmul(tensor1.data, tensor2.data)))
diff --git a/syft/tensor.py b/syft/tensor.py
index 9117e74a13..7e4560fcf4 100644
--- a/syft/tensor.py
+++ b/syft/tensor.py
@@ -1067,11 +1067,13 @@ class TensorBase(object):
else:
if tensor.shape() == self.shape():
- tensor2 = np.array([1 if x else 0 for x in np.equal(tensor.data.flatten(), self.data.flatten()).tolist()])
+ tensor2 = np.array([1 if x else 0 for x in np.equal(
+ tensor.data.flatten(), self.data.flatten()).tolist()])
result = tensor2.reshape(self.data.shape)
return TensorBase(result)
else:
- raise ValueError('inconsistent dimensions {} and {}'.format(self.shape(), tensor.shape()))
+ raise ValueError('inconsistent dimensions {} and {}'.format(
+ self.shape(), tensor.shape()))
def ne_(self, tensor):
"""
@@ -1118,7 +1120,8 @@ class TensorBase(object):
"""Computes the histogram of a tensor and Returns it"""
if self.encrypted:
return NotImplemented
- hist, edges = np.histogram(np.array(self.data), bins=bins, range=(min, max))
+ hist, edges = np.histogram(
+ np.array(self.data), bins=bins, range=(min, max))
return TensorBase(hist)
def scatter_(self, dim, index, src):
@@ -1136,19 +1139,22 @@ class TensorBase(object):
if index.data.dtype != np.dtype('int_'):
raise TypeError("The values of index must be integers")
if self.data.ndim != index.data.ndim:
- raise ValueError("Index should have the same number of dimensions as output")
+ raise ValueError(
+ "Index should have the same number of dimensions as output")
if dim >= self.data.ndim or dim < -self.data.ndim:
raise IndexError("dim is out of range")
if dim < 0:
# Not sure why scatter should accept dim < 0, but that is the behavior in PyTorch's scatter
dim = self.data.ndim + dim
- idx_xsection_shape = index.data.shape[:dim] + index.data.shape[dim + 1:]
+ idx_xsection_shape = index.data.shape[:dim] + \
+ index.data.shape[dim + 1:]
self_xsection_shape = self.data.shape[:dim] + self.data.shape[dim + 1:]
if idx_xsection_shape != self_xsection_shape:
raise ValueError("Except for dimension " + str(dim) +
", all dimensions of index and output should be the same size")
if (index.data >= self.data.shape[dim]).any() or (index.data < 0).any():
- raise IndexError("The values of index must be between 0 and (self.data.shape[dim] -1)")
+ raise IndexError(
+ "The values of index must be between 0 and (self.data.shape[dim] -1)")
def make_slice(arr, dim, i):
slc = [slice(None)] * arr.ndim
@@ -1165,7 +1171,8 @@ class TensorBase(object):
if not np.isscalar(src):
src = _ensure_tensorbase(src)
if index.data.shape[dim] > src.data.shape[dim]:
- raise IndexError("Dimension " + str(dim) + "of index can not be bigger than that of src ")
+ raise IndexError("Dimension " + str(dim) +
+ "of index can not be bigger than that of src ")
src_shape = src.data.shape[:dim] + src.data.shape[dim + 1:]
if idx_xsection_shape != src_shape:
raise ValueError("Except for dimension " +
@@ -1173,7 +1180,8 @@ class TensorBase(object):
# src_idx is a NumPy advanced index for indexing of elements in the src
src_idx = list(idx)
src_idx.pop(dim)
- src_idx.insert(dim, np.repeat(np.arange(index.data.shape[dim]), np.prod(idx_xsection_shape)))
+ src_idx.insert(dim, np.repeat(
+ np.arange(index.data.shape[dim]), np.prod(idx_xsection_shape)))
self.data[idx] = src.data[src_idx]
else:
@@ -1195,7 +1203,8 @@ class TensorBase(object):
index = _ensure_tensorbase(index)
if self.encrypted or index.encrypted:
return NotImplemented
- idx_xsection_shape = index.data.shape[:dim] + index.data.shape[dim + 1:]
+ idx_xsection_shape = index.data.shape[:dim] + \
+ index.data.shape[dim + 1:]
self_xsection_shape = self.data.shape[:dim] + self.data.shape[dim + 1:]
if idx_xsection_shape != self_xsection_shape:
raise ValueError("Except for dimension " + str(dim) +
@@ -1281,7 +1290,8 @@ class TensorBase(object):
return NotImplemented
mask_self_iter = np.nditer([mask.data, self.data])
source_iter = np.nditer(source.data)
- out_flat = [s if m == 0 else source_iter.__next__().item() for m, s in mask_self_iter]
+ out_flat = [s if m == 0 else source_iter.__next__().item()
+ for m, s in mask_self_iter]
self.data = np.reshape(out_flat, self.data.shape)
return self
@@ -1325,13 +1335,26 @@ class TensorBase(object):
self.data = np.equal(self.data, _ensure_tensorbase(t).data)
return self
+ def mm(self, tensor2):
+ """Performs a matrix multiplication of :attr:`tensor1` and :attr:`tensor2`.
+
+ If :attr:`tensor1` is a `n x m` Tensor, :attr:`tensor2` is a `m x p` Tensor,
+ output will be a `n x p` Tensor.
+
+ Args:
+ tensor1 (Tensor): First Tensor to be multiplied
+ tensor2 (Tensor): Second Tensor to be multiplied"""
+
+ return syft.mm(self, tensor2)
+
def mv(tensormat, tensorvector):
""" matrix and vector multiplication """
if tensormat.encrypted or tensorvector.encrypted:
raise NotImplemented
elif not len(tensorvector.data.shape) == 1:
- raise ValueError('Vector dimensions not correct {}'.format(tensorvector.data.shape))
+ raise ValueError('Vector dimensions not correct {}'.format(
+ tensorvector.data.shape))
elif tensorvector.data.shape[0] != tensormat.data.shape[1]:
raise ValueError('vector dimensions {} not \
compatible with matrix {} '.format(tensorvector.data.shape, tensormat.data.shape))
@@ -1352,6 +1375,7 @@ def masked_select(tensor, mask):
tensor = _ensure_tensorbase(tensor)
if tensor.encrypted or mask.encrypted:
raise NotImplemented
- mask_broadcasted, data_broadcasted = np.broadcast_arrays(mask.data, tensor.data)
+ mask_broadcasted, data_broadcasted = np.broadcast_arrays(
+ mask.data, tensor.data)
indices = np.where(mask_broadcasted)
return TensorBase(data_broadcasted[indices])
| Implement Default mm Functionality for Base Tensor Type
**User Story A:** As a Data Scientist using Syft's Base Tensor type, we want to implement a default method for computing operations on a Tensor of arbitrary type. For this ticket to be complete, mm() should return a new tensor. For a reference on the operation this performs check out [PyTorch](http://pytorch.org/docs/master/tensors.html)'s documentation.
**Acceptance Criteria:**
- If the Base Tensor type's attribute "encrypted" is set to True, it should return a NotImplemented error.
- a unit test demonstrating the correct operation on the Base Tensor type implemented over int and float Tensors.
- inline documentation in the python code. For inspiration on inline documentation, please check out [PyTorch](http://pytorch.org/docs/master/tensors.html)'s documentation for this operator. | OpenMined/PySyft | diff --git a/tests/test_math.py b/tests/test_math.py
index 1d66568765..56fc48a1d3 100644
--- a/tests/test_math.py
+++ b/tests/test_math.py
@@ -207,3 +207,23 @@ class unsqueezeTests(unittest.TestCase):
expected_shape.insert(i, 1)
self.assertTrue(np.array_equal(out.data.shape, expected_shape))
+
+
+class mmtest(unittest.TestCase):
+ def testmm1d(self):
+ t1 = TensorBase(np.array([2, 3, 4]))
+ t2 = TensorBase(np.array([3, 4, 5]))
+ out = syft.mm(t1, t2)
+ self.assertTrue(np.alltrue(out.data == [38]))
+
+ def testmm2d(self):
+ t1 = TensorBase(np.array([[1, 2], [1, 2]]))
+ t2 = TensorBase(np.array([[2, 3], [2, 3]]))
+ out = syft.mm(t1, t2)
+ self.assertTrue(np.alltrue(out.data == [[6, 9], [6, 9]]))
+
+ def testmm3d(self):
+ t1 = TensorBase(np.array([[1, 2], [2, 3], [3, 4]]))
+ t2 = TensorBase(np.array([[1, 2, 3], [2, 3, 4]]))
+ out = syft.mm(t1, t2)
+ self.assertTrue(np.alltrue(out.data == [[5, 8, 11], [8, 13, 18], [11, 18, 25]]))
diff --git a/tests/test_tensor.py b/tests/test_tensor.py
index fe66a1ff28..363f04380d 100644
--- a/tests/test_tensor.py
+++ b/tests/test_tensor.py
@@ -1066,5 +1066,25 @@ class eqTests(unittest.TestCase):
self.assertEqual(t1, [False, True, False, False, False])
+class mm_test(unittest.TestCase):
+ def testmm1d(self):
+ t1 = TensorBase(np.array([2, 3, 4]))
+ t2 = TensorBase(np.array([3, 4, 5]))
+ out = t1.mm(t2)
+ self.assertTrue(np.alltrue(out.data == [38]))
+
+ def testmm2d(self):
+ t1 = TensorBase(np.array([[1, 2], [1, 2]]))
+ t2 = TensorBase(np.array([[2, 3], [2, 3]]))
+ out = t1.mm(t2)
+ self.assertTrue(np.alltrue(out.data == [[6, 9], [6, 9]]))
+
+ def testmm3d(self):
+ t1 = TensorBase(np.array([[1, 2], [2, 3], [3, 4]]))
+ t2 = TensorBase(np.array([[1, 2, 3], [2, 3, 4]]))
+ out = t1.mm(t2)
+ self.assertTrue(np.alltrue(out.data == [[5, 8, 11], [8, 13, 18], [11, 18, 25]]))
+
+
if __name__ == "__main__":
unittest.main()
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 3
} | PySyft/hydrogen | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"line_profiler",
"pytest",
"pytest-flake8"
],
"pre_install": [
"apt-get update",
"apt-get install -y musl-dev g++ libgmp3-dev libmpfr-dev ca-certificates libmpc-dev"
],
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | args==0.1.0
attrs==22.2.0
certifi==2021.5.30
clint==0.5.1
flake8==5.0.4
importlib-metadata==4.2.0
iniconfig==1.1.1
joblib==1.1.1
line-profiler==4.1.3
mccabe==0.7.0
numpy==1.19.5
packaging==21.3
phe==1.5.0
pluggy==1.0.0
py==1.11.0
pycodestyle==2.9.1
pyflakes==2.5.0
pyparsing==3.1.4
pyRserve==1.0.4
pytest==7.0.1
pytest-flake8==1.1.1
scikit-learn==0.24.2
scipy==1.5.4
sklearn==0.0
-e git+https://github.com/OpenMined/PySyft.git@b0d646922b7529f5198dcc21fc856c7e9e598976#egg=syft
threadpoolctl==3.1.0
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: PySyft
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- args==0.1.0
- attrs==22.2.0
- clint==0.5.1
- flake8==5.0.4
- importlib-metadata==4.2.0
- iniconfig==1.1.1
- joblib==1.1.1
- line-profiler==4.1.3
- mccabe==0.7.0
- numpy==1.19.5
- packaging==21.3
- phe==1.5.0
- pluggy==1.0.0
- py==1.11.0
- pycodestyle==2.9.1
- pyflakes==2.5.0
- pyparsing==3.1.4
- pyrserve==1.0.4
- pytest==7.0.1
- pytest-flake8==1.1.1
- scikit-learn==0.24.2
- scipy==1.5.4
- sklearn==0.0
- threadpoolctl==3.1.0
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/PySyft
| [
"tests/test_math.py::mmtest::testmm1d",
"tests/test_math.py::mmtest::testmm2d",
"tests/test_math.py::mmtest::testmm3d",
"tests/test_tensor.py::mm_test::testmm1d",
"tests/test_tensor.py::mm_test::testmm2d",
"tests/test_tensor.py::mm_test::testmm3d"
] | [] | [
"tests/test_math.py::ConvenienceTests::testOnes",
"tests/test_math.py::ConvenienceTests::testRand",
"tests/test_math.py::ConvenienceTests::testZeros",
"tests/test_math.py::DotTests::testDotFloat",
"tests/test_math.py::DotTests::testDotInt",
"tests/test_math.py::CeilTests::testCeil",
"tests/test_math.py::FloorTests::testFloor",
"tests/test_math.py::CumsumTests::testCumsum",
"tests/test_math.py::CumprodTests::testCumprod",
"tests/test_math.py::SigmoidTests::testSigmoid",
"tests/test_math.py::MatmulTests::testMatmul1DFloat",
"tests/test_math.py::MatmulTests::testMatmul1DInt",
"tests/test_math.py::MatmulTests::testMatmul2DFloat",
"tests/test_math.py::MatmulTests::testMatmul2DIdentity",
"tests/test_math.py::MatmulTests::testMatmul2DInt",
"tests/test_math.py::admmTests::testaddmm1d",
"tests/test_math.py::admmTests::testaddmm2d",
"tests/test_math.py::addcmulTests::testaddcmul1d",
"tests/test_math.py::addcmulTests::testaddcmul2d",
"tests/test_math.py::addcdivTests::testaddcdiv1d",
"tests/test_math.py::addcdivTests::testaddcdiv2d",
"tests/test_math.py::addmv::testaddmv",
"tests/test_math.py::addbmmTests::testaddbmm",
"tests/test_math.py::baddbmmTests::testbaddbmm",
"tests/test_math.py::transposeTests::testTranspose",
"tests/test_math.py::unsqueezeTests::testUnsqueeze",
"tests/test_tensor.py::DimTests::testAsView",
"tests/test_tensor.py::DimTests::testDimOne",
"tests/test_tensor.py::DimTests::testResize",
"tests/test_tensor.py::DimTests::testResizeAs",
"tests/test_tensor.py::DimTests::testSize",
"tests/test_tensor.py::DimTests::testView",
"tests/test_tensor.py::AddTests::testInplace",
"tests/test_tensor.py::AddTests::testScalar",
"tests/test_tensor.py::AddTests::testSimple",
"tests/test_tensor.py::CeilTests::testCeil",
"tests/test_tensor.py::CeilTests::testCeil_",
"tests/test_tensor.py::ZeroTests::testZero",
"tests/test_tensor.py::FloorTests::testFloor_",
"tests/test_tensor.py::SubTests::testInplace",
"tests/test_tensor.py::SubTests::testScalar",
"tests/test_tensor.py::SubTests::testSimple",
"tests/test_tensor.py::MaxTests::testAxis",
"tests/test_tensor.py::MaxTests::testNoDim",
"tests/test_tensor.py::MultTests::testInplace",
"tests/test_tensor.py::MultTests::testScalar",
"tests/test_tensor.py::MultTests::testSimple",
"tests/test_tensor.py::DivTests::testInplace",
"tests/test_tensor.py::DivTests::testScalar",
"tests/test_tensor.py::DivTests::testSimple",
"tests/test_tensor.py::AbsTests::testabs",
"tests/test_tensor.py::AbsTests::testabs_",
"tests/test_tensor.py::ShapeTests::testShape",
"tests/test_tensor.py::SqrtTests::testSqrt",
"tests/test_tensor.py::SqrtTests::testSqrt_",
"tests/test_tensor.py::SumTests::testDimIsNotNoneInt",
"tests/test_tensor.py::SumTests::testDimNoneInt",
"tests/test_tensor.py::EqualTests::testEqOp",
"tests/test_tensor.py::EqualTests::testEqual",
"tests/test_tensor.py::EqualTests::testIneqOp",
"tests/test_tensor.py::EqualTests::testNotEqual",
"tests/test_tensor.py::IndexTests::testIndexing",
"tests/test_tensor.py::sigmoidTests::testSigmoid",
"tests/test_tensor.py::addmm::testaddmm1d",
"tests/test_tensor.py::addmm::testaddmm2d",
"tests/test_tensor.py::addmm::testaddmm_1d",
"tests/test_tensor.py::addmm::testaddmm_2d",
"tests/test_tensor.py::addcmulTests::testaddcmul1d",
"tests/test_tensor.py::addcmulTests::testaddcmul2d",
"tests/test_tensor.py::addcmulTests::testaddcmul_1d",
"tests/test_tensor.py::addcmulTests::testaddcmul_2d",
"tests/test_tensor.py::addcdivTests::testaddcdiv1d",
"tests/test_tensor.py::addcdivTests::testaddcdiv2d",
"tests/test_tensor.py::addcdivTests::testaddcdiv_1d",
"tests/test_tensor.py::addcdivTests::testaddcdiv_2d",
"tests/test_tensor.py::addmvTests::testaddmv",
"tests/test_tensor.py::addmvTests::testaddmv_",
"tests/test_tensor.py::addbmmTests::testaddbmm",
"tests/test_tensor.py::addbmmTests::testaddbmm_",
"tests/test_tensor.py::baddbmmTests::testbaddbmm",
"tests/test_tensor.py::baddbmmTests::testbaddbmm_",
"tests/test_tensor.py::transposeTests::testT",
"tests/test_tensor.py::transposeTests::testT_",
"tests/test_tensor.py::transposeTests::testTranspose",
"tests/test_tensor.py::transposeTests::testTranspose_",
"tests/test_tensor.py::unsqueezeTests::testUnsqueeze",
"tests/test_tensor.py::unsqueezeTests::testUnsqueeze_",
"tests/test_tensor.py::expTests::testexp",
"tests/test_tensor.py::expTests::testexp_",
"tests/test_tensor.py::fracTests::testfrac",
"tests/test_tensor.py::fracTests::testfrac_",
"tests/test_tensor.py::rsqrtTests::testrsqrt",
"tests/test_tensor.py::rsqrtTests::testrsqrt_",
"tests/test_tensor.py::signTests::testsign",
"tests/test_tensor.py::signTests::testsign_",
"tests/test_tensor.py::numpyTests::testnumpy",
"tests/test_tensor.py::reciprocalTests::testreciprocal",
"tests/test_tensor.py::reciprocalTests::testrsqrt_",
"tests/test_tensor.py::logTests::testLog",
"tests/test_tensor.py::logTests::testLog1p",
"tests/test_tensor.py::logTests::testLog1p_",
"tests/test_tensor.py::logTests::testLog_",
"tests/test_tensor.py::clampTests::testClampFloat",
"tests/test_tensor.py::clampTests::testClampFloatInPlace",
"tests/test_tensor.py::clampTests::testClampInt",
"tests/test_tensor.py::clampTests::testClampIntInPlace",
"tests/test_tensor.py::cloneTests::testClone",
"tests/test_tensor.py::chunkTests::testChunk",
"tests/test_tensor.py::chunkTests::testChunkSameSize",
"tests/test_tensor.py::gtTests::testGtInPlaceWithNumber",
"tests/test_tensor.py::gtTests::testGtInPlaceWithTensor",
"tests/test_tensor.py::gtTests::testGtWithNumber",
"tests/test_tensor.py::gtTests::testGtWithTensor",
"tests/test_tensor.py::gtTests::testWithEncrypted",
"tests/test_tensor.py::geTests::testGeInPlaceWithNumber",
"tests/test_tensor.py::geTests::testGeInPlaceWithTensor",
"tests/test_tensor.py::geTests::testGeWithNumber",
"tests/test_tensor.py::geTests::testGeWithTensor",
"tests/test_tensor.py::geTests::testWithEncrypted",
"tests/test_tensor.py::ltTests::testLtInPlaceWithNumber",
"tests/test_tensor.py::ltTests::testLtInPlaceWithTensor",
"tests/test_tensor.py::ltTests::testLtWithNumber",
"tests/test_tensor.py::ltTests::testLtWithTensor",
"tests/test_tensor.py::ltTests::testWithEncrypted",
"tests/test_tensor.py::leTests::testLeInPlaceWithNumber",
"tests/test_tensor.py::leTests::testLeInPlaceWithTensor",
"tests/test_tensor.py::leTests::testLeWithNumber",
"tests/test_tensor.py::leTests::testLeWithTensor",
"tests/test_tensor.py::leTests::testWithEncrypted",
"tests/test_tensor.py::bernoulliTests::testBernoulli",
"tests/test_tensor.py::bernoulliTests::testBernoulli_",
"tests/test_tensor.py::uniformTests::testUniform",
"tests/test_tensor.py::uniformTests::testUniform_",
"tests/test_tensor.py::fillTests::testFill_",
"tests/test_tensor.py::topkTests::testTopK",
"tests/test_tensor.py::tolistTests::testToList",
"tests/test_tensor.py::traceTests::testTrace",
"tests/test_tensor.py::roundTests::testRound",
"tests/test_tensor.py::roundTests::testRound_",
"tests/test_tensor.py::repeatTests::testRepeat",
"tests/test_tensor.py::powTests::testPow",
"tests/test_tensor.py::powTests::testPow_",
"tests/test_tensor.py::prodTests::testProd",
"tests/test_tensor.py::randomTests::testRandom_",
"tests/test_tensor.py::nonzeroTests::testNonZero",
"tests/test_tensor.py::cumprodTest::testCumprod",
"tests/test_tensor.py::cumprodTest::testCumprod_",
"tests/test_tensor.py::splitTests::testSplit",
"tests/test_tensor.py::squeezeTests::testSqueeze",
"tests/test_tensor.py::expandAsTests::testExpandAs",
"tests/test_tensor.py::meanTests::testMean",
"tests/test_tensor.py::notEqualTests::testNe",
"tests/test_tensor.py::notEqualTests::testNe_",
"tests/test_tensor.py::index_selectTests::testIndex_select",
"tests/test_tensor.py::gatherTests::testGatherNumerical1",
"tests/test_tensor.py::gatherTests::testGatherNumerical2",
"tests/test_tensor.py::scatterTests::testScatter_DimOutOfRange",
"tests/test_tensor.py::scatterTests::testScatter_IndexOutOfRange",
"tests/test_tensor.py::scatterTests::testScatter_IndexType",
"tests/test_tensor.py::scatterTests::testScatter_Numerical0",
"tests/test_tensor.py::scatterTests::testScatter_Numerical1",
"tests/test_tensor.py::scatterTests::testScatter_Numerical2",
"tests/test_tensor.py::scatterTests::testScatter_Numerical3",
"tests/test_tensor.py::scatterTests::testScatter_Numerical4",
"tests/test_tensor.py::scatterTests::testScatter_Numerical5",
"tests/test_tensor.py::scatterTests::testScatter_Numerical6",
"tests/test_tensor.py::scatterTests::testScatter_index_src_dimension_mismatch",
"tests/test_tensor.py::remainderTests::testRemainder",
"tests/test_tensor.py::remainderTests::testRemainder_",
"tests/test_tensor.py::remainderTests::testRemainder_broadcasting",
"tests/test_tensor.py::masked_scatter_Tests::testMasked_scatter_1",
"tests/test_tensor.py::masked_scatter_Tests::testMasked_scatter_braodcasting1",
"tests/test_tensor.py::masked_scatter_Tests::testMasked_scatter_braodcasting2",
"tests/test_tensor.py::masked_fill_Tests::testMasked_fill_",
"tests/test_tensor.py::masked_fill_Tests::testMasked_fill_broadcasting",
"tests/test_tensor.py::masked_select_Tests::testMasked_select",
"tests/test_tensor.py::masked_select_Tests::testMasked_select_broadcasting1",
"tests/test_tensor.py::masked_select_Tests::testMasked_select_broadcasting2",
"tests/test_tensor.py::masked_select_Tests::testTensorBase_Masked_select",
"tests/test_tensor.py::eqTests::testEqInPlaceWithNumber",
"tests/test_tensor.py::eqTests::testEqInPlaceWithTensor",
"tests/test_tensor.py::eqTests::testEqWithNumber",
"tests/test_tensor.py::eqTests::testEqWithTensor"
] | [] | Apache License 2.0 | 1,708 | 2,315 | [
"syft/__init__.py",
"syft/math.py",
"syft/tensor.py"
] |
|
Azure__msrest-for-python-54 | 36172c1011c1a6b62eb57f7608ef571b71747a1a | 2017-09-28 18:30:24 | 24deba7a7a9e335314058ec2d0b39a710f61be60 | diff --git a/msrest/serialization.py b/msrest/serialization.py
index cc03063..61e811f 100644
--- a/msrest/serialization.py
+++ b/msrest/serialization.py
@@ -623,6 +623,8 @@ class Serializer(object):
in the iterable into a combined string. Default is 'None'.
:rtype: list, str
"""
+ if isinstance(data, str):
+ raise SerializationError("Refuse str type as a valid iter type.")
serialized = []
for d in data:
try:
| Should refuse a string as a valid list of string
Ends up in the portal as ['a','b','c'] if we use `Model('abc')` instead of `Model(['abc'])`. Should fail, accepting a string for a list of string is likely an error and not a feature.
See https://github.com/Azure/azure-sdk-for-python/issues/1376#issuecomment-323409463 | Azure/msrest-for-python | diff --git a/tests/test_serialization.py b/tests/test_serialization.py
index 4d6a80a..39f2878 100644
--- a/tests/test_serialization.py
+++ b/tests/test_serialization.py
@@ -736,6 +736,10 @@ class TestRuntimeSerialized(unittest.TestCase):
b = self.s.serialize_iter([], 'int')
self.assertEqual(b, [])
+ def test_serialize_str_as_iter(self):
+ with self.assertRaises(SerializationError):
+ self.s.serialize_iter("I am a string", 'str')
+
def test_serialize_json_obj(self):
class ComplexId(Model):
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 1
} | 0.4 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest_v2",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": null,
"pre_install": null,
"python": "3.6",
"reqs_path": [
"dev_requirements.txt"
],
"test_cmd": "pytest -xvs"
} | attrs==22.2.0
certifi==2021.5.30
charset-normalizer==2.0.12
coverage==6.2
httpretty==1.1.4
idna==3.10
importlib-metadata==4.8.3
iniconfig==1.1.1
isodate==0.6.1
-e git+https://github.com/Azure/msrest-for-python.git@36172c1011c1a6b62eb57f7608ef571b71747a1a#egg=msrest
oauthlib==3.2.2
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
pytest-cov==4.0.0
requests==2.27.1
requests-oauthlib==2.0.0
six==1.17.0
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
zipp==3.6.0
| name: msrest-for-python
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- charset-normalizer==2.0.12
- coverage==6.2
- httpretty==1.1.4
- idna==3.10
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- isodate==0.6.1
- oauthlib==3.2.2
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-cov==4.0.0
- requests==2.27.1
- requests-oauthlib==2.0.0
- six==1.17.0
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- zipp==3.6.0
prefix: /opt/conda/envs/msrest-for-python
| [
"tests/test_serialization.py::TestRuntimeSerialized::test_serialize_str_as_iter",
"tests/test_serialization.py::TestRuntimeSerialized::test_validate",
"tests/test_serialization.py::TestRuntimeSerialized::test_validation_flag",
"tests/test_serialization.py::TestRuntimeDeserialized::test_attr_bool",
"tests/test_serialization.py::TestRuntimeDeserialized::test_attr_int",
"tests/test_serialization.py::TestRuntimeDeserialized::test_attr_list_complex",
"tests/test_serialization.py::TestRuntimeDeserialized::test_attr_list_in_list",
"tests/test_serialization.py::TestRuntimeDeserialized::test_attr_list_simple",
"tests/test_serialization.py::TestRuntimeDeserialized::test_attr_none",
"tests/test_serialization.py::TestRuntimeDeserialized::test_attr_str",
"tests/test_serialization.py::TestRuntimeDeserialized::test_basic_deserialization",
"tests/test_serialization.py::TestRuntimeDeserialized::test_cls_method_deserialization",
"tests/test_serialization.py::TestRuntimeDeserialized::test_deserialize_datetime",
"tests/test_serialization.py::TestRuntimeDeserialized::test_deserialize_object",
"tests/test_serialization.py::TestRuntimeDeserialized::test_deserialize_storage",
"tests/test_serialization.py::TestRuntimeDeserialized::test_invalid_json",
"tests/test_serialization.py::TestRuntimeDeserialized::test_non_obj_deserialization",
"tests/test_serialization.py::TestRuntimeDeserialized::test_obj_with_malformed_map",
"tests/test_serialization.py::TestRuntimeDeserialized::test_obj_with_no_attr",
"tests/test_serialization.py::TestRuntimeDeserialized::test_personalize_deserialization",
"tests/test_serialization.py::TestRuntimeDeserialized::test_polymorphic_deserialization",
"tests/test_serialization.py::TestRuntimeDeserialized::test_polymorphic_deserialization_with_escape",
"tests/test_serialization.py::TestRuntimeDeserialized::test_robust_deserialization",
"tests/test_serialization.py::TestModelInstanceEquality::test_model_instance_equality"
] | [] | [
"tests/test_serialization.py::TestModelDeserialization::test_response",
"tests/test_serialization.py::TestRuntimeSerialized::test_attr_bool",
"tests/test_serialization.py::TestRuntimeSerialized::test_attr_dict_simple",
"tests/test_serialization.py::TestRuntimeSerialized::test_attr_duration",
"tests/test_serialization.py::TestRuntimeSerialized::test_attr_enum",
"tests/test_serialization.py::TestRuntimeSerialized::test_attr_int",
"tests/test_serialization.py::TestRuntimeSerialized::test_attr_list_complex",
"tests/test_serialization.py::TestRuntimeSerialized::test_attr_list_simple",
"tests/test_serialization.py::TestRuntimeSerialized::test_attr_none",
"tests/test_serialization.py::TestRuntimeSerialized::test_attr_sequence",
"tests/test_serialization.py::TestRuntimeSerialized::test_attr_str",
"tests/test_serialization.py::TestRuntimeSerialized::test_empty_list",
"tests/test_serialization.py::TestRuntimeSerialized::test_key_type",
"tests/test_serialization.py::TestRuntimeSerialized::test_model_validate",
"tests/test_serialization.py::TestRuntimeSerialized::test_obj_serialize_none",
"tests/test_serialization.py::TestRuntimeSerialized::test_obj_with_malformed_map",
"tests/test_serialization.py::TestRuntimeSerialized::test_obj_with_mismatched_map",
"tests/test_serialization.py::TestRuntimeSerialized::test_obj_without_attr_map",
"tests/test_serialization.py::TestRuntimeSerialized::test_polymorphic_serialization",
"tests/test_serialization.py::TestRuntimeSerialized::test_serialize_datetime",
"tests/test_serialization.py::TestRuntimeSerialized::test_serialize_direct_model",
"tests/test_serialization.py::TestRuntimeSerialized::test_serialize_empty_iter",
"tests/test_serialization.py::TestRuntimeSerialized::test_serialize_json_obj",
"tests/test_serialization.py::TestRuntimeSerialized::test_serialize_object",
"tests/test_serialization.py::TestRuntimeSerialized::test_serialize_primitive_types"
] | [] | MIT License | 1,711 | 134 | [
"msrest/serialization.py"
] |
|
unt-libraries__py-wasapi-client-18 | 509c7dcac70c7e9ef03a2fac10dc2c5d6479cbb8 | 2017-09-28 20:42:05 | 509c7dcac70c7e9ef03a2fac10dc2c5d6479cbb8 | diff --git a/wasapi_client.py b/wasapi_client.py
index 5b1ea4e..0336263 100755
--- a/wasapi_client.py
+++ b/wasapi_client.py
@@ -19,16 +19,15 @@ except:
from queue import Empty
from urllib.parse import urlencode
+NAME = 'wasapi_client' if __name__ == '__main__' else __name__
-NAME = 'wasapi-client' if __name__ == '__main__' else __name__
-
-MAIN_LOGGER = logging.getLogger('main')
+LOGGER = logging.getLogger(NAME)
READ_LIMIT = 1024 * 512
-def do_listener_logging(log_q, path=''):
- formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
+def start_listener_logging(log_q, path=''):
+ formatter = logging.Formatter('%(asctime)s %(levelname)s %(name)s %(message)s')
if path:
handler = logging.FileHandler(filename=path)
else:
@@ -39,16 +38,27 @@ def do_listener_logging(log_q, path=''):
listener = logging.handlers.QueueListener(log_q, handler)
listener.start()
- # Add the handler to the logger, so records from this process are written.
- logger = logging.getLogger(NAME)
- logger.addHandler(handler)
return listener
-def configure_worker_logging(log_q, log_level=logging.ERROR, logger_name=None):
- logger = logging.getLogger(logger_name)
- logger.setLevel(log_level)
- logger.addHandler(logging.handlers.QueueHandler(log_q))
+def configure_main_logging(log_q, log_level=logging.ERROR):
+ """Put a handler on the root logger.
+
+ This allows handling log records from imported modules.
+ """
+ root = logging.getLogger()
+ root.addHandler(logging.handlers.QueueHandler(log_q))
+ root.setLevel(log_level)
+
+
+def configure_worker_logging(log_q, log_level=logging.ERROR):
+ """Configure logging for worker processes."""
+ # Remove any existing handlers.
+ LOGGER.handlers = []
+ # Prevent root logger duplicating messages.
+ LOGGER.propagate = False
+ LOGGER.addHandler(logging.handlers.QueueHandler(log_q))
+ LOGGER.setLevel(log_level)
class WASAPIDownloadError(Exception):
@@ -75,7 +85,7 @@ def get_webdata(webdata_uri, session):
response = session.get(webdata_uri)
except requests.exceptions.ConnectionError as err:
sys.exit('Could not connect at {}:\n{}'.format(webdata_uri, err))
- MAIN_LOGGER.info('requesting {}'.format(webdata_uri))
+ LOGGER.info('requesting {}'.format(webdata_uri))
if response.status_code == 403:
sys.exit('Verify user/password for {}:\n{} {}'.format(webdata_uri,
response.status_code,
@@ -188,13 +198,13 @@ def download_file(file_data, session, output_path):
try:
write_file(response, output_path)
except OSError as err:
- logging.error('{}: {}'.format(location, str(err)))
+ LOGGER.error('{}: {}'.format(location, str(err)))
break
# Successful download; don't try alternate locations.
- logging.info(msg)
+ LOGGER.info(msg)
return None
else:
- logging.error(msg)
+ LOGGER.error(msg)
# We didn't download successfully; raise error.
msg = 'FAILED to download {} from {}'.format(file_data['filename'],
file_data['locations'])
@@ -219,17 +229,17 @@ def verify_file(checksums, file_path):
hash_function = getattr(hashlib, algorithm, None)
if not hash_function:
# The hash algorithm provided is not supported by hashlib.
- logging.debug('{} is unsupported'.format(algorithm))
+ LOGGER.debug('{} is unsupported'.format(algorithm))
continue
digest = calculate_sum(hash_function, file_path)
if digest == value:
- logging.info('Checksum success at: {}'.format(file_path))
+ LOGGER.info('Checksum success at: {}'.format(file_path))
return True
else:
- logging.error('Checksum {} mismatch for {}: expected {}, got {}'.format(algorithm,
- file_path,
- value,
- digest))
+ LOGGER.error('Checksum {} mismatch for {}: expected {}, got {}'.format(algorithm,
+ file_path,
+ value,
+ digest))
return False
# We didn't find a compatible algorithm.
return False
@@ -312,7 +322,7 @@ class Downloader(multiprocessing.Process):
try:
download_file(file_data, self.session, output_path)
except WASAPIDownloadError as err:
- logging.error(str(err))
+ LOGGER.error(str(err))
else:
# If we download the file without error, verify the checksum.
if verify_file(file_data['checksums'], output_path):
@@ -365,7 +375,7 @@ def _parse_args(args=sys.argv[1:]):
action='store_true',
dest='skip_manifest',
help='do not generate checksum files (ignored'
- ' when used in combination with --manifest')
+ ' when used in combination with --manifest)')
parser.add_argument('-u',
'--user',
dest='user',
@@ -443,7 +453,7 @@ def main():
manager = multiprocessing.Manager()
log_q = manager.Queue()
try:
- listener = do_listener_logging(log_q, args.log)
+ listener = start_listener_logging(log_q, args.log)
except OSError as err:
print('Could not open file for logging:', err)
sys.exit(1)
@@ -453,7 +463,7 @@ def main():
log_level = [logging.ERROR, logging.INFO, logging.DEBUG][args.verbose]
except IndexError:
log_level = logging.DEBUG
- configure_worker_logging(log_q, log_level, 'main')
+ configure_main_logging(log_q, log_level)
# Generate query string for the webdata request.
try:
@@ -499,8 +509,15 @@ def main():
destination=args.destination)
get_q = downloads.get_q
result_q = manager.Queue()
- for _ in range(args.processes):
- Downloader(get_q, result_q, log_q, log_level, auth, args.destination).start()
+
+ download_processes = []
+ num_processes = min(args.processes, get_q.qsize())
+ for _ in range(num_processes):
+ dp = Downloader(get_q, result_q, log_q, log_level, auth, args.destination)
+ dp.start()
+ download_processes.append(dp)
+ for dp in download_processes:
+ dp.join()
get_q.join()
listener.stop()
| Duplicate logging messages
The same messages are being logged multiple times--at least with more than one download process running. | unt-libraries/py-wasapi-client | diff --git a/tests/test_wasapi_client.py b/tests/test_wasapi_client.py
index 2424886..9ff5c7f 100644
--- a/tests/test_wasapi_client.py
+++ b/tests/test_wasapi_client.py
@@ -385,13 +385,13 @@ class Test_verify_file:
path = 'dummy/path'
checksums = {algorithm: checksum}
mock_calc_sum.return_value = checksum + 'notmatching'
- with patch('wasapi_client.logging', autospec=True) as mock_logging:
+ with patch('wasapi_client.LOGGER', autospec=True) as mock_logger:
assert not wc.verify_file(checksums, path)
msg = 'Checksum {} mismatch for {}: expected {}, got {}notmatching'.format(algorithm,
path,
checksum,
checksum)
- mock_logging.error.assert_called_once_with(msg)
+ mock_logger.error.assert_called_once_with(msg)
@patch('wasapi_client.calculate_sum')
def test_verify_file_one_supported_algorithm(self, mock_calc_sum):
@@ -400,11 +400,11 @@ class Test_verify_file:
checksums = OrderedDict([('abc', 'algorithm_unsupported'),
('sha1', checksum)])
mock_calc_sum.return_value = checksum
- with patch('wasapi_client.logging', autospec=True) as mock_logging:
+ with patch('wasapi_client.LOGGER', autospec=True) as mock_logger:
assert wc.verify_file(checksums, 'dummy/path')
# Check that unsupported algorithm was tried.
- mock_logging.debug.assert_called_once_with('abc is unsupported')
- mock_logging.info.assert_called_once_with('Checksum success at: dummy/path')
+ mock_logger.debug.assert_called_once_with('abc is unsupported')
+ mock_logger.info.assert_called_once_with('Checksum success at: dummy/path')
class Test_calculate_sum:
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 1
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
charset-normalizer==2.0.12
idna==3.10
importlib-metadata==4.8.3
iniconfig==1.1.1
packaging==21.3
pluggy==1.0.0
py==1.11.0
-e git+https://github.com/unt-libraries/py-wasapi-client.git@509c7dcac70c7e9ef03a2fac10dc2c5d6479cbb8#egg=py_wasapi_client
pyparsing==3.1.4
pytest==7.0.1
requests==2.27.1
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
zipp==3.6.0
| name: py-wasapi-client
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- charset-normalizer==2.0.12
- idna==3.10
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- requests==2.27.1
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- zipp==3.6.0
prefix: /opt/conda/envs/py-wasapi-client
| [
"tests/test_wasapi_client.py::Test_verify_file::test_verify_file_checksum_mismatch",
"tests/test_wasapi_client.py::Test_verify_file::test_verify_file_one_supported_algorithm"
] | [] | [
"tests/test_wasapi_client.py::Test_make_session::test_make_session_auth",
"tests/test_wasapi_client.py::Test_make_session::test_make_session_no_auth",
"tests/test_wasapi_client.py::Test_get_webdata::test_get_webdata",
"tests/test_wasapi_client.py::Test_get_webdata::test_get_webdata_403_forbidden",
"tests/test_wasapi_client.py::Test_get_webdata::test_get_webdata_ConnectionError",
"tests/test_wasapi_client.py::Test_get_webdata::test_get_webdata_json_error",
"tests/test_wasapi_client.py::Test_Downloads::test_populate_downloads",
"tests/test_wasapi_client.py::Test_Downloads::test_populate_downloads_multi_page",
"tests/test_wasapi_client.py::Test_Downloads::test_populate_downloads_no_get_q",
"tests/test_wasapi_client.py::Test_Downloads::test_populate_downloads_urls",
"tests/test_wasapi_client.py::Test_Downloads::test_populate_downloads_manifest",
"tests/test_wasapi_client.py::Test_Downloads::test_populate_downloads_manifest_destination",
"tests/test_wasapi_client.py::Test_Downloads::test_populate_downloads_generate_manifest",
"tests/test_wasapi_client.py::Test_Downloads::test_write_manifest_file",
"tests/test_wasapi_client.py::Test_Downloads::test_write_manifest_file_wrong_algorithm",
"tests/test_wasapi_client.py::Test_get_files_count::test_get_files_count",
"tests/test_wasapi_client.py::Test_get_files_size::test_get_files_size",
"tests/test_wasapi_client.py::Test_get_files_size::test_get_files_size_multi_page",
"tests/test_wasapi_client.py::Test_get_files_size::test_get_files_size_no_files",
"tests/test_wasapi_client.py::Test_convert_bytes::test_convert_bytes[0-0.0B]",
"tests/test_wasapi_client.py::Test_convert_bytes::test_convert_bytes[1023-1023.0B]",
"tests/test_wasapi_client.py::Test_convert_bytes::test_convert_bytes[1024-1.0KB]",
"tests/test_wasapi_client.py::Test_convert_bytes::test_convert_bytes[1024000-1000.0KB]",
"tests/test_wasapi_client.py::Test_convert_bytes::test_convert_bytes[1048576-1.0MB]",
"tests/test_wasapi_client.py::Test_convert_bytes::test_convert_bytes[1073741824-1.0GB]",
"tests/test_wasapi_client.py::Test_convert_bytes::test_convert_bytes[1099511628000-1.0TB]",
"tests/test_wasapi_client.py::Test_download_file::test_download_file_200",
"tests/test_wasapi_client.py::Test_download_file::test_download_file_not_200",
"tests/test_wasapi_client.py::Test_download_file::test_download_file_OSError",
"tests/test_wasapi_client.py::Test_verify_file::test_verify_file",
"tests/test_wasapi_client.py::Test_verify_file::test_verify_file_unsupported_algorithm",
"tests/test_wasapi_client.py::Test_calculate_sum::test_calculate_sum",
"tests/test_wasapi_client.py::Test_convert_queue::test_convert_queue",
"tests/test_wasapi_client.py::Test_generate_report::test_generate_report_all_success",
"tests/test_wasapi_client.py::Test_generate_report::test_generate_report_one_failure",
"tests/test_wasapi_client.py::Test_generate_report::test_generate_report_all_failure",
"tests/test_wasapi_client.py::TestDownloader::test_run",
"tests/test_wasapi_client.py::TestDownloader::test_run_WASAPIDownloadError",
"tests/test_wasapi_client.py::Test_parse_args::test_SetQueryParametersAction",
"tests/test_wasapi_client.py::Test_parse_args::test_SetQueryParametersAction_multiple_collections"
] | [] | BSD 3-Clause "New" or "Revised" License | 1,713 | 1,491 | [
"wasapi_client.py"
] |
|
vertexproject__synapse-445 | ca3e448523e3e09729f884f54d2135ebf9ff3c08 | 2017-09-29 13:01:03 | 6f5fc661a88b8cc3f4befb2c9c7ddcebf0b89ba0 | diff --git a/synapse/models/inet.py b/synapse/models/inet.py
index 97d9311f8..6df0a55c5 100644
--- a/synapse/models/inet.py
+++ b/synapse/models/inet.py
@@ -144,18 +144,31 @@ class Srv4Type(DataType):
if s_compat.isstr(valu):
return self._norm_str(valu, oldval=oldval)
+ if valu < 0 or valu > 281474976710655:
+ self._raiseBadValu(valu, mesg='Srv4Type integer is out of bounds')
+
addr = valu >> 16
port = valu & 0xffff
return valu, {'port': port, 'ipv4': addr}
def _norm_str(self, text, oldval=None):
+ if ':' not in text:
+ try:
+ valu = int(text)
+ except ValueError:
+ self._raiseBadValu(text, mesg='Srv4Type string is not a integer or a colon delimited string.')
+ return self.norm(valu)
+
try:
astr, pstr = text.split(':')
except ValueError as e:
- self._raiseBadValu(text)
+ self._raiseBadValu(text, mesg='Unable to split Srv4Type into two parts')
addr = ipv4int(astr)
port = int(pstr, 0)
+ if port < 0 or port > 65535:
+ self._raiseBadValu(text, port=port,
+ mesg='Srv4 Port number is out of bounds')
return (addr << 16) | port, {'port': port, 'ipv4': addr}
srv6re = re.compile('^\[([a-f0-9:]+)\]:(\d+)$')
| inet:srv4 type doesn't handle integer as string input
inet:tcp4="123456789012345" doesn't norm correctly | vertexproject/synapse | diff --git a/synapse/tests/test_model_inet.py b/synapse/tests/test_model_inet.py
index d36f28b0a..16418b499 100644
--- a/synapse/tests/test_model_inet.py
+++ b/synapse/tests/test_model_inet.py
@@ -123,6 +123,39 @@ class InetModelTest(SynTest):
self.eq(t3[1].get('inet:udp4:port'), 8443)
self.eq(t3[1].get('inet:udp4:ipv4'), core.getTypeNorm('inet:ipv4', '1.2.3.4')[0])
+ # 1.2.3.4:8443
+ t4 = core.formTufoByProp('inet:udp4', '1108152164603')
+ self.eq(t4[1].get('inet:udp4:port'), 8443)
+ self.eq(t4[1].get('inet:udp4:ipv4'), core.getTypeNorm('inet:ipv4', '1.2.3.4')[0])
+
+ # Ensure boundaries are observed
+ for i in ['0', 0, '0.0.0.0:0']:
+ valu, subs = core.getTypeNorm('inet:srv4', i)
+ self.eq(valu, 0)
+ self.eq(subs.get('port'), 0)
+ self.eq(subs.get('ipv4'), 0)
+
+ for i in ['281474976710655', 281474976710655, '255.255.255.255:65535']:
+ valu, subs = core.getTypeNorm('inet:srv4', i)
+ self.eq(valu, 281474976710655)
+ self.eq(subs.get('port'), 0xFFFF)
+ self.eq(subs.get('ipv4'), 0xFFFFFFFF)
+
+ # Repr works as expected
+ self.eq(core.getTypeRepr('inet:srv4', 0), '0.0.0.0:0')
+ self.eq(core.getTypeRepr('inet:srv4', 1108152164603), '1.2.3.4:8443')
+ self.eq(core.getTypeRepr('inet:srv4', 281474976710655), '255.255.255.255:65535')
+
+ # Ensure bad input fails
+ self.raises(BadTypeValu, core.getTypeNorm, 'inet:srv4', '281474976710656')
+ self.raises(BadTypeValu, core.getTypeNorm, 'inet:srv4', 281474976710656)
+ self.raises(BadTypeValu, core.getTypeNorm, 'inet:srv4', '255.255.255.255:65536')
+ self.raises(BadTypeValu, core.getTypeNorm, 'inet:srv4', '255.255.255.255:-1')
+ self.raises(BadTypeValu, core.getTypeNorm, 'inet:srv4', -1)
+ self.raises(BadTypeValu, core.getTypeNorm, 'inet:srv4', '-1')
+ self.raises(BadTypeValu, core.getTypeNorm, 'inet:srv4', 'ruh roh')
+ self.raises(BadTypeValu, core.getTypeNorm, 'inet:srv4', '1.2.3.4:8080:9090')
+
def test_model_inet_srv6_types(self):
with self.getRamCore() as core:
t0 = core.formTufoByProp('inet:tcp6', '[0:0:0:0:0:0:0:1]:80')
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | 0.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": [
"apt-get update",
"apt-get install -y build-essential libffi-dev libssl-dev python3 python3-dev python3-pip python3-setuptools"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
cffi==1.15.1
coverage==6.2
cryptography==40.0.2
importlib-metadata==4.8.3
iniconfig==1.1.1
lmdb==1.6.2
msgpack-python==0.5.6
packaging==21.3
pluggy==1.0.0
py==1.11.0
pycparser==2.21
pyOpenSSL==23.2.0
pyparsing==3.1.4
pytest==7.0.1
pytest-cov==4.0.0
-e git+https://github.com/vertexproject/synapse.git@ca3e448523e3e09729f884f54d2135ebf9ff3c08#egg=synapse
tomli==1.2.3
tornado==6.1
typing_extensions==4.1.1
xxhash==3.2.0
zipp==3.6.0
| name: synapse
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- cffi==1.15.1
- coverage==6.2
- cryptography==40.0.2
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- lmdb==1.6.2
- msgpack-python==0.5.6
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pycparser==2.21
- pyopenssl==23.2.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-cov==4.0.0
- tomli==1.2.3
- tornado==6.1
- typing-extensions==4.1.1
- xxhash==3.2.0
- zipp==3.6.0
prefix: /opt/conda/envs/synapse
| [
"synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_srv4_types"
] | [] | [
"synapse/tests/test_model_inet.py::InetModelTest::test_model_fqdn_punycode",
"synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_201706121318",
"synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_201706201837",
"synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_201709181501",
"synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_asnet4",
"synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_cast_defang",
"synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_cidr4",
"synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_email",
"synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_follows",
"synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_fqdn",
"synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_fqdn_set_sfx",
"synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_fqdn_unicode",
"synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_ipv4",
"synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_ipv4_raise",
"synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_ipv6",
"synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_mac",
"synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_netmemb",
"synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_netmesg",
"synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_netpost",
"synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_passwd",
"synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_srv6_types",
"synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_url_fields",
"synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_urlfile",
"synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_weblogon",
"synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_whois_recns",
"synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_whoisemail",
"synapse/tests/test_model_inet.py::InetModelTest::test_model_whois_contact"
] | [] | Apache License 2.0 | 1,714 | 440 | [
"synapse/models/inet.py"
] |
|
CORE-GATECH-GROUP__serpent-tools-21 | b67f52bfd0b23baa3eae9f11bab1af14bd8b2798 | 2017-09-29 14:33:04 | b67f52bfd0b23baa3eae9f11bab1af14bd8b2798 | diff --git a/serpentTools/__init__.py b/serpentTools/__init__.py
index 8e3f32c..3e8e8de 100644
--- a/serpentTools/__init__.py
+++ b/serpentTools/__init__.py
@@ -1,7 +1,7 @@
from serpentTools import settings
from serpentTools import parsers
-__version__ = '0.1.3'
+__version__ = '0.1.4'
# List TODOS/feature requests here for now
# Messages/Errors
diff --git a/serpentTools/objects/__init__.py b/serpentTools/objects/__init__.py
index 8f2e2cd..2ef74d4 100644
--- a/serpentTools/objects/__init__.py
+++ b/serpentTools/objects/__init__.py
@@ -1,8 +1,5 @@
"""Objects used to support the parsing."""
-import numpy
-from matplotlib import pyplot
-
class _SupportingObject(object):
"""
@@ -17,14 +14,12 @@ class _SupportingObject(object):
"""
- def __init__(self, container, name):
+ def __init__(self, container):
self._container = container
- self.name = name
self._filePath = container.filePath
def __repr__(self):
- return '<{} {} from {}>'.format(self.whatAmI(),
- self.name, self._filePath)
+ return '<{} from {}>'.format(self.whatAmI(), self._filePath)
def whatAmI(self):
return type(self).__name__
@@ -47,194 +42,13 @@ class _SupportingObject(object):
for item in lowerSplits[1:]])
-class DepletedMaterial(_SupportingObject):
- """Class for storing material data from ``_dep.m`` files.
-
- Parameters
- ----------
- parser: :py:class:`~serpentTools.parsers.depletion.DepletionReader`
- Parser that found this material.
- Used to obtain file metadata like isotope names and burnup
- name: str
- Name of this material
-
- Attributes
- ----------
- zai: numpy.array
- Isotope id's
- names: numpy.array
- Names of isotopes
- days: numpy.array
- Days overwhich the material was depleted
- adens: numpy.array
- Atomic density over time for each nuclide
-
- :note:
-
- These attributes only exist if the pasers was instructed to
- read in this data. I.e. if ``readers.depletion.metadataKeys``
- does not contain ``ZAI``, then this object will not have
- the ``zai`` data.
-
- """
-
- def __init__(self, parser, name):
- _SupportingObject.__init__(self, parser, name)
- self._varData = {}
-
- def __getattr__(self, item):
- """
- Allows the user to get items like ``zai`` and ``adens``
- with ``self.zai`` and ``self.adens``, respectively.
- """
- if item in self._varData:
- return self._varData[item]
- return _SupportingObject.__getattr__(self, item)
-
- def __getitem__(self, item):
- if item not in self._varData:
- if item not in self._container.metadata:
- raise KeyError('{} has no item {}'.format(self, item))
- return self._container.metadata[item]
- return self._varData[item]
-
- def addData(self, variable, rawData):
- """Add data straight from the file onto a variable.
-
- Parameters
- ----------
- variable: str
- Name of the variable directly from ``SERPENT``
- rawData: list
- List of strings corresponding to the raw data from the file
- """
- newName = self._convertVariableName(variable)
- if isinstance(rawData, str):
- scratch = [float(item) for item in rawData.split()]
- else:
- scratch = []
- for line in rawData:
- if line:
- scratch.append([float(item) for item in line.split()])
- self._varData[newName] = numpy.array(scratch)
-
- def getXY(self, xUnits, yUnits, timePoints=None, names=None):
- """Return x values for given time, and corresponding isotope values.
-
- Parameters
- ----------
- xUnits: str
- name of x value to obtain, e.g. ``'days'``, ``'burnup'``
- yUnits: str
- name of y value to return, e.g. ``'adens'``, ``'burnup'``
- timePoints: list or None
- If given, select the time points according to those specified here.
- Otherwise, select all points
- names: list or None
- If given, return y values corresponding to these isotope names.
- Otherwise, return values for all isotopes.
-
- Returns
- -------
- numpy.array
- Array of values.
- numpy.array
- Vector of time points only if ``timePoints`` is ``None``
-
- Raises
- ------
- AttributeError
- If the names of the isotopes have not been obtained and specific
- isotopes have been requested
- KeyError
- If at least one of the days requested is not present
- """
- if timePoints is not None:
- returnX = False
- timeCheck = self._checkTimePoints(xUnits, timePoints)
- if any(timeCheck):
- raise KeyError('The following times were not present in file {}'
- '\n{}'.format(self._container.filePath,
- ', '.join(timeCheck)))
- else:
- returnX = True
- if names and 'names' not in self._container.metadata:
- raise AttributeError('Parser {} has not stored the isotope names.'
- .format(self._container))
- xVals, colIndices = self._getXSlice(xUnits, timePoints)
- rowIndices = self._getIsoID(names)
- allY = self[yUnits]
- if allY.shape[0] == 1 or len(allY.shape) == 1: # vector
- return xVals, allY[colIndices] if colIndices else allY
- yVals = numpy.empty((len(rowIndices), len(xVals)), dtype=float)
- for isoID, rowId in enumerate(rowIndices):
- yVals[isoID, :] = (allY[rowId][colIndices] if colIndices
- else allY[rowId][:])
- if returnX:
- return yVals, xVals
- return yVals
-
- def _checkTimePoints(self, xUnits, timePoints):
- valid = self[xUnits]
- badPoints = [str(time) for time in timePoints if time not in valid]
- return badPoints
-
-
- def _getXSlice(self, xUnits, timePoints):
- allX = self[xUnits]
- if timePoints is not None:
- colIndices = [indx for indx, xx in enumerate(allX)
- if xx in timePoints]
- xVals = allX[colIndices]
- else:
- colIndices = None
- xVals = allX
- return xVals, colIndices
-
- def _getIsoID(self, isotopes):
- """Return the row indices that correspond to specfic isotopes."""
- # TODO: List comprehension to make rowIDs then return array
- if not isotopes:
- return numpy.array(list(range(len(self.names))), dtype=int)
- isoList = [isotopes] if isinstance(isotopes, (str, int)) else isotopes
- rowIDs = numpy.empty_like(isoList, dtype=int)
- for indx, isotope in enumerate(isoList):
- rowIDs[indx] = self.names.index(isotope)
- return rowIDs
-
- def plot(self, xUnits, yUnits, timePoints=None, names=None, ax=None):
- """Plot some data as a function of time for some or all isotopes.
-
- Parameters
- ----------
- xUnits: str
- name of x value to obtain, e.g. ``'days'``, ``'burnup'``
- yUnits: str
- name of y value to return, e.g. ``'adens'``, ``'burnup'``
- timePoints: list or None
- If given, select the time points according to those
- specified here. Otherwise, select all points
- names: list or None
- If given, return y values corresponding to these isotope
- names. Otherwise, return values for all isotopes.
- ax: None or ``matplotlib axes``
- If given, add the data to this plot.
- Otherwise, create a new plot
-
- Returns
- -------
- ``matplotlib axes``
- Axes corresponding to the figure that was plotted
+class _NamedObject(_SupportingObject):
+ """Class for named objects like materials and detectors."""
- See Also
- --------
- getXY
-
- """
- xVals, yVals = self.getXY(xUnits, yUnits, timePoints, names)
- ax = ax or pyplot.subplots(1, 1)[1]
- labels = names or [None]
- for row in range(yVals.shape[0]):
- ax.plot(xVals, yVals[row], label=labels[row])
+ def __init__(self, container, name):
+ _SupportingObject.__init__(self, container)
+ self.name = name
- return ax
+ def __repr__(self):
+ return '<{} {} from {}>'.format(self.whatAmI(),
+ self.name, self._filePath)
\ No newline at end of file
diff --git a/serpentTools/objects/materials.py b/serpentTools/objects/materials.py
new file mode 100644
index 0000000..be2fe02
--- /dev/null
+++ b/serpentTools/objects/materials.py
@@ -0,0 +1,199 @@
+"""Classes for storing material data."""
+
+import numpy
+from matplotlib import pyplot
+
+
+from serpentTools.objects import _NamedObject
+
+
+class DepletedMaterial(_NamedObject):
+ """Class for storing material data from ``_dep.m`` files.
+
+ Parameters
+ ----------
+ parser: :py:class:`~serpentTools.parsers.depletion.DepletionReader`
+ Parser that found this material.
+ Used to obtain file metadata like isotope names and burnup
+ name: str
+ Name of this material
+
+ Attributes
+ ----------
+ zai: numpy.array
+ Isotope id's
+ names: numpy.array
+ Names of isotopes
+ days: numpy.array
+ Days overwhich the material was depleted
+ adens: numpy.array
+ Atomic density over time for each nuclide
+
+ :note:
+
+ These attributes only exist if the pasers was instructed to
+ read in this data. I.e. if ``readers.depletion.metadataKeys``
+ does not contain ``ZAI``, then this object will not have
+ the ``zai`` data.
+
+ """
+
+ def __init__(self, parser, name):
+ _NamedObject.__init__(self, parser, name)
+ self._varData = {}
+
+ def __getattr__(self, item):
+ """
+ Allows the user to get items like ``zai`` and ``adens``
+ with ``self.zai`` and ``self.adens``, respectively.
+ """
+ if item in self._varData:
+ return self._varData[item]
+ return _NamedObject.__getattr__(self, item)
+
+ def __getitem__(self, item):
+ if item not in self._varData:
+ if item not in self._container.metadata:
+ raise KeyError('{} has no item {}'.format(self, item))
+ return self._container.metadata[item]
+ return self._varData[item]
+
+ def addData(self, variable, rawData):
+ """Add data straight from the file onto a variable.
+
+ Parameters
+ ----------
+ variable: str
+ Name of the variable directly from ``SERPENT``
+ rawData: list
+ List of strings corresponding to the raw data from the file
+ """
+ newName = self._convertVariableName(variable)
+ if isinstance(rawData, str):
+ scratch = [float(item) for item in rawData.split()]
+ else:
+ scratch = []
+ for line in rawData:
+ if line:
+ scratch.append([float(item) for item in line.split()])
+ self._varData[newName] = numpy.array(scratch)
+
+ def getXY(self, xUnits, yUnits, timePoints=None, names=None):
+ """Return x values for given time, and corresponding isotope values.
+
+ Parameters
+ ----------
+ xUnits: str
+ name of x value to obtain, e.g. ``'days'``, ``'burnup'``
+ yUnits: str
+ name of y value to return, e.g. ``'adens'``, ``'burnup'``
+ timePoints: list or None
+ If given, select the time points according to those specified here.
+ Otherwise, select all points
+ names: list or None
+ If given, return y values corresponding to these isotope names.
+ Otherwise, return values for all isotopes.
+
+ Returns
+ -------
+ numpy.array
+ Array of values.
+ numpy.array
+ Vector of time points only if ``timePoints`` is ``None``
+
+ Raises
+ ------
+ AttributeError
+ If the names of the isotopes have not been obtained and specific
+ isotopes have been requested
+ KeyError
+ If at least one of the days requested is not present
+ """
+ if timePoints is not None:
+ returnX = False
+ timeCheck = self._checkTimePoints(xUnits, timePoints)
+ if any(timeCheck):
+ raise KeyError('The following times were not present in file {}'
+ '\n{}'.format(self._container.filePath,
+ ', '.join(timeCheck)))
+ else:
+ returnX = True
+ if names and 'names' not in self._container.metadata:
+ raise AttributeError('Parser {} has not stored the isotope names.'
+ .format(self._container))
+ xVals, colIndices = self._getXSlice(xUnits, timePoints)
+ rowIndices = self._getIsoID(names)
+ allY = self[yUnits]
+ if allY.shape[0] == 1 or len(allY.shape) == 1: # vector
+ yVals = allY[colIndices] if colIndices else allY
+ else:
+ yVals = numpy.empty((len(rowIndices), len(xVals)), dtype=float)
+ for isoID, rowId in enumerate(rowIndices):
+ yVals[isoID, :] = (allY[rowId][colIndices] if colIndices
+ else allY[rowId][:])
+ if returnX:
+ return yVals, xVals
+ return yVals
+
+ def _checkTimePoints(self, xUnits, timePoints):
+ valid = self[xUnits]
+ badPoints = [str(time) for time in timePoints if time not in valid]
+ return badPoints
+
+ def _getXSlice(self, xUnits, timePoints):
+ allX = self[xUnits]
+ if timePoints is not None:
+ colIndices = [indx for indx, xx in enumerate(allX)
+ if xx in timePoints]
+ xVals = allX[colIndices]
+ else:
+ colIndices = None
+ xVals = allX
+ return xVals, colIndices
+
+ def _getIsoID(self, isotopes):
+ """Return the row indices that correspond to specfic isotopes."""
+ # TODO: List comprehension to make rowIDs then return array
+ if not isotopes:
+ return numpy.array(list(range(len(self.names))), dtype=int)
+ isoList = [isotopes] if isinstance(isotopes, (str, int)) else isotopes
+ rowIDs = numpy.empty_like(isoList, dtype=int)
+ for indx, isotope in enumerate(isoList):
+ rowIDs[indx] = self.names.index(isotope)
+ return rowIDs
+
+ def plot(self, xUnits, yUnits, timePoints=None, names=None, ax=None):
+ """Plot some data as a function of time for some or all isotopes.
+
+ Parameters
+ ----------
+ xUnits: str
+ name of x value to obtain, e.g. ``'days'``, ``'burnup'``
+ yUnits: str
+ name of y value to return, e.g. ``'adens'``, ``'burnup'``
+ timePoints: list or None
+ If given, select the time points according to those
+ specified here. Otherwise, select all points
+ names: list or None
+ If given, return y values corresponding to these isotope
+ names. Otherwise, return values for all isotopes.
+ ax: None or ``matplotlib axes``
+ If given, add the data to this plot.
+ Otherwise, create a new plot
+
+ Returns
+ -------
+ ``matplotlib axes``
+ Axes corresponding to the figure that was plotted
+
+ See Also
+ --------
+ getXY
+
+ """
+ xVals, yVals = self.getXY(xUnits, yUnits, timePoints, names)
+ ax = ax or pyplot.subplots(1, 1)[1]
+ labels = names or [None]
+ for row in range(yVals.shape[0]):
+ ax.plot(xVals, yVals[row], label=labels[row])
+ return ax
diff --git a/serpentTools/parsers/depletion.py b/serpentTools/parsers/depletion.py
index 8a3b695..002f516 100644
--- a/serpentTools/parsers/depletion.py
+++ b/serpentTools/parsers/depletion.py
@@ -6,7 +6,7 @@ import numpy
from drewtils.parsers import KeywordParser
from serpentTools.objects.readers import MaterialReader
-from serpentTools.objects import DepletedMaterial
+from serpentTools.objects.materials import DepletedMaterial
class DepletionReader(MaterialReader):
| Days are still returned in time points are given for a vector quantity on materials, e.g. burnup
Fix in #2 did not take in to account quantities like `burnup` and `volume` that do not return arrays for isotope quantities.
If the user specifies one of these quantities from a depleted material, the time points are still returned.
```
if allY.shape[0] == 1 or len(allY.shape) == 1: # vector
return xVals, allY[colIndices] if colIndices else allY
```
change to
```
if allY.shape[0] == 1 or len(allY.shape) == 1: # vector
yVals = allY[colIndices] if colIndices else allY
else:
yVals = numpy.empty((len(rowIndices), len(xVals)), dtype=float)
for isoID, rowId in enumerate(rowIndices):
yVals[isoID, :] = (allY[rowId][colIndices] if colIndices
else allY[rowId][:])`
```
and fix unit tests | CORE-GATECH-GROUP/serpent-tools | diff --git a/serpentTools/tests/test_depletion.py b/serpentTools/tests/test_depletion.py
index d7f6d52..6cb911d 100644
--- a/serpentTools/tests/test_depletion.py
+++ b/serpentTools/tests/test_depletion.py
@@ -124,13 +124,12 @@ class DepletedMaterialTester(_DepletionTestHelper):
"""
Verify the material can produce the full burnup vector through getXY.
"""
- _days, actual = self.material.getXY('days', 'burnup', )
+ actual, _days = self.material.getXY('days', 'burnup', )
numpy.testing.assert_equal(actual, self.fuelBU)
def test_getXY_burnup_slice(self):
"""Verify depletedMaterial getXY correctly slices a vector."""
- _days, actual = self.material.getXY('days', 'burnup',
- self.requestedDays)
+ actual = self.material.getXY('days', 'burnup', self.requestedDays)
expected = [0.0E0, 1.90317E-2, 3.60163E-2, 1.74880E-1, 3.45353E-01,
8.49693E-01, 1.66071E0]
numpy.testing.assert_equal(actual, expected)
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 1
},
"num_modified_files": 3
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | contourpy==1.3.0
cycler==0.12.1
drewtils==0.1.9
exceptiongroup==1.2.2
fonttools==4.56.0
importlib_resources==6.5.2
iniconfig==2.1.0
kiwisolver==1.4.7
matplotlib==3.9.4
numpy==2.0.2
packaging==24.2
pillow==11.1.0
pluggy==1.5.0
pyparsing==3.2.3
pytest==8.3.5
python-dateutil==2.9.0.post0
-e git+https://github.com/CORE-GATECH-GROUP/serpent-tools.git@b67f52bfd0b23baa3eae9f11bab1af14bd8b2798#egg=serpentTools
six==1.17.0
tomli==2.2.1
zipp==3.21.0
| name: serpent-tools
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- contourpy==1.3.0
- cycler==0.12.1
- drewtils==0.1.9
- exceptiongroup==1.2.2
- fonttools==4.56.0
- importlib-resources==6.5.2
- iniconfig==2.1.0
- kiwisolver==1.4.7
- matplotlib==3.9.4
- numpy==2.0.2
- packaging==24.2
- pillow==11.1.0
- pluggy==1.5.0
- pyparsing==3.2.3
- pytest==8.3.5
- python-dateutil==2.9.0.post0
- six==1.17.0
- tomli==2.2.1
- zipp==3.21.0
prefix: /opt/conda/envs/serpent-tools
| [
"serpentTools/tests/test_depletion.py::DepletedMaterialTester::test_getXY_burnup_full",
"serpentTools/tests/test_depletion.py::DepletedMaterialTester::test_getXY_burnup_slice"
] | [] | [
"serpentTools/tests/test_depletion.py::DepletionTester::test_ReadMaterials",
"serpentTools/tests/test_depletion.py::DepletionTester::test_metadata",
"serpentTools/tests/test_depletion.py::DepletedMaterialTester::test_fetchData",
"serpentTools/tests/test_depletion.py::DepletedMaterialTester::test_getXY_adens",
"serpentTools/tests/test_depletion.py::DepletedMaterialTester::test_getXY_adensAndTime",
"serpentTools/tests/test_depletion.py::DepletedMaterialTester::test_getXY_raisesError_badTime",
"serpentTools/tests/test_depletion.py::DepletedMaterialTester::test_materials"
] | [] | MIT License | 1,715 | 4,277 | [
"serpentTools/__init__.py",
"serpentTools/objects/__init__.py",
"serpentTools/parsers/depletion.py"
] |
|
sendgrid__sendgrid-python-348 | 08845196565bee2cdc1efafdd9d27018c2530eea | 2017-10-01 18:30:18 | 172bf1bbdbcd1259566b72acc456de65a63ffa3f | diff --git a/sendgrid/helpers/mail/mail.py b/sendgrid/helpers/mail/mail.py
index de41bad..a2159b2 100644
--- a/sendgrid/helpers/mail/mail.py
+++ b/sendgrid/helpers/mail/mail.py
@@ -262,11 +262,15 @@ class Email(object):
def __init__(self, email=None, name=None):
self._name = None
self._email = None
-
- if email is not None:
- self.email = email
- if name is not None:
- self.name = name
+ if name or email:
+ if not name:
+ # allows passing emails as "dude Fella <[email protected]>"
+ self.parse_email(email)
+ else:
+ #allows backwards compatibility for Email(email, name)
+ if email is not None:
+ self.email = email
+ self.name = name
@property
def name(self):
@@ -293,6 +297,28 @@ class Email(object):
email["email"] = self.email
return email
+ def parse_email(self, email_info):
+ try:
+ import rfc822
+ except ImportError:
+ import email.utils as rfc822
+
+ name, email = rfc822.parseaddr(email_info)
+
+ # more than likely a string was passed here instead of an email address
+ if "@" not in email:
+ name = email
+ email = None
+
+ if not name:
+ name = None
+
+ if not email:
+ email = None
+
+ self.name = name
+ self.email = email
+ return name, email
class Content(object):
| Inconsistent format of email from header “name <email>”
#### Issue Summary
Current implementation restrict use default `email_from` behavior. For more info please see https://tools.ietf.org/html/rfc2822#section-3.4
#### Steps to Reproduce
1. When user try to create `Email` instance from https://github.com/sendgrid/sendgrid-python/blob/master/sendgrid/helpers/mail/mail.py#L173-L179. You must specify
`from_email` and `from_name` as seperate parametrs
```
def get(self):
email = {}
if self.name != None:
email["name"] = self.name
if self.email != None:
email["email"] = self.email
return email
from_email = Email("[email protected]", "Example Name")
```
As a result client should split `from_email`:
```
try:
import rfc822
except ImportError:
import email.utils as rfc822
from_name, from_email = rfc822.parseaddr(email.from_email)
# Python sendgrid client should improve
# sendgrid/helpers/mail/mail.py:164
if not from_name:
from_name = None
Mail(from_email, from_name)
```
Proposal to how to improve this bahaviour.
Because if user now create `Email` from:
```
from_email = Email("Andrii Soldatenko <[email protected]>")
```
**Actual result:**

#### Technical details:
* sendgrid-python Version: master
* Python Version: all
#### References:
- https://github.com/sendgrid/sendgrid-python/issues/277
- RFC: https://tools.ietf.org/html/rfc2822#section-3.4
- Django docs: https://docs.djangoproject.com/en/1.10/topics/email/#emailmessage-objects
**NOTE:**
In `sengrid-python==1.4` this feature is working | sendgrid/sendgrid-python | diff --git a/test/test_email.py b/test/test_email.py
new file mode 100644
index 0000000..92ae10a
--- /dev/null
+++ b/test/test_email.py
@@ -0,0 +1,59 @@
+# -*- coding: utf-8 -*-
+import json
+
+from sendgrid.helpers.mail import (Email)
+
+try:
+ import unittest2 as unittest
+except ImportError:
+ import unittest
+
+
+class TestEmailObject(unittest.TestCase):
+ def test_add_email_address(self):
+ address = "[email protected]"
+ email = Email(address)
+
+ self.assertEqual(email.email, "[email protected]")
+
+ def test_add_name(self):
+ name = "SomeName"
+ email = Email(name=name)
+
+ self.assertEqual(email.name, name)
+
+ def test_add_name_email(self):
+ name = "SomeName"
+ address = "[email protected]"
+ email = Email(email=address, name=name)
+ self.assertEqual(email.name, name)
+ self.assertEqual(email.email, "[email protected]")
+
+ def test_add_rfc_function_finds_name_not_email(self):
+ name = "SomeName"
+ email = Email(name)
+
+ self.assertEqual(email.name, name)
+ self.assertIsNone(email.email)
+
+ def test_add_rfc_email(self):
+ name = "SomeName"
+ address = "[email protected]"
+ name_address = "{0} <{1}>".format(name, address)
+ email = Email(name_address)
+ self.assertEqual(email.name, name)
+ self.assertEqual(email.email, "[email protected]")
+
+ def test_empty_obj_add_name(self):
+ email = Email()
+ name = "SomeName"
+ email.name = name
+
+ self.assertEqual(email.name, name)
+
+ def test_empty_obj_add_email(self):
+ email = Email()
+ address = "[email protected]"
+ email.email = address
+
+ self.assertEqual(email.email, address)
\ No newline at end of file
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_media"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 5.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
dataclasses==0.8
Flask==0.10.1
importlib-metadata==4.8.3
iniconfig==1.1.1
itsdangerous==2.0.1
Jinja2==3.0.3
MarkupSafe==2.0.1
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
python-http-client==3.3.7
PyYAML==3.11
-e git+https://github.com/sendgrid/sendgrid-python.git@08845196565bee2cdc1efafdd9d27018c2530eea#egg=sendgrid
six==1.10.0
tomli==1.2.3
typing_extensions==4.1.1
Werkzeug==2.0.3
zipp==3.6.0
| name: sendgrid-python
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- dataclasses==0.8
- flask==0.10.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- itsdangerous==2.0.1
- jinja2==3.0.3
- markupsafe==2.0.1
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- python-http-client==3.3.7
- pyyaml==3.11
- six==1.10.0
- tomli==1.2.3
- typing-extensions==4.1.1
- werkzeug==2.0.3
- zipp==3.6.0
prefix: /opt/conda/envs/sendgrid-python
| [
"test/test_email.py::TestEmailObject::test_add_rfc_email",
"test/test_email.py::TestEmailObject::test_add_rfc_function_finds_name_not_email"
] | [] | [
"test/test_email.py::TestEmailObject::test_add_email_address",
"test/test_email.py::TestEmailObject::test_add_name",
"test/test_email.py::TestEmailObject::test_add_name_email",
"test/test_email.py::TestEmailObject::test_empty_obj_add_email",
"test/test_email.py::TestEmailObject::test_empty_obj_add_name"
] | [] | MIT License | 1,718 | 402 | [
"sendgrid/helpers/mail/mail.py"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.