instance_id
stringlengths 12
57
| base_commit
stringlengths 40
40
| created_at
stringdate 2015-01-06 14:05:07
2025-04-29 17:56:51
| environment_setup_commit
stringlengths 40
40
| hints_text
stringlengths 0
158k
| patch
stringlengths 261
20.8k
| problem_statement
stringlengths 11
52.5k
| repo
stringlengths 7
53
| test_patch
stringlengths 280
206k
| meta
dict | version
stringclasses 463
values | install_config
dict | requirements
stringlengths 93
34k
⌀ | environment
stringlengths 772
20k
⌀ | FAIL_TO_PASS
sequencelengths 1
856
| FAIL_TO_FAIL
sequencelengths 0
536
| PASS_TO_PASS
sequencelengths 0
7.87k
| PASS_TO_FAIL
sequencelengths 0
92
| license_name
stringclasses 35
values | __index_level_0__
int64 11
21.4k
| num_tokens_patch
int64 103
4.99k
| before_filepaths
sequencelengths 0
14
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
altair-viz__altair-398 | dfed1d404821e21c25413579f8506b4be05561ad | 2017-10-02 18:54:28 | e37000c8f54bc5e0e98ea8457b9a3c913cd58ccb | diff --git a/altair/v1/__init__.py b/altair/v1/__init__.py
index 6239d9e1..62db76eb 100644
--- a/altair/v1/__init__.py
+++ b/altair/v1/__init__.py
@@ -43,6 +43,7 @@ from .api import (
OneOfFilter,
MaxRowsExceeded,
enable_mime_rendering,
+ disable_mime_rendering
)
from ..datasets import (
diff --git a/altair/v1/api.py b/altair/v1/api.py
index 51135a9e..1b0ecea0 100644
--- a/altair/v1/api.py
+++ b/altair/v1/api.py
@@ -64,6 +64,8 @@ DEFAULT_MAX_ROWS = 5000
# Rendering configuration
#*************************************************************************
+_original_ipython_display_ = None
+
# This is added to TopLevelMixin as a method if MIME rendering is enabled
def _repr_mimebundle_(self, include, exclude, **kwargs):
"""Return a MIME-bundle for rich display in the Jupyter Notebook."""
@@ -75,8 +77,22 @@ def _repr_mimebundle_(self, include, exclude, **kwargs):
def enable_mime_rendering():
"""Enable MIME bundle based rendering used in JupyterLab/nteract."""
# This is what makes Python fun!
- delattr(TopLevelMixin, '_ipython_display_')
- TopLevelMixin._repr_mimebundle_ = _repr_mimebundle_
+ global _original_ipython_display_
+ if _original_ipython_display_ is None:
+ TopLevelMixin._repr_mimebundle_ = _repr_mimebundle_
+ _original_ipython_display_ = TopLevelMixin._ipython_display_
+ delattr(TopLevelMixin, '_ipython_display_')
+
+
+def disable_mime_rendering():
+ """Disable MIME bundle based rendering used in JupyterLab/nteract."""
+ global _original_ipython_display_
+ if _original_ipython_display_ is not None:
+ delattr(TopLevelMixin, '_repr_mimebundle_')
+ TopLevelMixin._ipython_display_ = _original_ipython_display_
+ _original_ipython_display_ = None
+
+
#*************************************************************************
# Channel Aliases
| Safer enabling of MIME rendering
Right now the `enable_mime_rendering()` function is not very safe:
* Can't call twice.
* Can't disable.
Easy to fix, but need to wait for #377 to be merged. | altair-viz/altair | diff --git a/altair/expr/tests/test_expr.py b/altair/expr/tests/test_expr.py
index 2c3b981b..328d64e5 100644
--- a/altair/expr/tests/test_expr.py
+++ b/altair/expr/tests/test_expr.py
@@ -112,7 +112,7 @@ def test_getitem_list(data):
assert set(dir(df2)) == {'xxx', 'yyy', 'calculated'}
# changing df2 shouldn't affect df1
- df2['qqq'] = df2.xxx / df2.yyy
+ df2['qqq'] = df2.xxx // df2.yyy
assert set(dir(df2)) == {'xxx', 'yyy', 'calculated', 'qqq'}
assert set(dir(df)) == {'xxx', 'yyy', 'zzz', 'calculated'}
diff --git a/altair/v1/tests/test_api.py b/altair/v1/tests/test_api.py
index b1e14e3a..75dacfce 100644
--- a/altair/v1/tests/test_api.py
+++ b/altair/v1/tests/test_api.py
@@ -448,7 +448,7 @@ def test_chart_serve():
def test_formula_expression():
- formula = Formula('blah', expr.log(expr.df.value) / expr.LN10)
+ formula = Formula('blah', expr.log(expr.df.value) // expr.LN10)
assert formula.field == 'blah'
assert formula.expr == '(log(datum.value)/LN10)'
@@ -579,3 +579,11 @@ def test_schema_url():
# Make sure that $schema
chart = Chart.from_dict(dct)
+
+
+def test_enable_mime_rendering():
+ # Make sure these functions are safe to call multiple times.
+ enable_mime_rendering()
+ enable_mime_rendering()
+ disable_mime_rendering()
+ disable_mime_rendering()
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 2
} | 1.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": [],
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/altair-viz/altair.git@dfed1d404821e21c25413579f8506b4be05561ad#egg=altair
attrs==22.2.0
backcall==0.2.0
certifi==2021.5.30
coverage==6.2
decorator==5.1.1
importlib-metadata==4.8.3
iniconfig==1.1.1
ipython==7.16.3
ipython-genutils==0.2.0
jedi==0.17.2
numpy==1.19.5
packaging==21.3
pandas==1.1.5
parso==0.7.1
pexpect==4.9.0
pickleshare==0.7.5
pluggy==1.0.0
prompt-toolkit==3.0.36
ptyprocess==0.7.0
py==1.11.0
Pygments==2.14.0
pyparsing==3.1.4
pytest==7.0.1
pytest-cov==4.0.0
python-dateutil==2.9.0.post0
pytz==2025.2
six==1.17.0
tomli==1.2.3
traitlets==4.3.3
typing_extensions==4.1.1
vega==0.4.4
wcwidth==0.2.13
zipp==3.6.0
| name: altair
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- backcall==0.2.0
- coverage==6.2
- decorator==5.1.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- ipython==7.16.3
- ipython-genutils==0.2.0
- jedi==0.17.2
- numpy==1.19.5
- packaging==21.3
- pandas==1.1.5
- parso==0.7.1
- pexpect==4.9.0
- pickleshare==0.7.5
- pluggy==1.0.0
- prompt-toolkit==3.0.36
- ptyprocess==0.7.0
- py==1.11.0
- pygments==2.14.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-cov==4.0.0
- python-dateutil==2.9.0.post0
- pytz==2025.2
- six==1.17.0
- tomli==1.2.3
- traitlets==4.3.3
- typing-extensions==4.1.1
- vega==0.4.4
- wcwidth==0.2.13
- zipp==3.6.0
prefix: /opt/conda/envs/altair
| [
"altair/v1/tests/test_api.py::test_enable_mime_rendering"
] | [
"altair/v1/tests/test_api.py::test_savechart_html",
"altair/v1/tests/test_api.py::test_savechart_json",
"altair/v1/tests/test_api.py::test_Chart_from_dict",
"altair/v1/tests/test_api.py::test_to_python",
"altair/v1/tests/test_api.py::test_to_python_stocks",
"altair/v1/tests/test_api.py::test_data_finalization",
"altair/v1/tests/test_api.py::test_layered_chart_iadd",
"altair/v1/tests/test_api.py::test_chart_add",
"altair/v1/tests/test_api.py::test_chart_to_json",
"altair/v1/tests/test_api.py::test_chart_serve",
"altair/v1/tests/test_api.py::test_max_rows",
"altair/v1/tests/test_api.py::test_schema_url"
] | [
"altair/expr/tests/test_expr.py::test_dataframe_namespace",
"altair/expr/tests/test_expr.py::test_dataframe_newcols",
"altair/expr/tests/test_expr.py::test_unary_operations",
"altair/expr/tests/test_expr.py::test_binary_operations",
"altair/expr/tests/test_expr.py::test_abs",
"altair/expr/tests/test_expr.py::test_expr_funcs",
"altair/expr/tests/test_expr.py::test_expr_consts",
"altair/expr/tests/test_expr.py::test_getitem_list",
"altair/expr/tests/test_expr.py::test_json_reprs",
"altair/v1/tests/test_api.py::test_default_mark",
"altair/v1/tests/test_api.py::test_mark_methods",
"altair/v1/tests/test_api.py::test_chart_url_input",
"altair/v1/tests/test_api.py::test_chart_to_html",
"altair/v1/tests/test_api.py::test_chart_to_json_round_trip",
"altair/v1/tests/test_api.py::test_encode_update",
"altair/v1/tests/test_api.py::test_configure_update",
"altair/v1/tests/test_api.py::test_configure_axis_update",
"altair/v1/tests/test_api.py::test_configure_cell_update",
"altair/v1/tests/test_api.py::test_configure_legend_update",
"altair/v1/tests/test_api.py::test_configure_mark_update",
"altair/v1/tests/test_api.py::test_configure_scale_update",
"altair/v1/tests/test_api.py::test_configure_facet_axis_update",
"altair/v1/tests/test_api.py::test_configure_facet_cell_update",
"altair/v1/tests/test_api.py::test_configure_facet_grid_update",
"altair/v1/tests/test_api.py::test_configure_facet_scale_update",
"altair/v1/tests/test_api.py::test_transform_update",
"altair/v1/tests/test_api.py::test_Chart_load_example",
"altair/v1/tests/test_api.py::test_to_python_with_methods",
"altair/v1/tests/test_api.py::test_mark_config[area]",
"altair/v1/tests/test_api.py::test_mark_config[bar]",
"altair/v1/tests/test_api.py::test_mark_config[line]",
"altair/v1/tests/test_api.py::test_mark_config[point]",
"altair/v1/tests/test_api.py::test_mark_config[text]",
"altair/v1/tests/test_api.py::test_mark_config[tick]",
"altair/v1/tests/test_api.py::test_mark_config[rule]",
"altair/v1/tests/test_api.py::test_mark_config[circle]",
"altair/v1/tests/test_api.py::test_mark_config[square]",
"altair/v1/tests/test_api.py::test_mark_config[errorBar]",
"altair/v1/tests/test_api.py::test_config_methods[params0]",
"altair/v1/tests/test_api.py::test_config_methods[params1]",
"altair/v1/tests/test_api.py::test_config_methods[params2]",
"altair/v1/tests/test_api.py::test_config_methods[params3]",
"altair/v1/tests/test_api.py::test_config_facet_grid",
"altair/v1/tests/test_api.py::test_finalize",
"altair/v1/tests/test_api.py::test_formula_expression",
"altair/v1/tests/test_api.py::test_filter_expression",
"altair/v1/tests/test_api.py::test_df_formula",
"altair/v1/tests/test_api.py::test_df_filter",
"altair/v1/tests/test_api.py::test_df_filter_multiple",
"altair/v1/tests/test_api.py::test_chart_dir",
"altair/v1/tests/test_api.py::test_empty_traits"
] | [] | BSD 3-Clause "New" or "Revised" License | 1,721 | 537 | [
"altair/v1/__init__.py",
"altair/v1/api.py"
] |
|
wright-group__WrightTools-318 | 2be1155987558d6c7358654e9f26068f5359df96 | 2017-10-03 00:06:45 | 592649ce55c9fa7847325c9e9b15b320a38f1389 | pep8speaks: Hello @ksunden! Thanks for submitting the PR.
- In the file [`WrightTools/data/_data.py`](https://github.com/wright-group/WrightTools/blob/6ebf5e1f7c3278d932b53a62d9ea7785dc2aafc7/WrightTools/data/_data.py), following are the PEP8 issues :
> [Line 1506:91](https://github.com/wright-group/WrightTools/blob/6ebf5e1f7c3278d932b53a62d9ea7785dc2aafc7/WrightTools/data/_data.py#L1506): [W291](https://duckduckgo.com/?q=pep8%20W291) trailing whitespace
> [Line 1507:25](https://github.com/wright-group/WrightTools/blob/6ebf5e1f7c3278d932b53a62d9ea7785dc2aafc7/WrightTools/data/_data.py#L1507): [E128](https://duckduckgo.com/?q=pep8%20E128) continuation line under-indented for visual indent
- In the file [`WrightTools/exceptions.py`](https://github.com/wright-group/WrightTools/blob/6ebf5e1f7c3278d932b53a62d9ea7785dc2aafc7/WrightTools/exceptions.py), following are the PEP8 issues :
> [Line 29:1](https://github.com/wright-group/WrightTools/blob/6ebf5e1f7c3278d932b53a62d9ea7785dc2aafc7/WrightTools/exceptions.py#L29): [E302](https://duckduckgo.com/?q=pep8%20E302) expected 2 blank lines, found 1
| diff --git a/WrightTools/data/_data.py b/WrightTools/data/_data.py
index 8915e48..c023b51 100644
--- a/WrightTools/data/_data.py
+++ b/WrightTools/data/_data.py
@@ -1203,12 +1203,10 @@ class Data:
Notes
-----
-
m-factors originally derived by Carlson and Wright. [1]_
References
----------
-
.. [1] **Absorption and Coherent Interference Effects in Multiply Resonant
Four-Wave Mixing Spectroscopy**
Roger J. Carlson, and John C. Wright
@@ -1520,6 +1518,30 @@ class Data:
# finish
self._update()
+ def rename_attrs(self, **kwargs):
+ """Rename a set of attributes.
+
+ Keyword Arguments
+ -----------------
+ Each argument should have the key of a current axis or channel,
+ and a value which is a string of its new name.
+
+ The name will be set to str(val), and its natural naming identifier
+ will be wt.kit.string2identifier(str(val))
+ """
+ changed = kwargs.keys()
+ for k, v in kwargs.items():
+ if getattr(self, k).__class__ not in (Channel, Axis):
+ raise TypeError("Attribute for key %s: expected {Channel, Axis}, got %s" %
+ (k, getattr(self, k).__class__))
+ if v not in changed and hasattr(self, v):
+ raise wt_exceptions.NameNotUniqueError(v)
+ for k, v in kwargs.items():
+ axis = getattr(self, k)
+ axis.name = str(v)
+ delattr(self, k)
+ self._update()
+
def save(self, filepath=None, verbose=True):
"""Save using the `pickle`__ module.
diff --git a/WrightTools/exceptions.py b/WrightTools/exceptions.py
index 3c25ce4..8f3fa78 100644
--- a/WrightTools/exceptions.py
+++ b/WrightTools/exceptions.py
@@ -44,6 +44,21 @@ class FileNotFound(Exception):
Exception.__init__(self, message)
+class NameNotUniqueError(Exception):
+ """NameNotUniqueError."""
+
+ def __init__(self, name):
+ """Format a Name Not Unique Error.
+
+ Parameters
+ ----------
+ name : string
+ Name of an attribute which causes a duplication.
+ """
+ message = 'Name {} results in a duplicate'.format(name)
+ Exception.__init__(self, message)
+
+
# --- custom warnings -----------------------------------------------------------------------------
| rename methods
`data.rename_channel` and `data.rename_axis`
accept kwargs with key oldname and value newname
if newname already in names and not ALSO being renamed, raise helpful exception
remember to call `data._update`
not necessarily clear how to handle `axis.label_seed` | wright-group/WrightTools | diff --git a/tests/data/rename_attrs.py b/tests/data/rename_attrs.py
new file mode 100644
index 0000000..f032f7d
--- /dev/null
+++ b/tests/data/rename_attrs.py
@@ -0,0 +1,30 @@
+"""test rename_attrs."""
+
+
+# --- import --------------------------------------------------------------------------------------
+
+
+import WrightTools as wt
+from WrightTools import datasets
+
+
+# --- test ----------------------------------------------------------------------------------------
+
+
+def test_rename():
+ p = datasets.PyCMDS.wm_w2_w1_000
+ data = wt.data.from_PyCMDS(p)
+ data.rename_attrs(w1='w2', w2='w1')
+ assert data.shape == (35, 11, 11)
+ assert data.axis_names == ['wm', 'w1', 'w2']
+
+
+def test_error():
+ p = datasets.PyCMDS.wm_w2_w1_000
+ data = wt.data.from_PyCMDS(p)
+ try:
+ data.rename_attrs(w1='w2')
+ except wt.exceptions.NameNotUniqueError:
+ assert True
+ else:
+ assert False
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 2
} | 2.13 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | appdirs==1.4.4
attrs==22.2.0
cached-property==1.5.2
certifi==2021.5.30
cycler==0.11.0
h5py==3.1.0
imageio==2.15.0
importlib-metadata==4.8.3
iniconfig==1.1.1
kiwisolver==1.3.1
matplotlib==3.3.4
numpy==1.19.5
packaging==21.3
Pillow==8.4.0
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
python-dateutil==2.9.0.post0
pytz==2025.2
scipy==1.5.4
six==1.17.0
tomli==1.2.3
typing_extensions==4.1.1
-e git+https://github.com/wright-group/WrightTools.git@2be1155987558d6c7358654e9f26068f5359df96#egg=WrightTools
zipp==3.6.0
| name: WrightTools
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- appdirs==1.4.4
- attrs==22.2.0
- cached-property==1.5.2
- cycler==0.11.0
- h5py==3.1.0
- imageio==2.15.0
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- kiwisolver==1.3.1
- matplotlib==3.3.4
- numpy==1.19.5
- packaging==21.3
- pillow==8.4.0
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- python-dateutil==2.9.0.post0
- pytz==2025.2
- scipy==1.5.4
- six==1.17.0
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/WrightTools
| [
"tests/data/rename_attrs.py::test_rename",
"tests/data/rename_attrs.py::test_error"
] | [] | [] | [] | MIT License | 1,723 | 620 | [
"WrightTools/data/_data.py",
"WrightTools/exceptions.py"
] |
jjhelmus__pyfive-35 | bdddda56cfcc51548f8a553bac96adc9919594ad | 2017-10-03 12:26:13 | d21662e6c95ed08a0b909f9aef4ee3f2428c0036 | diff --git a/pyfive/high_level.py b/pyfive/high_level.py
index 11c90ff..973f49d 100644
--- a/pyfive/high_level.py
+++ b/pyfive/high_level.py
@@ -3,6 +3,7 @@
from collections import Mapping, deque, Sequence
import os
from io import open # Python 2.7 requires for a Buffered Reader
+import posixpath
import numpy as np
@@ -49,33 +50,45 @@ class Group(Mapping):
""" Number of links in the group. """
return len(self._links)
+ def _dereference(self, ref):
+ """ Deference a Reference object. """
+ if not ref:
+ raise ValueError('cannot deference null reference')
+ obj = self.file._get_object_by_address(ref.address_of_reference)
+ if obj is None:
+ dataobjects = DataObjects(self.file._fh, ref.address_of_reference)
+ if dataobjects.is_dataset:
+ return Dataset(None, dataobjects, None, alt_file=self.file)
+ return Group(None, dataobjects, None, alt_file=self.file)
+ return obj
+
def __getitem__(self, y):
""" x.__getitem__(y) <==> x[y] """
if isinstance(y, Reference):
- if not y:
- raise ValueError('cannot deference null reference')
- obj = self.file._get_object_by_address(y.address_of_reference)
- if obj is None:
- dataobjs = DataObjects(self.file._fh, y.address_of_reference)
- if dataobjs.is_dataset:
- return Dataset(None, dataobjs, None, alt_file=self.file)
- return Group(None, dataobjs, None, alt_file=self.file)
- return obj
-
- y = y.strip('/')
-
- if y not in self._links:
- raise KeyError('%s not found in group' % (y))
-
- if self.name == '/':
- sep = ''
+ return self._dereference(y)
+
+ path = posixpath.normpath(y)
+ if path == '.':
+ return self
+ if path.startswith('/'):
+ return self.file[path[1:]]
+
+ if posixpath.dirname(path) != '':
+ next_obj, additional_obj = path.split('/', 1)
else:
- sep = '/'
+ next_obj = path
+ additional_obj = '.'
+
+ if next_obj not in self._links:
+ raise KeyError('%s not found in group' % (next_obj))
- dataobjs = DataObjects(self.file._fh, self._links[y])
+ obj_name = posixpath.join(self.name, next_obj)
+ dataobjs = DataObjects(self.file._fh, self._links[next_obj])
if dataobjs.is_dataset:
- return Dataset(self.name + sep + y, dataobjs, self)
- return Group(self.name + sep + y, dataobjs, self)
+ if additional_obj != '.':
+ raise KeyError('%s is a dataset, not a group' % (obj_name))
+ return Dataset(obj_name, dataobjs, self)
+ return Group(obj_name, dataobjs, self)[additional_obj]
def __iter__(self):
for k in self._links.keys():
| Access values with path
One thing that is not yet possible and that would help a lot. For example, with version 0.2.0:
Using Python 2.7.12 (default, Jul 1 2016, 15:12:24)
[GCC 5.4.0 20160609] on linux2
> > > import h5py
> > > import pyfive
> > >
> > > f5 = h5py.File('tests/latest.hdf5')
> > > f5["group1/subgroup1/dataset3"].value
> > > array([ 0., 1., 2., 3.], dtype=float32)
> > > ffive = pyfive.File('tests/latest.hdf5')
> > > ffive["group1/subgroup1/dataset3"].value
> > > Traceback (most recent call last):
> > > File "<stdin>", line 1, in <module>
> > > File "pyfive/high_level.py", line 48, in **getitem**
> > > raise KeyError('%s not found in group' % (y))
> > > KeyError: 'group1/subgroup1/dataset3 not found in group'
| jjhelmus/pyfive | diff --git a/tests/test_high_level.py b/tests/test_high_level.py
index 593ad24..9e14af3 100644
--- a/tests/test_high_level.py
+++ b/tests/test_high_level.py
@@ -138,3 +138,20 @@ def test_dataset_class():
assert dset1.parent.name == '/'
assert dset2.parent.name == '/group1'
+
+
+def test_get_objects_by_path():
+ # gh-15
+
+ with pyfive.File(EARLIEST_HDF5_FILE) as hfile:
+ grp = hfile['/group1']
+
+ assert hfile['/group1/subgroup1'].name == '/group1/subgroup1'
+ assert grp['/group1/subgroup1'].name == '/group1/subgroup1'
+
+ dset2 = hfile['group1/dataset2/']
+ assert dset2.name == '/group1/dataset2'
+
+ assert_raises(KeyError, hfile.__getitem__, 'group1/fake')
+ assert_raises(KeyError, hfile.__getitem__, 'group1/subgroup1/fake')
+ assert_raises(KeyError, hfile.__getitem__, 'group1/dataset2/fake')
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 0.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"nose",
"pytest"
],
"pre_install": null,
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
nose==1.3.7
numpy==1.19.5
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
-e git+https://github.com/jjhelmus/pyfive.git@bdddda56cfcc51548f8a553bac96adc9919594ad#egg=pyfive
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: pyfive
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- nose==1.3.7
- numpy==1.19.5
prefix: /opt/conda/envs/pyfive
| [
"tests/test_high_level.py::test_get_objects_by_path"
] | [] | [
"tests/test_high_level.py::test_file_class",
"tests/test_high_level.py::test_group_class",
"tests/test_high_level.py::test_dataset_class"
] | [] | BSD 3-Clause "New" or "Revised" License | 1,725 | 729 | [
"pyfive/high_level.py"
] |
|
OpenMined__PySyft-286 | 40261aab99e6857e0ed2e34c26a315f16d9500f7 | 2017-10-03 19:11:06 | 06ce023225dd613d8fb14ab2046135b93ab22376 | diff --git a/syft/tensor.py b/syft/tensor.py
index 9dc2bab3ba..c01a50dd83 100644
--- a/syft/tensor.py
+++ b/syft/tensor.py
@@ -109,6 +109,30 @@ class TensorBase(object):
self.data = _ensure_ndarray(arr_like)
self.encrypted = encrypted
+ def new(self, *args, **kwargs):
+ """Constructs a new tensor instance of the same data type.
+
+ Parameters
+ ----------
+ *args
+ Variable length argument list used to instantiate
+ new TensorBase object.
+ **kwargs
+ Arbitrary keyword arguments used to instantiate
+ new TensorBase object.
+
+ Returns
+ -------
+ TensorBase class instance if parent TensorBase
+ has self.encrypted = False, otherwise return NotImplemented
+ error.
+
+ """
+ if self.encrypted:
+ return NotImplemented
+
+ return self.__class__(*args, **kwargs)
+
def _calc_mul_depth(self, tensor1, tensor2):
if isinstance(tensor1, TensorBase) and isinstance(tensor2, TensorBase):
self._mul_depth = max(tensor1._mul_depth, tensor2._mul_depth) + 1
| Implement Default new Functionality for Base Tensor Type
**User Story A:** As a Data Scientist using Syft's Base Tensor type, I want to leverage a default method for computing operations on a Tensor of arbitrary type. For this ticket to be complete, new() should return a new tensor. For a reference on the operation this performs check out [PyTorch](http://pytorch.org/docs/master/tensors.html)'s documentation.
**Acceptance Criteria:**
- If the Base Tensor type's attribute "encrypted" is set to True, it should return a NotImplemented error.
- a unit test demonstrating the correct operation on the Base Tensor type implemented over int and float Tensors.
- inline documentation in the python code. For inspiration on inline documentation, please check out [PyTorch](http://pytorch.org/docs/master/tensors.html)'s documentation for this operator. | OpenMined/PySyft | diff --git a/tests/test_tensor.py b/tests/test_tensor.py
index d063417e29..32614ce752 100644
--- a/tests/test_tensor.py
+++ b/tests/test_tensor.py
@@ -1119,6 +1119,29 @@ class mm_test(unittest.TestCase):
self.assertTrue(np.alltrue(out.data == [[5, 8, 11], [8, 13, 18], [11, 18, 25]]))
+class newTensorTests(unittest.TestCase):
+ def test_encrypted_error(self):
+
+ t1 = TensorBase(np.array([1, 1, 1]), encrypted=True)
+ t2 = t1.new([1, 1, 2], encrypted=True)
+
+ self.assertEqual(t2, NotImplemented)
+
+ def test_return_new_float_tensor(self):
+
+ t1 = TensorBase(np.array([1, 1, 1]))
+ t2 = t1.new(np.array([1., 1., 2.]))
+
+ self.assertTrue(t2.data.dtype == np.float64)
+
+ def test_return_new_int_tensor(self):
+
+ t1 = TensorBase(np.array([1, 1, 1]))
+ t2 = t1.new(np.array([1, 1, 2]))
+
+ self.assertTrue(t2.data.dtype == np.int64)
+
+
class half(unittest.TestCase):
def half_test_1(self):
t1 = TensorBase(np.array([2, 3, 4]))
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | PySyft/hydrogen | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-flake8"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc musl-dev g++ libgmp3-dev libmpfr-dev ca-certificates libmpc-dev"
],
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | args==0.1.0
attrs==22.2.0
certifi==2021.5.30
clint==0.5.1
flake8==5.0.4
importlib-metadata==4.2.0
iniconfig==1.1.1
joblib==1.1.1
line-profiler==4.1.3
mccabe==0.7.0
numpy==1.19.5
packaging==21.3
phe==1.5.0
pluggy==1.0.0
py==1.11.0
pycodestyle==2.9.1
pyflakes==2.5.0
pyparsing==3.1.4
pyRserve==1.0.4
pytest==7.0.1
pytest-flake8==1.1.1
scikit-learn==0.24.2
scipy==1.5.4
sklearn==0.0
-e git+https://github.com/OpenMined/PySyft.git@40261aab99e6857e0ed2e34c26a315f16d9500f7#egg=syft
threadpoolctl==3.1.0
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: PySyft
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- args==0.1.0
- attrs==22.2.0
- clint==0.5.1
- flake8==5.0.4
- importlib-metadata==4.2.0
- iniconfig==1.1.1
- joblib==1.1.1
- line-profiler==4.1.3
- mccabe==0.7.0
- numpy==1.19.5
- packaging==21.3
- phe==1.5.0
- pluggy==1.0.0
- py==1.11.0
- pycodestyle==2.9.1
- pyflakes==2.5.0
- pyparsing==3.1.4
- pyrserve==1.0.4
- pytest==7.0.1
- pytest-flake8==1.1.1
- scikit-learn==0.24.2
- scipy==1.5.4
- sklearn==0.0
- threadpoolctl==3.1.0
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/PySyft
| [
"tests/test_tensor.py::newTensorTests::test_encrypted_error",
"tests/test_tensor.py::newTensorTests::test_return_new_float_tensor",
"tests/test_tensor.py::newTensorTests::test_return_new_int_tensor"
] | [] | [
"tests/test_tensor.py::DimTests::test_as_view",
"tests/test_tensor.py::DimTests::test_dim_one",
"tests/test_tensor.py::DimTests::test_nelement",
"tests/test_tensor.py::DimTests::test_resize",
"tests/test_tensor.py::DimTests::test_resize_as",
"tests/test_tensor.py::DimTests::test_size",
"tests/test_tensor.py::DimTests::test_view",
"tests/test_tensor.py::AddTests::test_inplace",
"tests/test_tensor.py::AddTests::test_scalar",
"tests/test_tensor.py::AddTests::test_simple",
"tests/test_tensor.py::CeilTests::test_ceil",
"tests/test_tensor.py::CeilTests::test_ceil_",
"tests/test_tensor.py::ZeroTests::test_zero",
"tests/test_tensor.py::FloorTests::test_floor_",
"tests/test_tensor.py::SubTests::test_inplace",
"tests/test_tensor.py::SubTests::test_scalar",
"tests/test_tensor.py::SubTests::test_simple",
"tests/test_tensor.py::MaxTests::test_axis",
"tests/test_tensor.py::MaxTests::test_no_dim",
"tests/test_tensor.py::MultTests::test_inplace",
"tests/test_tensor.py::MultTests::test_scalar",
"tests/test_tensor.py::MultTests::test_simple",
"tests/test_tensor.py::DivTests::test_inplace",
"tests/test_tensor.py::DivTests::test_scalar",
"tests/test_tensor.py::DivTests::test_simple",
"tests/test_tensor.py::AbsTests::test_abs",
"tests/test_tensor.py::AbsTests::test_abs_",
"tests/test_tensor.py::ShapeTests::test_shape",
"tests/test_tensor.py::SqrtTests::test_sqrt",
"tests/test_tensor.py::SqrtTests::test_sqrt_",
"tests/test_tensor.py::SumTests::test_dim_is_not_none_int",
"tests/test_tensor.py::SumTests::test_dim_none_int",
"tests/test_tensor.py::EqualTests::test_equal",
"tests/test_tensor.py::EqualTests::test_equal_operation",
"tests/test_tensor.py::EqualTests::test_inequality_operation",
"tests/test_tensor.py::EqualTests::test_not_equal",
"tests/test_tensor.py::IndexTests::test_indexing",
"tests/test_tensor.py::sigmoidTests::test_sigmoid",
"tests/test_tensor.py::addmm::test_addmm_1d",
"tests/test_tensor.py::addmm::test_addmm_2d",
"tests/test_tensor.py::addmm::test_addmm__1d",
"tests/test_tensor.py::addmm::test_addmm__2d",
"tests/test_tensor.py::addcmulTests::test_addcmul_1d",
"tests/test_tensor.py::addcmulTests::test_addcmul_2d",
"tests/test_tensor.py::addcmulTests::test_addcmul__1d",
"tests/test_tensor.py::addcmulTests::test_addcmul__2d",
"tests/test_tensor.py::addcdivTests::test_addcdiv_1d",
"tests/test_tensor.py::addcdivTests::test_addcdiv_2d",
"tests/test_tensor.py::addcdivTests::test_addcdiv__1d",
"tests/test_tensor.py::addcdivTests::test_addcdiv__2d",
"tests/test_tensor.py::addmvTests::test_addmv",
"tests/test_tensor.py::addmvTests::test_addmv_",
"tests/test_tensor.py::bmmTests::test_bmm",
"tests/test_tensor.py::bmmTests::test_bmm_size",
"tests/test_tensor.py::addbmmTests::test_addbmm",
"tests/test_tensor.py::addbmmTests::test_addbmm_",
"tests/test_tensor.py::baddbmmTests::test_baddbmm",
"tests/test_tensor.py::baddbmmTests::test_baddbmm_",
"tests/test_tensor.py::transposeTests::test_t",
"tests/test_tensor.py::transposeTests::test_transpose",
"tests/test_tensor.py::transposeTests::test_transpose_",
"tests/test_tensor.py::unsqueezeTests::test_unsqueeze",
"tests/test_tensor.py::unsqueezeTests::test_unsqueeze_",
"tests/test_tensor.py::expTests::test_exp",
"tests/test_tensor.py::expTests::test_exp_",
"tests/test_tensor.py::fracTests::test_frac",
"tests/test_tensor.py::fracTests::test_frac_",
"tests/test_tensor.py::rsqrtTests::test_rsqrt",
"tests/test_tensor.py::rsqrtTests::test_rsqrt_",
"tests/test_tensor.py::signTests::test_sign",
"tests/test_tensor.py::signTests::test_sign_",
"tests/test_tensor.py::numpyTests::test_numpy",
"tests/test_tensor.py::reciprocalTests::test_reciprocal",
"tests/test_tensor.py::reciprocalTests::test_reciprocal_",
"tests/test_tensor.py::logTests::test_log",
"tests/test_tensor.py::logTests::test_log_",
"tests/test_tensor.py::logTests::test_log_1p",
"tests/test_tensor.py::logTests::test_log_1p_",
"tests/test_tensor.py::clampTests::test_clamp_float",
"tests/test_tensor.py::clampTests::test_clamp_float_in_place",
"tests/test_tensor.py::clampTests::test_clamp_int",
"tests/test_tensor.py::clampTests::test_clamp_int_in_place",
"tests/test_tensor.py::cloneTests::test_clone",
"tests/test_tensor.py::chunkTests::test_chunk",
"tests/test_tensor.py::chunkTests::test_chunk_same_size",
"tests/test_tensor.py::gtTests::test_gt__in_place_with_number",
"tests/test_tensor.py::gtTests::test_gt__in_place_with_tensor",
"tests/test_tensor.py::gtTests::test_gt_with_encrypted",
"tests/test_tensor.py::gtTests::test_gt_with_number",
"tests/test_tensor.py::gtTests::test_gt_with_tensor",
"tests/test_tensor.py::geTests::test_ge__in_place_with_number",
"tests/test_tensor.py::geTests::test_ge__in_place_with_tensor",
"tests/test_tensor.py::geTests::test_ge_with_encrypted",
"tests/test_tensor.py::geTests::test_ge_with_number",
"tests/test_tensor.py::geTests::test_ge_with_tensor",
"tests/test_tensor.py::ltTests::test_lt__in_place_with_number",
"tests/test_tensor.py::ltTests::test_lt__in_place_with_tensor",
"tests/test_tensor.py::ltTests::test_lt_with_encrypted",
"tests/test_tensor.py::ltTests::test_lt_with_number",
"tests/test_tensor.py::ltTests::test_lt_with_tensor",
"tests/test_tensor.py::leTests::test_le__in_place_with_number",
"tests/test_tensor.py::leTests::test_le__in_place_with_tensor",
"tests/test_tensor.py::leTests::test_le_with_encrypted",
"tests/test_tensor.py::leTests::test_le_with_number",
"tests/test_tensor.py::leTests::test_le_with_tensor",
"tests/test_tensor.py::bernoulliTests::test_bernoulli",
"tests/test_tensor.py::bernoulliTests::test_bernoulli_",
"tests/test_tensor.py::cauchyTests::test_cauchy_",
"tests/test_tensor.py::uniformTests::test_uniform",
"tests/test_tensor.py::uniformTests::test_uniform_",
"tests/test_tensor.py::geometricTests::test_geometric_",
"tests/test_tensor.py::fillTests::test_fill_",
"tests/test_tensor.py::topkTests::test_topK",
"tests/test_tensor.py::tolistTests::test_to_list",
"tests/test_tensor.py::traceTests::test_trace",
"tests/test_tensor.py::roundTests::test_round",
"tests/test_tensor.py::roundTests::test_round_",
"tests/test_tensor.py::repeatTests::test_repeat",
"tests/test_tensor.py::powTests::test_pow",
"tests/test_tensor.py::powTests::test_pow_",
"tests/test_tensor.py::prodTests::test_prod",
"tests/test_tensor.py::randomTests::test_random_",
"tests/test_tensor.py::nonzeroTests::test_non_zero",
"tests/test_tensor.py::cumprodTest::test_cumprod",
"tests/test_tensor.py::cumprodTest::test_cumprod_",
"tests/test_tensor.py::splitTests::test_split",
"tests/test_tensor.py::squeezeTests::test_squeeze",
"tests/test_tensor.py::expandAsTests::test_expand_as",
"tests/test_tensor.py::meanTests::test_mean",
"tests/test_tensor.py::notEqualTests::test_ne",
"tests/test_tensor.py::notEqualTests::test_ne_",
"tests/test_tensor.py::index_selectTests::test_index_select",
"tests/test_tensor.py::gatherTests::test_gather_numerical_1",
"tests/test_tensor.py::gatherTests::test_gather_numerical_2",
"tests/test_tensor.py::scatterTests::test_scatter_dim_out_Of_range",
"tests/test_tensor.py::scatterTests::test_scatter_index_out_of_range",
"tests/test_tensor.py::scatterTests::test_scatter_index_src_dimension_mismatch",
"tests/test_tensor.py::scatterTests::test_scatter_index_type",
"tests/test_tensor.py::scatterTests::test_scatter_numerical_0",
"tests/test_tensor.py::scatterTests::test_scatter_numerical_1",
"tests/test_tensor.py::scatterTests::test_scatter_numerical_2",
"tests/test_tensor.py::scatterTests::test_scatter_numerical_3",
"tests/test_tensor.py::scatterTests::test_scatter_numerical_4",
"tests/test_tensor.py::scatterTests::test_scatter_numerical_5",
"tests/test_tensor.py::scatterTests::test_scatter_numerical_6",
"tests/test_tensor.py::remainderTests::test_remainder_",
"tests/test_tensor.py::remainderTests::test_remainder_broadcasting",
"tests/test_tensor.py::testMv::test_mv",
"tests/test_tensor.py::testMv::test_mv_tensor",
"tests/test_tensor.py::masked_scatter_Tests::test_masked_scatter_1",
"tests/test_tensor.py::masked_scatter_Tests::test_masked_scatter_braodcasting_1",
"tests/test_tensor.py::masked_scatter_Tests::test_masked_scatter_braodcasting_2",
"tests/test_tensor.py::masked_fill_Tests::test_masked_fill_",
"tests/test_tensor.py::masked_fill_Tests::test_masked_fill_broadcasting",
"tests/test_tensor.py::masked_select_Tests::test_masked_select",
"tests/test_tensor.py::masked_select_Tests::test_masked_select_broadcasting_1",
"tests/test_tensor.py::masked_select_Tests::test_masked_select_broadcasting_2",
"tests/test_tensor.py::masked_select_Tests::test_tensor_base_masked_select",
"tests/test_tensor.py::eqTests::test_eq_in_place_with_number",
"tests/test_tensor.py::eqTests::test_eq_in_place_with_tensor",
"tests/test_tensor.py::eqTests::test_eq_with_number",
"tests/test_tensor.py::eqTests::test_eq_with_tensor",
"tests/test_tensor.py::mm_test::test_mm_1d",
"tests/test_tensor.py::mm_test::test_mm_2d",
"tests/test_tensor.py::mm_test::test_mm_3d",
"tests/test_tensor.py::fmodTest::test_fmod_number",
"tests/test_tensor.py::fmodTest::test_fmod_tensor",
"tests/test_tensor.py::fmod_Test::test_fmod_number",
"tests/test_tensor.py::fmod_Test::test_fmod_tensor"
] | [] | Apache License 2.0 | 1,729 | 298 | [
"syft/tensor.py"
] |
|
oasis-open__cti-taxii-client-11 | 405bbbaa58d86371adc401ee4fe8830f429fb6b2 | 2017-10-04 14:31:24 | 7bfafa96153442b8dac5fe643e256127ec6304c5 | diff --git a/taxii2client/__init__.py b/taxii2client/__init__.py
index bee06a1..0efc770 100644
--- a/taxii2client/__init__.py
+++ b/taxii2client/__init__.py
@@ -478,7 +478,7 @@ class _HTTPConnection(object):
resp.raise_for_status()
content_type = resp.headers['Content-Type']
- if content_type != accept:
+ if not content_type.startswith(accept):
msg = "Unexpected Response Content-Type: {}"
raise TAXIIServiceException(msg.format(content_type))
| Accept more flexible content type strings
When checking the content type of packets the client receives, it checks if it is an exact match (https://github.com/oasis-open/cti-taxii-client/blob/master/taxii2client/__init__.py#L481). This fails if for example "; charset=utf-8" is appended to the content type. | oasis-open/cti-taxii-client | diff --git a/taxii2client/test/test_client.py b/taxii2client/test/test_client.py
index 597ebb3..3a02747 100644
--- a/taxii2client/test/test_client.py
+++ b/taxii2client/test/test_client.py
@@ -3,7 +3,7 @@ import responses
from taxii2client import (
MEDIA_TYPE_STIX_V20, MEDIA_TYPE_TAXII_V20, AccessError, ApiRoot,
- Collection, Server
+ Collection, Server, TAXIIServiceException
)
TAXII_SERVER = 'example.com'
@@ -394,3 +394,23 @@ def test_get_status(api_root):
assert len(status.failures) == 1
assert status.pending_count == 2
assert len(status.pendings) == 2
+
+
[email protected]
+def test_content_type_valid(collection):
+ responses.add(responses.GET, GET_OBJECT_URL, GET_OBJECT_RESPONSE,
+ status=200, content_type="%s; charset=utf-8" % MEDIA_TYPE_STIX_V20)
+
+ response = collection.get_object('indicator--252c7c11-daf2-42bd-843b-be65edca9f61')
+ indicator = response['objects'][0]
+ assert indicator['id'] == 'indicator--252c7c11-daf2-42bd-843b-be65edca9f61'
+
+
[email protected]
+def test_content_type_invalid(collection):
+ responses.add(responses.GET, GET_OBJECT_URL, GET_OBJECT_RESPONSE,
+ status=200, content_type="taxii")
+
+ with pytest.raises(TAXIIServiceException) as excinfo:
+ collection.get_object('indicator--252c7c11-daf2-42bd-843b-be65edca9f61')
+ assert "Unexpected Response Content-Type" in str(excinfo.value)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 1
} | 0.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[test]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"responses",
"tox"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
charset-normalizer==2.0.12
coverage==6.2
distlib==0.3.9
filelock==3.4.1
idna==3.10
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig==1.1.1
packaging==21.3
platformdirs==2.4.0
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
pytest-cov==4.0.0
requests==2.27.1
responses==0.17.0
six==1.17.0
-e git+https://github.com/oasis-open/cti-taxii-client.git@405bbbaa58d86371adc401ee4fe8830f429fb6b2#egg=taxii2_client
toml==0.10.2
tomli==1.2.3
tox==3.28.0
typing_extensions==4.1.1
urllib3==1.26.20
virtualenv==20.17.1
zipp==3.6.0
| name: cti-taxii-client
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- charset-normalizer==2.0.12
- coverage==6.2
- distlib==0.3.9
- filelock==3.4.1
- idna==3.10
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- iniconfig==1.1.1
- packaging==21.3
- platformdirs==2.4.0
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-cov==4.0.0
- requests==2.27.1
- responses==0.17.0
- six==1.17.0
- toml==0.10.2
- tomli==1.2.3
- tox==3.28.0
- typing-extensions==4.1.1
- urllib3==1.26.20
- virtualenv==20.17.1
- zipp==3.6.0
prefix: /opt/conda/envs/cti-taxii-client
| [
"taxii2client/test/test_client.py::test_content_type_valid"
] | [] | [
"taxii2client/test/test_client.py::test_server_discovery",
"taxii2client/test/test_client.py::test_minimal_discovery_response",
"taxii2client/test/test_client.py::test_discovery_with_no_default",
"taxii2client/test/test_client.py::test_api_root",
"taxii2client/test/test_client.py::test_api_root_collections",
"taxii2client/test/test_client.py::test_collection",
"taxii2client/test/test_client.py::test_collection_unexpected_kwarg",
"taxii2client/test/test_client.py::test_get_collection_objects",
"taxii2client/test/test_client.py::test_get_object",
"taxii2client/test/test_client.py::test_cannot_write_to_readonly_collection",
"taxii2client/test/test_client.py::test_add_object_to_collection",
"taxii2client/test/test_client.py::test_cannot_read_from_writeonly_collection",
"taxii2client/test/test_client.py::test_get_manifest",
"taxii2client/test/test_client.py::test_get_status",
"taxii2client/test/test_client.py::test_content_type_invalid"
] | [] | BSD 3-Clause "New" or "Revised" License | 1,730 | 147 | [
"taxii2client/__init__.py"
] |
|
dpkp__kafka-python-1239 | cec1bdc9965b3d6729d4415e31b4dac04d603873 | 2017-10-05 17:16:48 | 618c5051493693c1305aa9f08e8a0583d5fcf0e3 | diff --git a/kafka/consumer/fetcher.py b/kafka/consumer/fetcher.py
index b86c8ec..f552038 100644
--- a/kafka/consumer/fetcher.py
+++ b/kafka/consumer/fetcher.py
@@ -923,12 +923,17 @@ class Fetcher(six.Iterator):
self._sensors.fetch_throttle_time_sensor.record(response.throttle_time_ms)
self._sensors.fetch_latency.record((recv_time - send_time) * 1000)
- class PartitionRecords(six.Iterator):
+ class PartitionRecords(object):
def __init__(self, fetch_offset, tp, messages):
self.fetch_offset = fetch_offset
self.topic_partition = tp
self.messages = messages
- self.message_idx = 0
+ # When fetching an offset that is in the middle of a
+ # compressed batch, we will get all messages in the batch.
+ # But we want to start 'take' at the fetch_offset
+ for i, msg in enumerate(messages):
+ if msg.offset == fetch_offset:
+ self.message_idx = i
def discard(self):
self.messages = None
| Seek method returning incorrect messages on compressed topic when using max_poll_records
While using seek method of `kafka.consumer.group.seek' for a given partition, offset, we are seeing the inconsistent behavior for the messages returned with the subsequent poll method.
The issue is easily reproducible for the given topic (compacted).
Part of Workflow:
```
from kafka.consumer.group import KafkaConsumer
topic_partition = TopicPartition(topic, 0)
consumer = KafkaConsumer(*consumer_config)
consumer.assign([topic_partition])
start_offset = 100 # Example value: highwatermark - 10
consumer.seek(partition=topic_partition, offset=start_offset)
messages = consumer.poll(timeout_ms=1000, max_records=1)[topic_partition]
message = messages[0]
print('Offset found:', message.offset, 'Expected offset:', start_offset)
Sample Output:
$ Offset found:80 Expected offset:100
```
Observation:
* If iterator interface is used instead of poll interface, the issue no longer exists. My guess is somewhere while polling for messages, the fetched offsets are not updated or fetched messages are not skipped. It looks like iterator method is not using fetched_records api that's why it works fine.
* At times it does give correct messages (especially when given offset is closer to highwatermark)
Please let me know if any other details are required.
| dpkp/kafka-python | diff --git a/test/test_fetcher.py b/test/test_fetcher.py
index 64eec1b..86d154f 100644
--- a/test/test_fetcher.py
+++ b/test/test_fetcher.py
@@ -7,7 +7,7 @@ import itertools
from collections import OrderedDict
from kafka.client_async import KafkaClient
-from kafka.consumer.fetcher import Fetcher, NoOffsetForPartitionError
+from kafka.consumer.fetcher import ConsumerRecord, Fetcher, NoOffsetForPartitionError
from kafka.consumer.subscription_state import SubscriptionState
from kafka.metrics import Metrics
from kafka.protocol.fetch import FetchRequest
@@ -282,3 +282,26 @@ def test__handle_offset_response(fetcher, mocker):
fetcher._handle_offset_response(fut, res)
assert fut.failed()
assert isinstance(fut.exception, NotLeaderForPartitionError)
+
+
+def test_partition_records_offset():
+ """Test that compressed messagesets are handle correctly
+ when fetch offset is in the middle of the message list
+ """
+ batch_start = 120
+ batch_end = 130
+ fetch_offset = 123
+ tp = TopicPartition('foo', 0)
+ messages = [ConsumerRecord(tp.topic, tp.partition, i,
+ None, None, 'key', 'value', 'checksum', 0, 0)
+ for i in range(batch_start, batch_end)]
+ records = Fetcher.PartitionRecords(fetch_offset, None, messages)
+ assert records.has_more()
+ msgs = records.take(1)
+ assert msgs[0].offset == 123
+ assert records.fetch_offset == 124
+ msgs = records.take(2)
+ assert len(msgs) == 2
+ assert records.has_more()
+ records.discard()
+ assert not records.has_more()
| {
"commit_name": "merge_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 1
} | 1.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-catchlog",
"pytest-sugar",
"pytest-mock",
"mock",
"python-snappy",
"lz4",
"xxhash"
],
"pre_install": [
"apt-get update",
"apt-get install -y libsnappy-dev"
],
"python": "3.6",
"reqs_path": [
"docs/requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
attrs==22.2.0
Babel==2.11.0
certifi==2021.5.30
charset-normalizer==2.0.12
coverage==6.2
cramjam==2.5.0
docutils==0.18.1
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
iniconfig==1.1.1
Jinja2==3.0.3
-e git+https://github.com/dpkp/kafka-python.git@cec1bdc9965b3d6729d4415e31b4dac04d603873#egg=kafka_python
lz4==3.1.10
MarkupSafe==2.0.1
mock==5.2.0
packaging==21.3
pluggy==1.0.0
pockets==0.9.1
py==1.11.0
Pygments==2.14.0
pyparsing==3.1.4
pytest==7.0.1
pytest-catchlog==1.2.2
pytest-cov==4.0.0
pytest-mock==3.6.1
pytest-sugar==0.9.6
python-snappy==0.7.3
pytz==2025.2
requests==2.27.1
six==1.17.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinx-rtd-theme==2.0.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jquery==4.1
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-napoleon==0.7
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
termcolor==1.1.0
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
xxhash==3.2.0
zipp==3.6.0
| name: kafka-python
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- attrs==22.2.0
- babel==2.11.0
- charset-normalizer==2.0.12
- coverage==6.2
- cramjam==2.5.0
- docutils==0.18.1
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- jinja2==3.0.3
- lz4==3.1.10
- markupsafe==2.0.1
- mock==5.2.0
- packaging==21.3
- pluggy==1.0.0
- pockets==0.9.1
- py==1.11.0
- pygments==2.14.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-catchlog==1.2.2
- pytest-cov==4.0.0
- pytest-mock==3.6.1
- pytest-sugar==0.9.6
- python-snappy==0.7.3
- pytz==2025.2
- requests==2.27.1
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinx-rtd-theme==2.0.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jquery==4.1
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-napoleon==0.7
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- termcolor==1.1.0
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- xxhash==3.2.0
- zipp==3.6.0
prefix: /opt/conda/envs/kafka-python
| [
"test/test_fetcher.py::test_partition_records_offset"
] | [] | [
"test/test_fetcher.py::test_send_fetches",
"test/test_fetcher.py::test_create_fetch_requests[api_version0-3]",
"test/test_fetcher.py::test_create_fetch_requests[api_version1-2]",
"test/test_fetcher.py::test_create_fetch_requests[api_version2-1]",
"test/test_fetcher.py::test_create_fetch_requests[api_version3-0]",
"test/test_fetcher.py::test_update_fetch_positions",
"test/test_fetcher.py::test__reset_offset",
"test/test_fetcher.py::test__send_offset_requests",
"test/test_fetcher.py::test__send_offset_requests_multiple_nodes",
"test/test_fetcher.py::test__handle_offset_response"
] | [] | Apache License 2.0 | 1,735 | 274 | [
"kafka/consumer/fetcher.py"
] |
|
pimutils__khal-720 | e4fe38059c109c0d6efdec81c98e4e8abe80b2a2 | 2017-10-05 20:52:23 | 79fd6ea2535a7e6b1f7a4fd532a932aafa1e86df | diff --git a/khal/ui/editor.py b/khal/ui/editor.py
index 4333004..94cadca 100644
--- a/khal/ui/editor.py
+++ b/khal/ui/editor.py
@@ -235,7 +235,7 @@ class StartEndEditor(urwid.WidgetWrap):
return endval
def _end_date_change(self, date):
- self._enddt = self.localize_end(dt.datetime.combine(date, self._start_time))
+ self._enddt = self.localize_end(dt.datetime.combine(date, self._end_time))
self.on_end_date_change(date)
def toggle(self, checkbox, state):
@@ -277,25 +277,23 @@ class StartEndEditor(urwid.WidgetWrap):
self.widgets.endtime = urwid.Text('')
elif state is False:
timewidth = self._timewidth + 1
- edit = ValidatedEdit(
+ raw_start_time_widget = ValidatedEdit(
dateformat=self.conf['locale']['timeformat'],
EditWidget=TimeWidget,
validate=self._validate_start_time,
edit_text=self.startdt.strftime(self.conf['locale']['timeformat']),
)
- edit = urwid.Padding(
- edit, align='left', width=self._timewidth + 1, left=1)
- self.widgets.starttime = edit
+ self.widgets.starttime = urwid.Padding(
+ raw_start_time_widget, align='left', width=self._timewidth + 1, left=1)
- edit = ValidatedEdit(
+ raw_end_time_widget = ValidatedEdit(
dateformat=self.conf['locale']['timeformat'],
EditWidget=TimeWidget,
validate=self._validate_end_time,
edit_text=self.enddt.strftime(self.conf['locale']['timeformat']),
)
- edit = urwid.Padding(
- edit, align='left', width=self._timewidth + 1, left=1)
- self.widgets.endtime = edit
+ self.widgets.endtime = urwid.Padding(
+ raw_end_time_widget, align='left', width=self._timewidth + 1, left=1)
columns = NPile([
self.checkallday,
| Double issue: invalid time range parsing + warning message display
Two problems are shown in the attached screenshot:
- when entering this event (using only default values, especially the start and end hours 8:00 to 9:00), the time range is not understood by khal
- warning messages are displayed (very shortly, less than 1 sec) in the middle of the screen, and are erased as soon as the screen is being refreshed, which prevents the user to see them

| pimutils/khal | diff --git a/tests/ui/test_editor.py b/tests/ui/test_editor.py
index 080a00d..27b7fa5 100644
--- a/tests/ui/test_editor.py
+++ b/tests/ui/test_editor.py
@@ -4,12 +4,21 @@ import icalendar
from khal.ui.editor import RecurrenceEditor, StartEndEditor
from ..utils import BERLIN, LOCALE_BERLIN
+from .canvas_render import CanvasTranslator
CONF = {'locale': LOCALE_BERLIN, 'keybindings': {}, 'view': {'monthdisplay': 'firstday'}}
START = BERLIN.localize(dt.datetime(2015, 4, 26, 22, 23))
END = BERLIN.localize(dt.datetime(2015, 4, 27, 23, 23))
+palette = {
+ 'date header focused': 'blue',
+ 'date header': 'green',
+ 'default': 'black',
+ 'editf': 'red',
+ 'edit': 'blue',
+}
+
def test_popup(monkeypatch):
"""making sure the popup calendar gets callend with the right inital value
@@ -65,3 +74,60 @@ def test_check_understood_rrule():
assert not RecurrenceEditor.check_understood_rrule(
icalendar.vRecur.from_ical('FREQ=MONTHLY;BYDAY=TH;BYSETPOS=3')
)
+
+
+def test_editor():
+ """test for the issue in #666"""
+ editor = StartEndEditor(
+ BERLIN.localize(dt.datetime(2017, 10, 2, 13)),
+ BERLIN.localize(dt.datetime(2017, 10, 4, 18)),
+ conf=CONF
+ )
+ assert editor.startdt == BERLIN.localize(dt.datetime(2017, 10, 2, 13))
+ assert editor.enddt == BERLIN.localize(dt.datetime(2017, 10, 4, 18))
+ assert editor.changed is False
+ for _ in range(3):
+ editor.keypress((10, ), 'tab')
+ for _ in range(3):
+ editor.keypress((10, ), 'shift tab')
+ assert editor.startdt == BERLIN.localize(dt.datetime(2017, 10, 2, 13))
+ assert editor.enddt == BERLIN.localize(dt.datetime(2017, 10, 4, 18))
+ assert editor.changed is False
+
+
+def test_convert_to_date():
+ """test for the issue in #666"""
+ editor = StartEndEditor(
+ BERLIN.localize(dt.datetime(2017, 10, 2, 13)),
+ BERLIN.localize(dt.datetime(2017, 10, 4, 18)),
+ conf=CONF
+ )
+ canvas = editor.render((50, ), True)
+ assert CanvasTranslator(canvas, palette).transform() == (
+ '[ ] Allday\nFrom: \x1b[31m2.10.2017 \x1b[0m \x1b[34m13:00 \x1b[0m\n'
+ 'To: \x1b[34m04.10.2017\x1b[0m \x1b[34m18:00 \x1b[0m\n'
+ )
+
+ assert editor.startdt == BERLIN.localize(dt.datetime(2017, 10, 2, 13))
+ assert editor.enddt == BERLIN.localize(dt.datetime(2017, 10, 4, 18))
+ assert editor.changed is False
+ assert editor.allday is False
+
+ # set to all day event
+ editor.keypress((10, ), 'shift tab')
+ editor.keypress((10, ), ' ')
+ for _ in range(3):
+ editor.keypress((10, ), 'tab')
+ for _ in range(3):
+ editor.keypress((10, ), 'shift tab')
+
+ canvas = editor.render((50, ), True)
+ assert CanvasTranslator(canvas, palette).transform() == (
+ '[X] Allday\nFrom: \x1b[34m02.10.2017\x1b[0m \n'
+ 'To: \x1b[34m04.10.2017\x1b[0m \n'
+ )
+
+ assert editor.changed is True
+ assert editor.allday is True
+ assert editor.startdt == dt.date(2017, 10, 2)
+ assert editor.enddt == dt.date(2017, 10, 4)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_media"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 1
} | 0.9 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"freezegun",
"vdirsyncer"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | aiohappyeyeballs==2.6.1
aiohttp==3.11.14
aiosignal==1.3.2
aiostream==0.4.5
async-timeout==5.0.1
atomicwrites==1.4.1
attrs==25.3.0
certifi==2025.1.31
charset-normalizer==3.4.1
click==8.1.8
click-log==0.4.0
configobj==5.0.9
coverage==7.8.0
exceptiongroup==1.2.2
freezegun==1.5.1
frozenlist==1.5.0
icalendar==6.1.3
idna==3.10
iniconfig==2.1.0
-e git+https://github.com/pimutils/khal.git@e4fe38059c109c0d6efdec81c98e4e8abe80b2a2#egg=khal
multidict==6.2.0
packaging==24.2
pluggy==1.5.0
propcache==0.3.1
pytest==8.3.5
pytest-cov==6.0.0
python-dateutil==2.9.0.post0
pytz==2025.2
pyxdg==0.28
requests==2.32.3
six==1.17.0
tomli==2.2.1
typing_extensions==4.13.0
tzdata==2025.2
tzlocal==5.3.1
urllib3==2.3.0
urwid==2.6.16
vdirsyncer==0.19.3
wcwidth==0.2.13
yarl==1.18.3
| name: khal
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- aiohappyeyeballs==2.6.1
- aiohttp==3.11.14
- aiosignal==1.3.2
- aiostream==0.4.5
- async-timeout==5.0.1
- atomicwrites==1.4.1
- attrs==25.3.0
- certifi==2025.1.31
- charset-normalizer==3.4.1
- click==8.1.8
- click-log==0.4.0
- configobj==5.0.9
- coverage==7.8.0
- exceptiongroup==1.2.2
- freezegun==1.5.1
- frozenlist==1.5.0
- icalendar==6.1.3
- idna==3.10
- iniconfig==2.1.0
- multidict==6.2.0
- packaging==24.2
- pluggy==1.5.0
- propcache==0.3.1
- pytest==8.3.5
- pytest-cov==6.0.0
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyxdg==0.28
- requests==2.32.3
- six==1.17.0
- tomli==2.2.1
- typing-extensions==4.13.0
- tzdata==2025.2
- tzlocal==5.3.1
- urllib3==2.3.0
- urwid==2.6.16
- vdirsyncer==0.19.3
- wcwidth==0.2.13
- yarl==1.18.3
prefix: /opt/conda/envs/khal
| [
"tests/ui/test_editor.py::test_editor"
] | [
"tests/ui/test_editor.py::test_popup"
] | [
"tests/ui/test_editor.py::test_check_understood_rrule",
"tests/ui/test_editor.py::test_convert_to_date"
] | [] | MIT License | 1,737 | 490 | [
"khal/ui/editor.py"
] |
|
oasis-open__cti-python-stix2-74 | 3c80e5e7ebb641b5feb55337f9470ec9d9d58572 | 2017-10-09 21:36:36 | ef6dade6f6773edd14aa16a2e4566e50bf74cbb4 | diff --git a/stix2/core.py b/stix2/core.py
index 3eaabb0..8ee11f5 100644
--- a/stix2/core.py
+++ b/stix2/core.py
@@ -15,6 +15,10 @@ from .utils import get_dict
class STIXObjectProperty(Property):
+ def __init__(self, allow_custom=False):
+ self.allow_custom = allow_custom
+ super(STIXObjectProperty, self).__init__()
+
def clean(self, value):
try:
dictified = get_dict(value)
@@ -25,7 +29,10 @@ class STIXObjectProperty(Property):
if 'type' in dictified and dictified['type'] == 'bundle':
raise ValueError('This property may not contain a Bundle object')
- parsed_obj = parse(dictified)
+ if self.allow_custom:
+ parsed_obj = parse(dictified, allow_custom=True)
+ else:
+ parsed_obj = parse(dictified)
return parsed_obj
@@ -48,6 +55,10 @@ class Bundle(_STIXBase):
else:
kwargs['objects'] = list(args) + kwargs.get('objects', [])
+ allow_custom = kwargs.get('allow_custom', False)
+ if allow_custom:
+ self._properties['objects'] = ListProperty(STIXObjectProperty(True))
+
super(Bundle, self).__init__(**kwargs)
diff --git a/stix2/properties.py b/stix2/properties.py
index afe994f..ca7f04c 100644
--- a/stix2/properties.py
+++ b/stix2/properties.py
@@ -124,7 +124,11 @@ class ListProperty(Property):
obj_type = self.contained.type
elif type(self.contained).__name__ is 'STIXObjectProperty':
# ^ this way of checking doesn't require a circular import
- obj_type = type(valid)
+ # valid is already an instance of a python-stix2 class; no need
+ # to turn it into a dictionary and then pass it to the class
+ # constructor again
+ result.append(valid)
+ continue
else:
obj_type = self.contained
| Unable to creat 'Bundle' when using custom fields with SDO
Hi,
When attempting to generate a bundle, a failure message is created when passing an SDO with custom objects even with `allow_custom=True` set on the SDO object.
example:
`v = factory.create(
Vulnerability,
name="Test Vulnerability",
custom_field = "This is custom",
allow_custom=True
)`
`print Bundle(v)`
Will result in the following output:
`File "stix.py", line 142, in <module>
print Bundle(v)
File "/usr/local/lib/python2.7/dist-packages/stix2/core.py", line 51, in __init__
super(Bundle, self).__init__(**kwargs)
File "/usr/local/lib/python2.7/dist-packages/stix2/base.py", line 121, in __init__
self._check_property(prop_name, prop_metadata, setting_kwargs)
File "/usr/local/lib/python2.7/dist-packages/stix2/base.py", line 55, in _check_property
kwargs[prop_name] = prop.clean(kwargs[prop_name])
File "/usr/local/lib/python2.7/dist-packages/stix2/properties.py", line 115, in clean
valid = self.contained.clean(item)
File "/usr/local/lib/python2.7/dist-packages/stix2/core.py", line 28, in clean
parsed_obj = parse(dictified)
File "/usr/local/lib/python2.7/dist-packages/stix2/core.py", line 94, in parse
return obj_class(allow_custom=allow_custom, **obj)
File "/usr/local/lib/python2.7/dist-packages/stix2/base.py", line 104, in __init__
raise ExtraPropertiesError(cls, extra_kwargs)
stix2.exceptions.ExtraPropertiesError: Unexpected properties for Vulnerability: (custom_field).`
| oasis-open/cti-python-stix2 | diff --git a/stix2/test/test_custom.py b/stix2/test/test_custom.py
index ff432c1..48529b9 100644
--- a/stix2/test/test_custom.py
+++ b/stix2/test/test_custom.py
@@ -81,6 +81,18 @@ def test_parse_identity_custom_property(data):
assert identity.foo == "bar"
+def test_custom_property_in_bundled_object():
+ identity = stix2.Identity(
+ name="John Smith",
+ identity_class="individual",
+ x_foo="bar",
+ allow_custom=True,
+ )
+ bundle = stix2.Bundle(identity, allow_custom=True)
+
+ assert bundle.objects[0].x_foo == "bar"
+
+
@stix2.sdo.CustomObject('x-new-type', [
('property1', stix2.properties.StringProperty(required=True)),
('property2', stix2.properties.IntegerProperty()),
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 2
} | 0.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"pip install tox-travis pre-commit"
],
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
antlr4-python3-runtime==4.9.3
appdirs==1.4.4
async-generator==1.10
attrs==21.4.0
Babel==2.11.0
bleach==4.1.0
bump2version==1.0.1
bumpversion==0.6.0
certifi==2021.5.30
cfgv==3.3.1
charset-normalizer==2.0.12
colorama==0.4.5
coverage==6.2
cpe==1.3.1
decorator==5.1.1
defusedxml==0.7.1
distlib==0.3.9
docutils==0.18.1
entrypoints==0.4
filelock==3.4.1
identify==2.4.4
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
importlib-resources==5.2.3
iniconfig==1.1.1
ipython-genutils==0.2.0
itsdangerous==2.0.1
Jinja2==3.0.3
jsonpointer==2.3
jsonschema==3.2.0
jupyter-client==7.1.2
jupyter-core==4.9.2
jupyterlab-pygments==0.1.2
MarkupSafe==2.0.1
mistune==0.8.4
nbclient==0.5.9
nbconvert==6.0.7
nbformat==5.1.3
nbsphinx==0.8.8
nest-asyncio==1.6.0
nodeenv==1.6.0
packaging==21.3
pandocfilters==1.5.1
platformdirs==2.4.0
pluggy==1.0.0
pre-commit==2.17.0
py==1.11.0
Pygments==2.14.0
pyparsing==3.1.4
pyrsistent==0.18.0
pytest==7.0.1
pytest-cov==4.0.0
python-dateutil==2.9.0.post0
pytz==2025.2
PyYAML==6.0.1
pyzmq==25.1.2
requests==2.27.1
requests-cache==0.7.5
rfc3339-validator==0.1.4
rfc3986-validator==0.1.1
simplejson==3.20.1
six==1.17.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinx-prompt==1.5.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
-e git+https://github.com/oasis-open/cti-python-stix2.git@3c80e5e7ebb641b5feb55337f9470ec9d9d58572#egg=stix2
stix2-patterns==2.0.0
stix2-validator==3.0.2
taxii2-client==2.3.0
testpath==0.6.0
toml==0.10.2
tomli==1.2.3
tornado==6.1
tox==3.28.0
tox-travis==0.13
traitlets==4.3.3
typing_extensions==4.1.1
url-normalize==1.4.3
urllib3==1.26.20
virtualenv==20.16.2
webcolors==1.11.1
webencodings==0.5.1
zipp==3.6.0
| name: cti-python-stix2
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- antlr4-python3-runtime==4.9.3
- appdirs==1.4.4
- async-generator==1.10
- attrs==21.4.0
- babel==2.11.0
- bleach==4.1.0
- bump2version==1.0.1
- bumpversion==0.6.0
- cfgv==3.3.1
- charset-normalizer==2.0.12
- colorama==0.4.5
- coverage==6.2
- cpe==1.3.1
- decorator==5.1.1
- defusedxml==0.7.1
- distlib==0.3.9
- docutils==0.18.1
- entrypoints==0.4
- filelock==3.4.1
- identify==2.4.4
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- importlib-resources==5.2.3
- iniconfig==1.1.1
- ipython-genutils==0.2.0
- itsdangerous==2.0.1
- jinja2==3.0.3
- jsonpointer==2.3
- jsonschema==3.2.0
- jupyter-client==7.1.2
- jupyter-core==4.9.2
- jupyterlab-pygments==0.1.2
- markupsafe==2.0.1
- mistune==0.8.4
- nbclient==0.5.9
- nbconvert==6.0.7
- nbformat==5.1.3
- nbsphinx==0.8.8
- nest-asyncio==1.6.0
- nodeenv==1.6.0
- packaging==21.3
- pandocfilters==1.5.1
- platformdirs==2.4.0
- pluggy==1.0.0
- pre-commit==2.17.0
- py==1.11.0
- pygments==2.14.0
- pyparsing==3.1.4
- pyrsistent==0.18.0
- pytest==7.0.1
- pytest-cov==4.0.0
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyyaml==6.0.1
- pyzmq==25.1.2
- requests==2.27.1
- requests-cache==0.7.5
- rfc3339-validator==0.1.4
- rfc3986-validator==0.1.1
- simplejson==3.20.1
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinx-prompt==1.5.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- stix2-patterns==2.0.0
- stix2-validator==3.0.2
- taxii2-client==2.3.0
- testpath==0.6.0
- toml==0.10.2
- tomli==1.2.3
- tornado==6.1
- tox==3.28.0
- tox-travis==0.13
- traitlets==4.3.3
- typing-extensions==4.1.1
- url-normalize==1.4.3
- urllib3==1.26.20
- virtualenv==20.16.2
- webcolors==1.11.1
- webencodings==0.5.1
- zipp==3.6.0
prefix: /opt/conda/envs/cti-python-stix2
| [
"stix2/test/test_custom.py::test_custom_property_in_bundled_object"
] | [] | [
"stix2/test/test_custom.py::test_identity_custom_property",
"stix2/test/test_custom.py::test_identity_custom_property_invalid",
"stix2/test/test_custom.py::test_identity_custom_property_allowed",
"stix2/test/test_custom.py::test_parse_identity_custom_property[{\\n",
"stix2/test/test_custom.py::test_custom_object_type",
"stix2/test/test_custom.py::test_custom_object_no_init",
"stix2/test/test_custom.py::test_parse_custom_object_type",
"stix2/test/test_custom.py::test_parse_unregistered_custom_object_type",
"stix2/test/test_custom.py::test_custom_observable_object",
"stix2/test/test_custom.py::test_custom_observable_object_no_init",
"stix2/test/test_custom.py::test_custom_observable_object_invalid_ref_property",
"stix2/test/test_custom.py::test_custom_observable_object_invalid_refs_property",
"stix2/test/test_custom.py::test_custom_observable_object_invalid_refs_list_property",
"stix2/test/test_custom.py::test_custom_observable_object_invalid_valid_refs",
"stix2/test/test_custom.py::test_custom_no_properties_raises_exception",
"stix2/test/test_custom.py::test_custom_wrong_properties_arg_raises_exception",
"stix2/test/test_custom.py::test_parse_custom_observable_object",
"stix2/test/test_custom.py::test_parse_unregistered_custom_observable_object",
"stix2/test/test_custom.py::test_parse_invalid_custom_observable_object",
"stix2/test/test_custom.py::test_observable_custom_property",
"stix2/test/test_custom.py::test_observable_custom_property_invalid",
"stix2/test/test_custom.py::test_observable_custom_property_allowed",
"stix2/test/test_custom.py::test_observed_data_with_custom_observable_object",
"stix2/test/test_custom.py::test_custom_extension",
"stix2/test/test_custom.py::test_custom_extension_wrong_observable_type",
"stix2/test/test_custom.py::test_custom_extension_invalid_observable",
"stix2/test/test_custom.py::test_custom_extension_no_properties",
"stix2/test/test_custom.py::test_custom_extension_empty_properties",
"stix2/test/test_custom.py::test_custom_extension_no_init",
"stix2/test/test_custom.py::test_parse_observable_with_custom_extension",
"stix2/test/test_custom.py::test_parse_observable_with_unregistered_custom_extension"
] | [] | BSD 3-Clause "New" or "Revised" License | 1,745 | 510 | [
"stix2/core.py",
"stix2/properties.py"
] |
|
CartoDB__cartoframes-241 | a694cfa6e9f7ff39954ef5045649eb2518632338 | 2017-10-10 19:24:12 | 39c14bf3ca697c536823d53d6179fb2ce3bae4b9 | diff --git a/cartoframes/context.py b/cartoframes/context.py
index e5885a97..c88bee6f 100644
--- a/cartoframes/context.py
+++ b/cartoframes/context.py
@@ -18,7 +18,7 @@ from carto.sql import SQLClient, BatchSQLClient
from carto.exceptions import CartoException
from .credentials import Credentials
-from .utils import dict_items, normalize_colnames, norm_colname
+from .utils import dict_items, normalize_colnames, norm_colname, join_url
from .layer import BaseMap
from .maps import non_basemap_layers, get_map_name, get_map_template
@@ -217,7 +217,7 @@ class CartoContext(object):
'minutes.\n'
'\033[1mNote:\033[0m `CartoContext.map` will not work on '
'this table until its geometries are created.'.format(
- table_url='/'.join((self.creds.base_url(),
+ table_url=join_url((self.creds.base_url(),
'dataset',
final_table_name, )),
job_id=status.get('job_id'),
@@ -227,7 +227,7 @@ class CartoContext(object):
self.sql_client.send(query)
tqdm.write('Table successfully written to CARTO: {table_url}'.format(
- table_url='/'.join((self.creds.base_url(),
+ table_url=join_url((self.creds.base_url(),
'dataset',
final_table_name, ))))
@@ -679,7 +679,7 @@ class CartoContext(object):
elif not base_layers:
# default basemap is dark with labels in back
# labels will be changed if all geoms are non-point
- layers.insert(0, BaseMap(source='dark', labels='back'))
+ layers.insert(0, BaseMap())
geoms = set()
# Setup layers
@@ -734,7 +734,7 @@ class CartoContext(object):
options.update(self._get_bounds(nb_layers))
map_name = self._send_map_template(layers, has_zoom=has_zoom)
- api_url = '/'.join((self.creds.base_url(), 'api/v1/map', ))
+ api_url = join_url((self.creds.base_url(), 'api/v1/map', ))
static_url = ('{api_url}/static/named/{map_name}'
'/{width}/{height}.png?{params}').format(
diff --git a/cartoframes/layer.py b/cartoframes/layer.py
index 821390a1..6ae19117 100644
--- a/cartoframes/layer.py
+++ b/cartoframes/layer.py
@@ -6,7 +6,7 @@ for example usage.
import pandas as pd
import webcolors
-from cartoframes.utils import cssify
+from cartoframes.utils import cssify, join_url
from cartoframes.styling import BinMethod, mint, antique, get_scheme_cartocss
# colors map data layers without color specified
@@ -53,21 +53,30 @@ class BaseMap(AbstractLayer):
"""
is_basemap = True
- def __init__(self, source='dark', labels='back', only_labels=False):
+ def __init__(self, source='voyager', labels='back', only_labels=False):
if labels not in ('front', 'back', None):
raise ValueError("labels must be None, 'front', or 'back'")
self.source = source
self.labels = labels
+ stem = 'https://cartodb-basemaps-{s}.global.ssl.fastly.net/'
+ if source == 'voyager':
+ stem += 'rastertiles'
if self.is_basic():
if only_labels:
style = source + '_only_labels'
else:
- style = source + ('_all' if labels == 'back' else '_nolabels')
-
- self.url = ('https://cartodb-basemaps-{{s}}.global.ssl.fastly.net/'
- '{style}/{{z}}/{{x}}/{{y}}.png').format(style=style)
+ if source in ('dark', 'light', ):
+ label_type = '_all'
+ else:
+ label_type = '_labels_under'
+ style = source + (label_type if labels == 'back'
+ else '_nolabels')
+ self.url = join_url((stem,
+ '{style}/{{z}}/{{x}}/{{y}}.png'.format(
+ style=style)
+ ))
elif self.source.startswith('http'):
# TODO: Remove this once baselayer urls can be passed in named
# map config
@@ -75,16 +84,17 @@ class BaseMap(AbstractLayer):
'moment')
# self.url = source
else:
- raise ValueError("`source` must be one of 'dark' or 'light'")
+ raise ValueError("`source` must be one of 'dark', 'light', or "
+ "'voyager'")
def is_basic(self):
"""Does BaseMap pull from CARTO default basemaps?
Returns:
- bool: `True` if using a CARTO basemap (Dark Matter or Positron),
- `False` otherwise.
+ bool: `True` if using a CARTO basemap (Dark Matter, Positron or
+ Voyager), `False` otherwise.
"""
- return self.source in ('dark', 'light')
+ return self.source in ('dark', 'light', 'voyager', )
class QueryLayer(AbstractLayer):
diff --git a/cartoframes/maps.py b/cartoframes/maps.py
index b5d33ee0..701cdc1f 100644
--- a/cartoframes/maps.py
+++ b/cartoframes/maps.py
@@ -19,6 +19,7 @@ def get_map_name(layers, has_zoom):
num_layers = len(non_basemap_layers(layers))
has_labels = len(layers) > 1 and layers[-1].is_basemap
has_time = has_time_layer(layers)
+ basemap_id = dict(light=0, dark=1, voyager=2)[layers[0].source]
return ('cartoframes_ver{version}'
'_layers{layers}'
@@ -31,7 +32,7 @@ def get_map_name(layers, has_zoom):
has_time=('1' if has_time else '0'),
# TODO: Remove this once baselayer urls can be passed in named
# map config
- baseid=('1' if layers[0].source == 'dark' else '0'),
+ baseid=basemap_id,
has_labels=('1' if has_labels else '0'),
has_zoom=('1' if has_zoom else '0')
)
diff --git a/cartoframes/utils.py b/cartoframes/utils.py
index ae5750e4..5f8b95f3 100644
--- a/cartoframes/utils.py
+++ b/cartoframes/utils.py
@@ -73,3 +73,8 @@ def norm_colname(colname):
if final_name[0].isdigit():
return '_' + final_name
return final_name
+
+
+def join_url(parts):
+ """join parts of URL into complete url"""
+ return '/'.join(s.strip('/') for s in parts)
| include voyager as a basemap option
e.g.,
```
https://cartodb-basemaps-a.global.ssl.fastly.net/rastertiles/voyager_nolabels/{z}/{x}/{y}.png
``` | CartoDB/cartoframes | diff --git a/test/test_context.py b/test/test_context.py
index b8abcd28..a5654d01 100644
--- a/test/test_context.py
+++ b/test/test_context.py
@@ -16,6 +16,7 @@ from carto.sql import SQLClient
import pandas as pd
WILL_SKIP = False
+warnings.filterwarnings("ignore")
class TestCartoContext(unittest.TestCase):
@@ -533,21 +534,21 @@ class TestCartoContext(unittest.TestCase):
# baseid1 = dark, labels1 = labels on top in named map name
labels_polygon = cc.map(layers=Layer(self.test_read_table))
self.assertRegexpMatches(labels_polygon.__html__(),
- '.*baseid1_labels1.*',
+ '.*baseid2_labels1.*',
msg='labels should be on top since only a '
'polygon layer is present')
- # baseid1 = dark, labels0 = labels on bottom
+ # baseid2 = voyager, labels0 = labels on bottom
labels_point = cc.map(layers=Layer(self.test_point_table))
self.assertRegexpMatches(labels_point.__html__(),
- '.*baseid1_labels0.*',
+ '.*baseid2_labels0.*',
msg='labels should be on bottom because a '
'point layer is present')
labels_multi = cc.map(layers=[Layer(self.test_point_table),
Layer(self.test_read_table)])
self.assertRegexpMatches(labels_multi.__html__(),
- '.*baseid1_labels0.*',
+ '.*baseid2_labels0.*',
msg='labels should be on bottom because a '
'point layer is present')
# create a layer with points and polys, but with more polys
@@ -566,7 +567,7 @@ class TestCartoContext(unittest.TestCase):
points=self.test_point_table))
multi_geom = cc.map(layers=multi_geom_layer)
self.assertRegexpMatches(multi_geom.__html__(),
- '.*baseid1_labels1.*',
+ '.*baseid2_labels1.*',
msg='layer has more polys than points, so it '
'should default to polys labels (on top)')
diff --git a/test/test_layer.py b/test/test_layer.py
index 428c88a5..e13f140e 100644
--- a/test/test_layer.py
+++ b/test/test_layer.py
@@ -15,18 +15,23 @@ class TestBaseMap(unittest.TestCase):
# basemaps with baked-in labels
self.dark_map_all = BaseMap(source='dark')
self.light_map_all = BaseMap(source='light')
+ self.voyager_labels_under = BaseMap(source='voyager')
# basemaps with no labels
self.dark_map_no_labels = BaseMap(source='dark',
labels=None)
self.light_map_no_labels = BaseMap(source='light',
labels=None)
+ self.voyager_map_no_labels = BaseMap(source='voyager',
+ labels=None)
# labels with no basemaps
self.dark_only_labels = BaseMap(source='dark',
only_labels=True)
self.light_only_labels = BaseMap(source='light',
only_labels=True)
+ self.voyager_only_labels = BaseMap(source='voyager',
+ only_labels=True)
def test_basemap_invalid(self):
"""layer.Basemap exceptions on invalid source"""
@@ -53,23 +58,34 @@ class TestBaseMap(unittest.TestCase):
self.assertEqual(self.light_map_all.url,
'https://cartodb-basemaps-{s}.global.ssl.fastly.net/'
'light_all/{z}/{x}/{y}.png')
+ self.assertEqual(self.voyager_labels_under.url,
+ 'https://cartodb-basemaps-{s}.global.ssl.fastly.net/'
+ 'rastertiles/voyager_labels_under/{z}/{x}/{y}.png')
self.assertEqual(self.dark_map_no_labels.url,
'https://cartodb-basemaps-{s}.global.ssl.fastly.net/'
'dark_nolabels/{z}/{x}/{y}.png')
self.assertEqual(self.light_map_no_labels.url,
'https://cartodb-basemaps-{s}.global.ssl.fastly.net/'
'light_nolabels/{z}/{x}/{y}.png')
+ self.assertEqual(self.voyager_map_no_labels.url,
+ 'https://cartodb-basemaps-{s}.global.ssl.fastly.net/'
+ 'rastertiles/voyager_nolabels/{z}/{x}/{y}.png')
self.assertEqual(self.light_only_labels.url,
'https://cartodb-basemaps-{s}.global.ssl.fastly.net/'
'light_only_labels/{z}/{x}/{y}.png')
self.assertEqual(self.dark_only_labels.url,
'https://cartodb-basemaps-{s}.global.ssl.fastly.net/'
'dark_only_labels/{z}/{x}/{y}.png')
+ self.assertEqual(self.voyager_only_labels.url,
+ 'https://cartodb-basemaps-{s}.global.ssl.fastly.net/'
+ 'rastertiles/voyager_only_labels/{z}/{x}/{y}.png')
# ensure self.is_basic() works as intended
self.assertTrue(self.light_map_all.is_basic(),
msg='is a basic carto basemap')
self.assertTrue(self.dark_map_all.is_basic())
+ self.assertTrue(self.voyager_labels_under.is_basic(),
+ msg='is a basic carto basemap')
class TestQueryLayer(unittest.TestCase):
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 4
} | 0.2 | {
"env_vars": null,
"env_yml_path": [],
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"nose-cov",
"pytest"
],
"pre_install": [],
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | appdirs==1.4.4
attrs==22.2.0
backcall==0.2.0
carto==1.11.3
-e git+https://github.com/CartoDB/cartoframes.git@a694cfa6e9f7ff39954ef5045649eb2518632338#egg=cartoframes
certifi==2021.5.30
charset-normalizer==2.0.12
cov-core==1.15.0
coverage==6.2
decorator==5.1.1
future==1.0.0
idna==3.10
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig==1.1.1
ipython==7.16.3
ipython-genutils==0.2.0
jedi==0.17.2
nose==1.3.7
nose-cov==1.6
numpy==1.19.5
packaging==21.3
pandas==1.1.5
parso==0.7.1
pexpect==4.9.0
pickleshare==0.7.5
pluggy==1.0.0
prompt-toolkit==3.0.36
ptyprocess==0.7.0
py==1.11.0
Pygments==2.14.0
pyparsing==3.1.4
pyrestcli==0.6.11
pytest==7.0.1
python-dateutil==2.9.0.post0
pytz==2025.2
requests==2.27.1
Shapely==1.8.5.post1
six==1.17.0
tomli==1.2.3
tqdm==4.64.1
traitlets==4.3.3
typing_extensions==4.1.1
urllib3==1.26.20
wcwidth==0.2.13
webcolors==1.7
zipp==3.6.0
| name: cartoframes
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- appdirs==1.4.4
- attrs==22.2.0
- backcall==0.2.0
- carto==1.11.3
- charset-normalizer==2.0.12
- cov-core==1.15.0
- coverage==6.2
- decorator==5.1.1
- future==1.0.0
- idna==3.10
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- iniconfig==1.1.1
- ipython==7.16.3
- ipython-genutils==0.2.0
- jedi==0.17.2
- nose==1.3.7
- nose-cov==1.6
- numpy==1.19.5
- packaging==21.3
- pandas==1.1.5
- parso==0.7.1
- pexpect==4.9.0
- pickleshare==0.7.5
- pluggy==1.0.0
- prompt-toolkit==3.0.36
- ptyprocess==0.7.0
- py==1.11.0
- pygments==2.14.0
- pyparsing==3.1.4
- pyrestcli==0.6.11
- pytest==7.0.1
- python-dateutil==2.9.0.post0
- pytz==2025.2
- requests==2.27.1
- shapely==1.8.5.post1
- six==1.17.0
- tomli==1.2.3
- tqdm==4.64.1
- traitlets==4.3.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- wcwidth==0.2.13
- webcolors==1.7
- zipp==3.6.0
prefix: /opt/conda/envs/cartoframes
| [
"test/test_layer.py::TestBaseMap::test_basemap_invalid",
"test/test_layer.py::TestBaseMap::test_basemap_source"
] | [
"test/test_context.py::TestCartoContext::test_add_encoded_geom",
"test/test_context.py::TestCartoContext::test_cartocontext",
"test/test_context.py::TestCartoContext::test_cartocontext_check_query",
"test/test_context.py::TestCartoContext::test_cartocontext_credentials",
"test/test_context.py::TestCartoContext::test_cartocontext_delete",
"test/test_context.py::TestCartoContext::test_cartocontext_handle_import",
"test/test_context.py::TestCartoContext::test_cartocontext_isorguser",
"test/test_context.py::TestCartoContext::test_cartocontext_map",
"test/test_context.py::TestCartoContext::test_cartocontext_map_geom_type",
"test/test_context.py::TestCartoContext::test_cartocontext_mixed_case",
"test/test_context.py::TestCartoContext::test_cartocontext_read",
"test/test_context.py::TestCartoContext::test_cartocontext_table_exists",
"test/test_context.py::TestCartoContext::test_cartocontext_write",
"test/test_context.py::TestCartoContext::test_cartoframes_query",
"test/test_context.py::TestCartoContext::test_cartoframes_sync",
"test/test_context.py::TestCartoContext::test_data_obs_functions",
"test/test_context.py::TestCartoContext::test_debug_print",
"test/test_context.py::TestCartoContext::test_get_bounds",
"test/test_context.py::TestBatchJobStatus::test_batchjobstatus",
"test/test_context.py::TestBatchJobStatus::test_batchjobstatus_methods",
"test/test_context.py::TestBatchJobStatus::test_batchjobstatus_repr"
] | [
"test/test_context.py::TestCartoContext::test_cartocontext_send_dataframe",
"test/test_context.py::TestCartoContext::test_decode_geom",
"test/test_context.py::TestCartoContext::test_df2pg_schema",
"test/test_context.py::TestCartoContext::test_dtypes2pg",
"test/test_context.py::TestCartoContext::test_encode_geom",
"test/test_context.py::TestCartoContext::test_pg2dtypes",
"test/test_layer.py::TestAbstractLayer::test_class",
"test/test_layer.py::TestQueryLayer::test_querylayer_colors",
"test/test_layer.py::TestQueryLayer::test_querylayer_size_and_time",
"test/test_layer.py::TestQueryLayer::test_querylayer_size_column_key",
"test/test_layer.py::TestQueryLayer::test_querylayer_size_default",
"test/test_layer.py::TestQueryLayer::test_querylayer_size_defaults",
"test/test_layer.py::TestQueryLayer::test_querylayer_time_default",
"test/test_layer.py::TestQueryLayer::test_querylayer_time_errors"
] | [] | BSD 3-Clause "New" or "Revised" License | 1,749 | 1,682 | [
"cartoframes/context.py",
"cartoframes/layer.py",
"cartoframes/maps.py",
"cartoframes/utils.py"
] |
|
bmcfee__pumpp-92 | 0d6ef78f21fb7bf4736b2ca383c3377f7e0f8f9d | 2017-10-12 15:13:19 | 68a14caccc9acdfc280c98fed85f6c1ee2596702 | diff --git a/pumpp/core.py b/pumpp/core.py
index 0db13b0..227cb81 100644
--- a/pumpp/core.py
+++ b/pumpp/core.py
@@ -11,6 +11,7 @@ Core functionality
import librosa
import jams
+import six
from .base import Slicer
from .exceptions import ParameterError
@@ -225,3 +226,28 @@ class Pump(Slicer):
def __call__(self, *args, **kwargs):
return self.transform(*args, **kwargs)
+
+ def __str__(self):
+ rstr = '<Pump [{:d} operators, {:d} fields]>'.format(len(self.ops),
+ len(self.fields))
+ for key in self.opmap:
+ rstr += "\n - '{}': {}".format(key, type(self.opmap[key]))
+ for field in self.opmap[key].fields:
+ rstr += "\n - '{}': {}".format(field, self.opmap[key].fields[field])
+ return rstr
+
+ def _repr_html_(self):
+
+ rstr = '<dl class="row">'
+ for key in self.opmap:
+ rstr += '\n <dt class="col-sm-3">{:s}</dt>'.format(key)
+ rstr += '\n <dd class="col-sm-9">{}'.format(self.opmap[key])
+
+ rstr += '<ul>'
+ for fkey, field in six.iteritems(self.opmap[key].fields):
+ rstr += '\n <li>{:s} [shape={}, dtype={}]</li>'.format(fkey,
+ field.shape,
+ field.dtype.__name__)
+ rstr += '</ul></dd>'
+ rstr += '</dl>'
+ return rstr
| Meaningful repr for pump objects
#### Description
It would be useful if `repr(Pump)` showed the operator map directly. | bmcfee/pumpp | diff --git a/tests/test_core.py b/tests/test_core.py
index b728699..ae5b1fc 100644
--- a/tests/test_core.py
+++ b/tests/test_core.py
@@ -212,3 +212,45 @@ def test_pump_layers(sr, hop_length):
assert L1[k].dtype == L2[k].dtype
for d1, d2 in zip(L1[k].shape, L2[k].shape):
assert str(d1) == str(d2)
+
+
+def test_pump_str(sr, hop_length):
+
+ ops = [pumpp.feature.STFT(name='stft', sr=sr,
+ hop_length=hop_length,
+ n_fft=2*hop_length),
+
+ pumpp.task.BeatTransformer(name='beat', sr=sr,
+ hop_length=hop_length),
+
+ pumpp.task.ChordTransformer(name='chord', sr=sr,
+ hop_length=hop_length),
+
+ pumpp.task.StaticLabelTransformer(name='tags',
+ namespace='tag_open',
+ labels=['rock', 'jazz'])]
+
+ pump = pumpp.Pump(*ops)
+
+ assert isinstance(str(pump), str)
+
+
+def test_pump_repr_html(sr, hop_length):
+
+ ops = [pumpp.feature.STFT(name='stft', sr=sr,
+ hop_length=hop_length,
+ n_fft=2*hop_length),
+
+ pumpp.task.BeatTransformer(name='beat', sr=sr,
+ hop_length=hop_length),
+
+ pumpp.task.ChordTransformer(name='chord', sr=sr,
+ hop_length=hop_length),
+
+ pumpp.task.StaticLabelTransformer(name='tags',
+ namespace='tag_open',
+ labels=['rock', 'jazz'])]
+
+ pump = pumpp.Pump(*ops)
+
+ assert isinstance(pump._repr_html_(), str)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 1
} | 0.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[tests]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": [
"apt-get update",
"apt-get install -y ffmpeg"
],
"python": "3.6",
"reqs_path": [
"docs/requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | absl-py==0.15.0
alabaster==0.7.13
appdirs==1.4.4
astunparse==1.6.3
attrs==22.2.0
audioread==3.0.1
Babel==2.11.0
cached-property==1.5.2
cachetools==4.2.4
certifi==2021.5.30
cffi==1.15.1
charset-normalizer==2.0.12
clang==5.0
coverage==6.2
dataclasses==0.8
decorator==5.1.1
docutils==0.18.1
flatbuffers==1.12
gast==0.4.0
google-auth==1.35.0
google-auth-oauthlib==0.4.6
google-pasta==0.2.0
grpcio==1.48.2
h5py==3.1.0
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig==1.1.1
jams==0.3.4
Jinja2==3.0.3
joblib==1.1.1
jsonschema==3.2.0
keras==2.6.0
Keras-Preprocessing==1.1.2
librosa==0.9.2
llvmlite==0.36.0
Markdown==3.3.7
MarkupSafe==2.0.1
mir_eval==0.8.2
numba==0.53.1
numpy==1.19.5
numpydoc==1.1.0
oauthlib==3.2.2
opt-einsum==3.3.0
packaging==21.3
pandas==1.1.5
pluggy==1.0.0
pooch==1.6.0
protobuf==3.19.6
-e git+https://github.com/bmcfee/pumpp.git@0d6ef78f21fb7bf4736b2ca383c3377f7e0f8f9d#egg=pumpp
py==1.11.0
pyasn1==0.5.1
pyasn1-modules==0.3.0
pycparser==2.21
Pygments==2.14.0
pyparsing==3.1.4
pyrsistent==0.18.0
pytest==7.0.1
pytest-cov==4.0.0
python-dateutil==2.9.0.post0
pytz==2025.2
requests==2.27.1
requests-oauthlib==2.0.0
resampy==0.4.3
rsa==4.9
scikit-learn==0.24.2
scipy==1.5.4
six==1.15.0
snowballstemmer==2.2.0
sortedcontainers==2.4.0
soundfile==0.13.1
Sphinx==5.3.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
tensorboard==2.6.0
tensorboard-data-server==0.6.1
tensorboard-plugin-wit==1.8.1
tensorflow==2.6.2
tensorflow-estimator==2.6.0
termcolor==1.1.0
threadpoolctl==3.1.0
tomli==1.2.3
typing-extensions==3.7.4.3
urllib3==1.26.20
Werkzeug==2.0.3
wrapt==1.12.1
zipp==3.6.0
| name: pumpp
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- absl-py==0.15.0
- alabaster==0.7.13
- appdirs==1.4.4
- astunparse==1.6.3
- attrs==22.2.0
- audioread==3.0.1
- babel==2.11.0
- cached-property==1.5.2
- cachetools==4.2.4
- cffi==1.15.1
- charset-normalizer==2.0.12
- clang==5.0
- coverage==6.2
- dataclasses==0.8
- decorator==5.1.1
- docutils==0.18.1
- flatbuffers==1.12
- gast==0.4.0
- google-auth==1.35.0
- google-auth-oauthlib==0.4.6
- google-pasta==0.2.0
- grpcio==1.48.2
- h5py==3.1.0
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- iniconfig==1.1.1
- jams==0.3.4
- jinja2==3.0.3
- joblib==1.1.1
- jsonschema==3.2.0
- keras==2.6.0
- keras-preprocessing==1.1.2
- librosa==0.9.2
- llvmlite==0.36.0
- markdown==3.3.7
- markupsafe==2.0.1
- mir-eval==0.8.2
- numba==0.53.1
- numpy==1.19.5
- numpydoc==1.1.0
- oauthlib==3.2.2
- opt-einsum==3.3.0
- packaging==21.3
- pandas==1.1.5
- pluggy==1.0.0
- pooch==1.6.0
- protobuf==3.19.6
- py==1.11.0
- pyasn1==0.5.1
- pyasn1-modules==0.3.0
- pycparser==2.21
- pygments==2.14.0
- pyparsing==3.1.4
- pyrsistent==0.18.0
- pytest==7.0.1
- pytest-cov==4.0.0
- python-dateutil==2.9.0.post0
- pytz==2025.2
- requests==2.27.1
- requests-oauthlib==2.0.0
- resampy==0.4.3
- rsa==4.9
- scikit-learn==0.24.2
- scipy==1.5.4
- six==1.15.0
- snowballstemmer==2.2.0
- sortedcontainers==2.4.0
- soundfile==0.13.1
- sphinx==5.3.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- tensorboard==2.6.0
- tensorboard-data-server==0.6.1
- tensorboard-plugin-wit==1.8.1
- tensorflow==2.6.2
- tensorflow-estimator==2.6.0
- termcolor==1.1.0
- threadpoolctl==3.1.0
- tomli==1.2.3
- typing-extensions==3.7.4.3
- urllib3==1.26.20
- werkzeug==2.0.3
- wrapt==1.12.1
- zipp==3.6.0
prefix: /opt/conda/envs/pumpp
| [
"tests/test_core.py::test_pump_repr_html[11025-128]",
"tests/test_core.py::test_pump_repr_html[11025-512]",
"tests/test_core.py::test_pump_repr_html[22050-128]",
"tests/test_core.py::test_pump_repr_html[22050-512]"
] | [] | [
"tests/test_core.py::test_pump[None-11025-128-False-None-None-None]",
"tests/test_core.py::test_pump[None-11025-128-False-None-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[None-11025-128-False-None-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[None-11025-128-False-None-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump[None-11025-128-False-22050-None-None]",
"tests/test_core.py::test_pump[None-11025-128-False-22050-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[None-11025-128-False-22050-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[None-11025-128-False-22050-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump[None-11025-128-False-44100-None-None]",
"tests/test_core.py::test_pump[None-11025-128-False-44100-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[None-11025-128-False-44100-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[None-11025-128-False-44100-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump[None-11025-128-True-None-None-None]",
"tests/test_core.py::test_pump[None-11025-128-True-None-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[None-11025-128-True-None-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[None-11025-128-True-None-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump[None-11025-128-True-22050-None-None]",
"tests/test_core.py::test_pump[None-11025-128-True-22050-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[None-11025-128-True-22050-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[None-11025-128-True-22050-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump[None-11025-128-True-44100-None-None]",
"tests/test_core.py::test_pump[None-11025-128-True-44100-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[None-11025-128-True-44100-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[None-11025-128-True-44100-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump[None-11025-512-False-None-None-None]",
"tests/test_core.py::test_pump[None-11025-512-False-None-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[None-11025-512-False-None-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[None-11025-512-False-None-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump[None-11025-512-False-22050-None-None]",
"tests/test_core.py::test_pump[None-11025-512-False-22050-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[None-11025-512-False-22050-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[None-11025-512-False-22050-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump[None-11025-512-False-44100-None-None]",
"tests/test_core.py::test_pump[None-11025-512-False-44100-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[None-11025-512-False-44100-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[None-11025-512-False-44100-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump[None-11025-512-True-None-None-None]",
"tests/test_core.py::test_pump[None-11025-512-True-None-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[None-11025-512-True-None-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[None-11025-512-True-None-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump[None-11025-512-True-22050-None-None]",
"tests/test_core.py::test_pump[None-11025-512-True-22050-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[None-11025-512-True-22050-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[None-11025-512-True-22050-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump[None-11025-512-True-44100-None-None]",
"tests/test_core.py::test_pump[None-11025-512-True-44100-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[None-11025-512-True-44100-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[None-11025-512-True-44100-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump[None-22050-128-False-None-None-None]",
"tests/test_core.py::test_pump[None-22050-128-False-None-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[None-22050-128-False-None-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[None-22050-128-False-None-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump[None-22050-128-False-22050-None-None]",
"tests/test_core.py::test_pump[None-22050-128-False-22050-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[None-22050-128-False-22050-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[None-22050-128-False-22050-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump[None-22050-128-False-44100-None-None]",
"tests/test_core.py::test_pump[None-22050-128-False-44100-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[None-22050-128-False-44100-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[None-22050-128-False-44100-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump[None-22050-128-True-None-None-None]",
"tests/test_core.py::test_pump[None-22050-128-True-None-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[None-22050-128-True-None-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[None-22050-128-True-None-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump[None-22050-128-True-22050-None-None]",
"tests/test_core.py::test_pump[None-22050-128-True-22050-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[None-22050-128-True-22050-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[None-22050-128-True-22050-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump[None-22050-128-True-44100-None-None]",
"tests/test_core.py::test_pump[None-22050-128-True-44100-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[None-22050-128-True-44100-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[None-22050-128-True-44100-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump[None-22050-512-False-None-None-None]",
"tests/test_core.py::test_pump[None-22050-512-False-None-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[None-22050-512-False-None-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[None-22050-512-False-None-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump[None-22050-512-False-22050-None-None]",
"tests/test_core.py::test_pump[None-22050-512-False-22050-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[None-22050-512-False-22050-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[None-22050-512-False-22050-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump[None-22050-512-False-44100-None-None]",
"tests/test_core.py::test_pump[None-22050-512-False-44100-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[None-22050-512-False-44100-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[None-22050-512-False-44100-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump[None-22050-512-True-None-None-None]",
"tests/test_core.py::test_pump[None-22050-512-True-None-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[None-22050-512-True-None-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[None-22050-512-True-None-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump[None-22050-512-True-22050-None-None]",
"tests/test_core.py::test_pump[None-22050-512-True-22050-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[None-22050-512-True-22050-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[None-22050-512-True-22050-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump[None-22050-512-True-44100-None-None]",
"tests/test_core.py::test_pump[None-22050-512-True-44100-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[None-22050-512-True-44100-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[None-22050-512-True-44100-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump[tests/data/test.jams-11025-128-False-None-None-None]",
"tests/test_core.py::test_pump[tests/data/test.jams-11025-128-False-None-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[tests/data/test.jams-11025-128-False-None-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[tests/data/test.jams-11025-128-False-None-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump[tests/data/test.jams-11025-128-False-22050-None-None]",
"tests/test_core.py::test_pump[tests/data/test.jams-11025-128-False-22050-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[tests/data/test.jams-11025-128-False-22050-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[tests/data/test.jams-11025-128-False-22050-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump[tests/data/test.jams-11025-128-False-44100-None-None]",
"tests/test_core.py::test_pump[tests/data/test.jams-11025-128-False-44100-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[tests/data/test.jams-11025-128-False-44100-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[tests/data/test.jams-11025-128-False-44100-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump[tests/data/test.jams-11025-128-True-None-None-None]",
"tests/test_core.py::test_pump[tests/data/test.jams-11025-128-True-None-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[tests/data/test.jams-11025-128-True-None-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[tests/data/test.jams-11025-128-True-None-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump[tests/data/test.jams-11025-128-True-22050-None-None]",
"tests/test_core.py::test_pump[tests/data/test.jams-11025-128-True-22050-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[tests/data/test.jams-11025-128-True-22050-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[tests/data/test.jams-11025-128-True-22050-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump[tests/data/test.jams-11025-128-True-44100-None-None]",
"tests/test_core.py::test_pump[tests/data/test.jams-11025-128-True-44100-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[tests/data/test.jams-11025-128-True-44100-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[tests/data/test.jams-11025-128-True-44100-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump[tests/data/test.jams-11025-512-False-None-None-None]",
"tests/test_core.py::test_pump[tests/data/test.jams-11025-512-False-None-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[tests/data/test.jams-11025-512-False-None-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[tests/data/test.jams-11025-512-False-None-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump[tests/data/test.jams-11025-512-False-22050-None-None]",
"tests/test_core.py::test_pump[tests/data/test.jams-11025-512-False-22050-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[tests/data/test.jams-11025-512-False-22050-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[tests/data/test.jams-11025-512-False-22050-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump[tests/data/test.jams-11025-512-False-44100-None-None]",
"tests/test_core.py::test_pump[tests/data/test.jams-11025-512-False-44100-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[tests/data/test.jams-11025-512-False-44100-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[tests/data/test.jams-11025-512-False-44100-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump[tests/data/test.jams-11025-512-True-None-None-None]",
"tests/test_core.py::test_pump[tests/data/test.jams-11025-512-True-None-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[tests/data/test.jams-11025-512-True-None-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[tests/data/test.jams-11025-512-True-None-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump[tests/data/test.jams-11025-512-True-22050-None-None]",
"tests/test_core.py::test_pump[tests/data/test.jams-11025-512-True-22050-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[tests/data/test.jams-11025-512-True-22050-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[tests/data/test.jams-11025-512-True-22050-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump[tests/data/test.jams-11025-512-True-44100-None-None]",
"tests/test_core.py::test_pump[tests/data/test.jams-11025-512-True-44100-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[tests/data/test.jams-11025-512-True-44100-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[tests/data/test.jams-11025-512-True-44100-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump[tests/data/test.jams-22050-128-False-None-None-None]",
"tests/test_core.py::test_pump[tests/data/test.jams-22050-128-False-None-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[tests/data/test.jams-22050-128-False-None-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[tests/data/test.jams-22050-128-False-None-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump[tests/data/test.jams-22050-128-False-22050-None-None]",
"tests/test_core.py::test_pump[tests/data/test.jams-22050-128-False-22050-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[tests/data/test.jams-22050-128-False-22050-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[tests/data/test.jams-22050-128-False-22050-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump[tests/data/test.jams-22050-128-False-44100-None-None]",
"tests/test_core.py::test_pump[tests/data/test.jams-22050-128-False-44100-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[tests/data/test.jams-22050-128-False-44100-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[tests/data/test.jams-22050-128-False-44100-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump[tests/data/test.jams-22050-128-True-None-None-None]",
"tests/test_core.py::test_pump[tests/data/test.jams-22050-128-True-None-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[tests/data/test.jams-22050-128-True-None-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[tests/data/test.jams-22050-128-True-None-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump[tests/data/test.jams-22050-128-True-22050-None-None]",
"tests/test_core.py::test_pump[tests/data/test.jams-22050-128-True-22050-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[tests/data/test.jams-22050-128-True-22050-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[tests/data/test.jams-22050-128-True-22050-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump[tests/data/test.jams-22050-128-True-44100-None-None]",
"tests/test_core.py::test_pump[tests/data/test.jams-22050-128-True-44100-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[tests/data/test.jams-22050-128-True-44100-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[tests/data/test.jams-22050-128-True-44100-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump[tests/data/test.jams-22050-512-False-None-None-None]",
"tests/test_core.py::test_pump[tests/data/test.jams-22050-512-False-None-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[tests/data/test.jams-22050-512-False-None-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[tests/data/test.jams-22050-512-False-None-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump[tests/data/test.jams-22050-512-False-22050-None-None]",
"tests/test_core.py::test_pump[tests/data/test.jams-22050-512-False-22050-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[tests/data/test.jams-22050-512-False-22050-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[tests/data/test.jams-22050-512-False-22050-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump[tests/data/test.jams-22050-512-False-44100-None-None]",
"tests/test_core.py::test_pump[tests/data/test.jams-22050-512-False-44100-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[tests/data/test.jams-22050-512-False-44100-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[tests/data/test.jams-22050-512-False-44100-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump[tests/data/test.jams-22050-512-True-None-None-None]",
"tests/test_core.py::test_pump[tests/data/test.jams-22050-512-True-None-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[tests/data/test.jams-22050-512-True-None-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[tests/data/test.jams-22050-512-True-None-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump[tests/data/test.jams-22050-512-True-22050-None-None]",
"tests/test_core.py::test_pump[tests/data/test.jams-22050-512-True-22050-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[tests/data/test.jams-22050-512-True-22050-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[tests/data/test.jams-22050-512-True-22050-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump[tests/data/test.jams-22050-512-True-44100-None-None]",
"tests/test_core.py::test_pump[tests/data/test.jams-22050-512-True-44100-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[tests/data/test.jams-22050-512-True-44100-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[tests/data/test.jams-22050-512-True-44100-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump[jam2-11025-128-False-None-None-None]",
"tests/test_core.py::test_pump[jam2-11025-128-False-None-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[jam2-11025-128-False-None-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[jam2-11025-128-False-None-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump[jam2-11025-128-False-22050-None-None]",
"tests/test_core.py::test_pump[jam2-11025-128-False-22050-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[jam2-11025-128-False-22050-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[jam2-11025-128-False-22050-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump[jam2-11025-128-False-44100-None-None]",
"tests/test_core.py::test_pump[jam2-11025-128-False-44100-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[jam2-11025-128-False-44100-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[jam2-11025-128-False-44100-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump[jam2-11025-128-True-None-None-None]",
"tests/test_core.py::test_pump[jam2-11025-128-True-None-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[jam2-11025-128-True-None-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[jam2-11025-128-True-None-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump[jam2-11025-128-True-22050-None-None]",
"tests/test_core.py::test_pump[jam2-11025-128-True-22050-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[jam2-11025-128-True-22050-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[jam2-11025-128-True-22050-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump[jam2-11025-128-True-44100-None-None]",
"tests/test_core.py::test_pump[jam2-11025-128-True-44100-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[jam2-11025-128-True-44100-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[jam2-11025-128-True-44100-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump[jam2-11025-512-False-None-None-None]",
"tests/test_core.py::test_pump[jam2-11025-512-False-None-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[jam2-11025-512-False-None-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[jam2-11025-512-False-None-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump[jam2-11025-512-False-22050-None-None]",
"tests/test_core.py::test_pump[jam2-11025-512-False-22050-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[jam2-11025-512-False-22050-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[jam2-11025-512-False-22050-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump[jam2-11025-512-False-44100-None-None]",
"tests/test_core.py::test_pump[jam2-11025-512-False-44100-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[jam2-11025-512-False-44100-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[jam2-11025-512-False-44100-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump[jam2-11025-512-True-None-None-None]",
"tests/test_core.py::test_pump[jam2-11025-512-True-None-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[jam2-11025-512-True-None-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[jam2-11025-512-True-None-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump[jam2-11025-512-True-22050-None-None]",
"tests/test_core.py::test_pump[jam2-11025-512-True-22050-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[jam2-11025-512-True-22050-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[jam2-11025-512-True-22050-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump[jam2-11025-512-True-44100-None-None]",
"tests/test_core.py::test_pump[jam2-11025-512-True-44100-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[jam2-11025-512-True-44100-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[jam2-11025-512-True-44100-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump[jam2-22050-128-False-None-None-None]",
"tests/test_core.py::test_pump[jam2-22050-128-False-None-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[jam2-22050-128-False-None-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[jam2-22050-128-False-None-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump[jam2-22050-128-False-22050-None-None]",
"tests/test_core.py::test_pump[jam2-22050-128-False-22050-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[jam2-22050-128-False-22050-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[jam2-22050-128-False-22050-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump[jam2-22050-128-False-44100-None-None]",
"tests/test_core.py::test_pump[jam2-22050-128-False-44100-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[jam2-22050-128-False-44100-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[jam2-22050-128-False-44100-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump[jam2-22050-128-True-None-None-None]",
"tests/test_core.py::test_pump[jam2-22050-128-True-None-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[jam2-22050-128-True-None-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[jam2-22050-128-True-None-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump[jam2-22050-128-True-22050-None-None]",
"tests/test_core.py::test_pump[jam2-22050-128-True-22050-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[jam2-22050-128-True-22050-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[jam2-22050-128-True-22050-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump[jam2-22050-128-True-44100-None-None]",
"tests/test_core.py::test_pump[jam2-22050-128-True-44100-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[jam2-22050-128-True-44100-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[jam2-22050-128-True-44100-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump[jam2-22050-512-False-None-None-None]",
"tests/test_core.py::test_pump[jam2-22050-512-False-None-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[jam2-22050-512-False-None-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[jam2-22050-512-False-None-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump[jam2-22050-512-False-22050-None-None]",
"tests/test_core.py::test_pump[jam2-22050-512-False-22050-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[jam2-22050-512-False-22050-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[jam2-22050-512-False-22050-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump[jam2-22050-512-False-44100-None-None]",
"tests/test_core.py::test_pump[jam2-22050-512-False-44100-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[jam2-22050-512-False-44100-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[jam2-22050-512-False-44100-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump[jam2-22050-512-True-None-None-None]",
"tests/test_core.py::test_pump[jam2-22050-512-True-None-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[jam2-22050-512-True-None-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[jam2-22050-512-True-None-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump[jam2-22050-512-True-22050-None-None]",
"tests/test_core.py::test_pump[jam2-22050-512-True-22050-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[jam2-22050-512-True-22050-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[jam2-22050-512-True-22050-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump[jam2-22050-512-True-44100-None-None]",
"tests/test_core.py::test_pump[jam2-22050-512-True-44100-None-tests/data/test.ogg]",
"tests/test_core.py::test_pump[jam2-22050-512-True-44100-tests/data/test.ogg-None]",
"tests/test_core.py::test_pump[jam2-22050-512-True-44100-tests/data/test.ogg-tests/data/test.ogg]",
"tests/test_core.py::test_pump_empty[None-11025-128-tests/data/test.ogg]",
"tests/test_core.py::test_pump_empty[None-11025-512-tests/data/test.ogg]",
"tests/test_core.py::test_pump_empty[None-22050-128-tests/data/test.ogg]",
"tests/test_core.py::test_pump_empty[None-22050-512-tests/data/test.ogg]",
"tests/test_core.py::test_pump_empty[tests/data/test.jams-11025-128-tests/data/test.ogg]",
"tests/test_core.py::test_pump_empty[tests/data/test.jams-11025-512-tests/data/test.ogg]",
"tests/test_core.py::test_pump_empty[tests/data/test.jams-22050-128-tests/data/test.ogg]",
"tests/test_core.py::test_pump_empty[tests/data/test.jams-22050-512-tests/data/test.ogg]",
"tests/test_core.py::test_pump_empty[jam2-11025-128-tests/data/test.ogg]",
"tests/test_core.py::test_pump_empty[jam2-11025-512-tests/data/test.ogg]",
"tests/test_core.py::test_pump_empty[jam2-22050-128-tests/data/test.ogg]",
"tests/test_core.py::test_pump_empty[jam2-22050-512-tests/data/test.ogg]",
"tests/test_core.py::test_pump_add[11025-128]",
"tests/test_core.py::test_pump_add[11025-512]",
"tests/test_core.py::test_pump_add[22050-128]",
"tests/test_core.py::test_pump_add[22050-512]",
"tests/test_core.py::test_pump_sampler[11025-128-None-1-None]",
"tests/test_core.py::test_pump_sampler[11025-128-None-1-10]",
"tests/test_core.py::test_pump_sampler[11025-128-None-5-None]",
"tests/test_core.py::test_pump_sampler[11025-128-None-5-10]",
"tests/test_core.py::test_pump_sampler[11025-128-1-1-None]",
"tests/test_core.py::test_pump_sampler[11025-128-1-1-10]",
"tests/test_core.py::test_pump_sampler[11025-128-1-5-None]",
"tests/test_core.py::test_pump_sampler[11025-128-1-5-10]",
"tests/test_core.py::test_pump_sampler[11025-512-None-1-None]",
"tests/test_core.py::test_pump_sampler[11025-512-None-1-10]",
"tests/test_core.py::test_pump_sampler[11025-512-None-5-None]",
"tests/test_core.py::test_pump_sampler[11025-512-None-5-10]",
"tests/test_core.py::test_pump_sampler[11025-512-1-1-None]",
"tests/test_core.py::test_pump_sampler[11025-512-1-1-10]",
"tests/test_core.py::test_pump_sampler[11025-512-1-5-None]",
"tests/test_core.py::test_pump_sampler[11025-512-1-5-10]",
"tests/test_core.py::test_pump_sampler[22050-128-None-1-None]",
"tests/test_core.py::test_pump_sampler[22050-128-None-1-10]",
"tests/test_core.py::test_pump_sampler[22050-128-None-5-None]",
"tests/test_core.py::test_pump_sampler[22050-128-None-5-10]",
"tests/test_core.py::test_pump_sampler[22050-128-1-1-None]",
"tests/test_core.py::test_pump_sampler[22050-128-1-1-10]",
"tests/test_core.py::test_pump_sampler[22050-128-1-5-None]",
"tests/test_core.py::test_pump_sampler[22050-128-1-5-10]",
"tests/test_core.py::test_pump_sampler[22050-512-None-1-None]",
"tests/test_core.py::test_pump_sampler[22050-512-None-1-10]",
"tests/test_core.py::test_pump_sampler[22050-512-None-5-None]",
"tests/test_core.py::test_pump_sampler[22050-512-None-5-10]",
"tests/test_core.py::test_pump_sampler[22050-512-1-1-None]",
"tests/test_core.py::test_pump_sampler[22050-512-1-1-10]",
"tests/test_core.py::test_pump_sampler[22050-512-1-5-None]",
"tests/test_core.py::test_pump_sampler[22050-512-1-5-10]",
"tests/test_core.py::test_pump_str[11025-128]",
"tests/test_core.py::test_pump_str[11025-512]",
"tests/test_core.py::test_pump_str[22050-128]",
"tests/test_core.py::test_pump_str[22050-512]"
] | [] | ISC License | 1,752 | 419 | [
"pumpp/core.py"
] |
|
asottile__all-repos-22 | 5c980cde22d85a79eec64f25fb0e31ffa0990c16 | 2017-10-13 04:31:31 | 6835d50a1d65e98a44a21386ce6ec37703ce8f93 | diff --git a/all_repos/autofix_lib.py b/all_repos/autofix_lib.py
index 8407bee..8b2372e 100644
--- a/all_repos/autofix_lib.py
+++ b/all_repos/autofix_lib.py
@@ -27,7 +27,8 @@ class Commit(collections.namedtuple(
class AutofixSettings(collections.namedtuple(
- 'AutofixSettings', ('jobs', 'color', 'limit', 'dry_run'),
+ 'AutofixSettings',
+ ('jobs', 'color', 'limit', 'dry_run', 'interactive'),
)):
__slots__ = ()
@@ -35,7 +36,7 @@ class AutofixSettings(collections.namedtuple(
def from_cli(cls, args):
return cls(
jobs=args.jobs, color=args.color, limit=args.limit,
- dry_run=args.dry_run,
+ dry_run=args.dry_run, interactive=args.interactive,
)
@@ -112,6 +113,43 @@ def repo_context(repo, *, use_color):
traceback.print_exc()
+def _interactive_check(*, use_color):
+ def _quit():
+ print('Goodbye!')
+ raise SystemExit()
+
+ while True:
+ try:
+ s = input(color.fmt(
+ '***Looks good [y,n,s,q,?]? ',
+ color.BLUE_B, use_color=use_color,
+ ))
+ except (EOFError, KeyboardInterrupt):
+ _quit()
+
+ s = s.strip().lower()
+ if s in {'y', 'yes'}:
+ return True
+ elif s in {'n', 'no'}:
+ return False
+ elif s in {'s', 'shell'}:
+ print('Opening an interactive shell, type `exit` to continue.')
+ print('Any modifications will be committed.')
+ subprocess.call(os.environ.get('SHELL', 'bash'))
+ elif s in {'q', 'quit'}:
+ _quit()
+ else:
+ if s not in {'?', 'help'}:
+ print(color.fmt(
+ f'Unexpected input: {s}', color.RED, use_color=use_color,
+ ))
+ print('y (yes): yes it looks good, commit and continue.')
+ print('n (no): no, do not commit this repository.')
+ print('s (shell): open an interactive shell in the repo.')
+ print('q (quit, ^C): early exit from the autofixer.')
+ print('? (help): show this help message.')
+
+
def _fix_inner(repo, apply_fix, check_fix, config, commit, autofix_settings):
with repo_context(repo, use_color=autofix_settings.color):
branch_name = f'all-repos_autofix_{commit.branch_name}'
@@ -125,6 +163,12 @@ def _fix_inner(repo, apply_fix, check_fix, config, commit, autofix_settings):
check_fix()
+ if (
+ autofix_settings.interactive and
+ not _interactive_check(use_color=autofix_settings.color)
+ ):
+ return
+
commit_message = (
f'{commit.msg}\n\n'
f'Committed via https://github.com/asottile/all-repos'
@@ -153,6 +197,7 @@ def fix(
commit: Commit,
autofix_settings: AutofixSettings,
):
+ assert not autofix_settings.interactive or autofix_settings.jobs == 1
repos = tuple(repos)[:autofix_settings.limit]
func = functools.partial(
_fix_inner,
diff --git a/all_repos/cli.py b/all_repos/cli.py
index 40f5177..2357878 100644
--- a/all_repos/cli.py
+++ b/all_repos/cli.py
@@ -40,8 +40,16 @@ def add_color_arg(parser):
def add_fixer_args(parser):
add_config_arg(parser)
- add_jobs_arg(parser, default=1)
add_color_arg(parser)
+
+ mutex = parser.add_mutually_exclusive_group()
+ mutex.add_argument('--dry-run', action='store_true')
+ mutex.add_argument(
+ '-i', '--interactive', help='Interactively approve / deny fixes.',
+ )
+ add_jobs_arg(mutex, default=1)
+
+ parser.add_argument('--limit', type=int, default=None)
parser.add_argument(
'--author',
help=(
@@ -50,6 +58,4 @@ def add_fixer_args(parser):
"An example: `--author='Herp Derp <[email protected]>'`"
),
)
- parser.add_argument('--dry-run', action='store_true')
- parser.add_argument('--limit', type=int, default=None)
parser.add_argument('--repos', nargs='*')
diff --git a/all_repos/color.py b/all_repos/color.py
index f2def75..0f9a61d 100644
--- a/all_repos/color.py
+++ b/all_repos/color.py
@@ -1,4 +1,5 @@
BLUE_B = '\033[1;34m'
+RED = '\033[31m'
RED_H = '\033[41m'
TURQUOISE = '\033[36m'
TURQUOISE_H = '\033[46;30m'
| autofix_lib: --interactive
Add an interactive mode for autofixers which utilize `autofix_lib` | asottile/all-repos | diff --git a/tests/autofix_lib_test.py b/tests/autofix_lib_test.py
index 9a1a57c..b3ab17d 100644
--- a/tests/autofix_lib_test.py
+++ b/tests/autofix_lib_test.py
@@ -1,5 +1,7 @@
+import builtins
import os
import subprocess
+from unittest import mock
import pytest
from pre_commit.constants import VERSION as PRE_COMMIT_VERSION
@@ -90,6 +92,120 @@ def test_repo_context_errors(file_config_files, capsys):
assert 'assert False' in err
+def _get_input_side_effect(*inputs):
+ it = iter(inputs)
+
+ def side_effect(s):
+ print(s, end='')
+ ret = next(it)
+ if ret in (EOFError, KeyboardInterrupt):
+ print({EOFError: '^D', KeyboardInterrupt: '^C'}[ret])
+ raise ret
+ else:
+ print(f'<<{ret}')
+ return ret
+ return side_effect
+
+
[email protected]_fixture
+def mock_input():
+ with mock.patch.object(builtins, 'input') as mck:
+ yield mck
+
+
+def test_interactive_control_c(mock_input, capfd):
+ mock_input.side_effect = _get_input_side_effect(KeyboardInterrupt)
+ with pytest.raises(SystemExit):
+ autofix_lib._interactive_check(use_color=False)
+ out, _ = capfd.readouterr()
+ assert out == (
+ '***Looks good [y,n,s,q,?]? ^C\n'
+ 'Goodbye!\n'
+ )
+
+
+def test_interactive_eof(mock_input, capfd):
+ mock_input.side_effect = _get_input_side_effect(EOFError)
+ with pytest.raises(SystemExit):
+ autofix_lib._interactive_check(use_color=False)
+ out, _ = capfd.readouterr()
+ assert out == (
+ '***Looks good [y,n,s,q,?]? ^D\n'
+ 'Goodbye!\n'
+ )
+
+
+def test_interactive_quit(mock_input, capfd):
+ mock_input.side_effect = _get_input_side_effect('q')
+ with pytest.raises(SystemExit):
+ autofix_lib._interactive_check(use_color=False)
+ out, _ = capfd.readouterr()
+ assert out == (
+ '***Looks good [y,n,s,q,?]? <<q\n'
+ 'Goodbye!\n'
+ )
+
+
+def test_interactive_yes(mock_input, capfd):
+ mock_input.side_effect = _get_input_side_effect('y')
+ assert autofix_lib._interactive_check(use_color=False) is True
+ out, _ = capfd.readouterr()
+ assert out == '***Looks good [y,n,s,q,?]? <<y\n'
+
+
+def test_interactive_no(mock_input, capfd):
+ mock_input.side_effect = _get_input_side_effect('n')
+ assert autofix_lib._interactive_check(use_color=False) is False
+ out, _ = capfd.readouterr()
+ assert out == '***Looks good [y,n,s,q,?]? <<n\n'
+
+
+def test_interactive_shell(mock_input, capfd):
+ mock_input.side_effect = _get_input_side_effect('s', 'n')
+ with mock.patch.dict(os.environ, {'SHELL': 'echo'}):
+ assert autofix_lib._interactive_check(use_color=False) is False
+ out, _ = capfd.readouterr()
+ assert out == (
+ '***Looks good [y,n,s,q,?]? <<s\n'
+ 'Opening an interactive shell, type `exit` to continue.\n'
+ 'Any modifications will be committed.\n'
+ # A newline from echo
+ '\n'
+ '***Looks good [y,n,s,q,?]? <<n\n'
+ )
+
+
+def test_interactive_help(mock_input, capfd):
+ mock_input.side_effect = _get_input_side_effect('?', 'n')
+ assert autofix_lib._interactive_check(use_color=False) is False
+ out, _ = capfd.readouterr()
+ assert out == (
+ '***Looks good [y,n,s,q,?]? <<?\n'
+ 'y (yes): yes it looks good, commit and continue.\n'
+ 'n (no): no, do not commit this repository.\n'
+ 's (shell): open an interactive shell in the repo.\n'
+ 'q (quit, ^C): early exit from the autofixer.\n'
+ '? (help): show this help message.\n'
+ '***Looks good [y,n,s,q,?]? <<n\n'
+ )
+
+
+def test_interactive_garbage(mock_input, capfd):
+ mock_input.side_effect = _get_input_side_effect('garbage', 'n')
+ assert autofix_lib._interactive_check(use_color=False) is False
+ out, _ = capfd.readouterr()
+ assert out == (
+ '***Looks good [y,n,s,q,?]? <<garbage\n'
+ 'Unexpected input: garbage\n'
+ 'y (yes): yes it looks good, commit and continue.\n'
+ 'n (no): no, do not commit this repository.\n'
+ 's (shell): open an interactive shell in the repo.\n'
+ 'q (quit, ^C): early exit from the autofixer.\n'
+ '? (help): show this help message.\n'
+ '***Looks good [y,n,s,q,?]? <<n\n'
+ )
+
+
def lower_case_f():
f_contents = open('f').read()
with open('f', 'w') as f:
@@ -110,7 +226,7 @@ def test_fix_dry_run_no_change(file_config_files, capfd):
config=load_config(file_config_files.cfg),
commit=autofix_lib.Commit('message!', 'test-branch', None),
autofix_settings=autofix_lib.AutofixSettings(
- jobs=1, color=False, limit=None, dry_run=True,
+ jobs=1, color=False, limit=None, dry_run=True, interactive=False,
),
)
@@ -136,7 +252,7 @@ def test_fix_with_limit(file_config_files, capfd):
config=load_config(file_config_files.cfg),
commit=autofix_lib.Commit('message!', 'test-branch', None),
autofix_settings=autofix_lib.AutofixSettings(
- jobs=1, color=False, limit=1, dry_run=True,
+ jobs=1, color=False, limit=1, dry_run=True, interactive=False,
),
)
@@ -148,6 +264,25 @@ def test_fix_with_limit(file_config_files, capfd):
assert '-OHELLO\n+ohello\n' not in out
+def test_fix_interactive(file_config_files, capfd, mock_input):
+ mock_input.side_effect = _get_input_side_effect('y', 'n')
+ autofix_lib.fix(
+ (
+ str(file_config_files.output_dir.join('repo1')),
+ str(file_config_files.output_dir.join('repo2')),
+ ),
+ apply_fix=lower_case_f,
+ config=load_config(file_config_files.cfg),
+ commit=autofix_lib.Commit('message!', 'test-branch', None),
+ autofix_settings=autofix_lib.AutofixSettings(
+ jobs=1, color=False, limit=None, dry_run=False, interactive=True,
+ ),
+ )
+
+ assert file_config_files.dir1.join('f').read() == 'ohai\n'
+ assert file_config_files.dir2.join('f').read() == 'OHELLO\n'
+
+
def test_autofix_makes_commits(file_config_files, capfd):
autofix_lib.fix(
(
@@ -158,7 +293,7 @@ def test_autofix_makes_commits(file_config_files, capfd):
config=load_config(file_config_files.cfg),
commit=autofix_lib.Commit('message!', 'test-branch', 'A B <[email protected]>'),
autofix_settings=autofix_lib.AutofixSettings(
- jobs=1, color=False, limit=None, dry_run=False,
+ jobs=1, color=False, limit=None, dry_run=False, interactive=False,
),
)
@@ -201,7 +336,7 @@ def test_fix_failing_check_no_changes(file_config_files, capfd):
config=load_config(file_config_files.cfg),
commit=autofix_lib.Commit('message!', 'test-branch', None),
autofix_settings=autofix_lib.AutofixSettings(
- jobs=1, color=False, limit=None, dry_run=False,
+ jobs=1, color=False, limit=None, dry_run=False, interactive=False,
),
)
@@ -226,7 +361,7 @@ def test_noop_does_not_commit(file_config_files, capfd):
config=load_config(file_config_files.cfg),
commit=autofix_lib.Commit('message!', 'test-branch', None),
autofix_settings=autofix_lib.AutofixSettings(
- jobs=1, color=False, limit=None, dry_run=False,
+ jobs=1, color=False, limit=None, dry_run=False, interactive=False,
),
)
rev_after1 = testing.git.revparse(file_config_files.dir1)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 3,
"test_score": 2
},
"num_modified_files": 3
} | 0.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-env"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements-dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/asottile/all-repos.git@5c980cde22d85a79eec64f25fb0e31ffa0990c16#egg=all_repos
certifi==2025.1.31
cfgv==3.4.0
charset-normalizer==3.4.1
coverage==7.8.0
distlib==0.3.9
exceptiongroup==1.2.2
filelock==3.18.0
flake8==7.2.0
identify==2.6.9
idna==3.10
iniconfig==2.1.0
mccabe==0.7.0
nodeenv==1.9.1
packaging==24.2
platformdirs==4.3.7
pluggy==1.5.0
pre_commit==4.2.0
pycodestyle==2.13.0
pyflakes==3.3.2
pytest==8.3.5
pytest-env==1.1.5
PyYAML==6.0.2
requests==2.32.3
tomli==2.2.1
urllib3==2.3.0
virtualenv==20.29.3
| name: all-repos
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- certifi==2025.1.31
- cfgv==3.4.0
- charset-normalizer==3.4.1
- coverage==7.8.0
- distlib==0.3.9
- exceptiongroup==1.2.2
- filelock==3.18.0
- flake8==7.2.0
- identify==2.6.9
- idna==3.10
- iniconfig==2.1.0
- mccabe==0.7.0
- nodeenv==1.9.1
- packaging==24.2
- platformdirs==4.3.7
- pluggy==1.5.0
- pre-commit==4.2.0
- pycodestyle==2.13.0
- pyflakes==3.3.2
- pytest==8.3.5
- pytest-env==1.1.5
- pyyaml==6.0.2
- requests==2.32.3
- tomli==2.2.1
- urllib3==2.3.0
- virtualenv==20.29.3
prefix: /opt/conda/envs/all-repos
| [
"tests/autofix_lib_test.py::test_interactive_control_c",
"tests/autofix_lib_test.py::test_interactive_eof",
"tests/autofix_lib_test.py::test_interactive_quit",
"tests/autofix_lib_test.py::test_interactive_yes",
"tests/autofix_lib_test.py::test_interactive_no",
"tests/autofix_lib_test.py::test_interactive_shell",
"tests/autofix_lib_test.py::test_interactive_help",
"tests/autofix_lib_test.py::test_interactive_garbage",
"tests/autofix_lib_test.py::test_fix_dry_run_no_change",
"tests/autofix_lib_test.py::test_fix_with_limit",
"tests/autofix_lib_test.py::test_fix_interactive",
"tests/autofix_lib_test.py::test_autofix_makes_commits",
"tests/autofix_lib_test.py::test_fix_failing_check_no_changes",
"tests/autofix_lib_test.py::test_noop_does_not_commit"
] | [] | [
"tests/autofix_lib_test.py::test_filter_repos[None-expected0]",
"tests/autofix_lib_test.py::test_filter_repos[cli_repos1-expected1]",
"tests/autofix_lib_test.py::test_filter_repos[cli_repos2-expected2]",
"tests/autofix_lib_test.py::test_assert_importable_is_importable",
"tests/autofix_lib_test.py::test_assert_importable_not_importable",
"tests/autofix_lib_test.py::test_require_version_new_enough",
"tests/autofix_lib_test.py::test_require_version_not_new_enough",
"tests/autofix_lib_test.py::test_run",
"tests/autofix_lib_test.py::test_cwd",
"tests/autofix_lib_test.py::test_repo_context_success",
"tests/autofix_lib_test.py::test_repo_context_errors"
] | [] | MIT License | 1,756 | 1,243 | [
"all_repos/autofix_lib.py",
"all_repos/cli.py",
"all_repos/color.py"
] |
|
pynamodb__PynamoDB-374 | c4d89e95bc747173651d5a529992a2ca8f03bddb | 2017-10-13 20:51:49 | 1828bda52376a4b0313146b64ffb447e5392f467 | diff --git a/pynamodb/models.py b/pynamodb/models.py
index 2425106..1e539de 100644
--- a/pynamodb/models.py
+++ b/pynamodb/models.py
@@ -344,7 +344,6 @@ class Model(AttributeContainer):
attribute_cls = None
for attr_name, attr_cls in self._get_attributes().items():
if attr_name == attribute:
- value = attr_cls.serialize(value)
attribute_cls = attr_cls
break
if not attribute_cls:
@@ -360,8 +359,10 @@ class Model(AttributeContainer):
ACTION: action.upper() if action else None,
}
}
- if action is not None and action.upper() != DELETE:
- kwargs[pythonic(ATTR_UPDATES)][attribute_cls.attr_name][VALUE] = {ATTR_TYPE_MAP[attribute_cls.attr_type]: value}
+ if value is not None:
+ kwargs[pythonic(ATTR_UPDATES)][attribute_cls.attr_name][VALUE] = {
+ ATTR_TYPE_MAP[attribute_cls.attr_type]: attribute_cls.serialize(value)
+ }
kwargs[pythonic(RETURN_VALUES)] = ALL_NEW
kwargs.update(conditional_operator=conditional_operator)
kwargs.update(condition=condition)
@@ -415,7 +416,7 @@ class Model(AttributeContainer):
attribute_cls = attrs[attr]
action = params['action'] and params['action'].upper()
attr_values = {ACTION: action}
- if action != DELETE:
+ if 'value' in params:
attr_values[VALUE] = self._serialize_value(attribute_cls, params['value'])
kwargs[pythonic(ATTR_UPDATES)][attribute_cls.attr_name] = attr_values
| update_item delete item from set fails
Here is a simple test case that attempts to remove a value from a UnicodeSet attribute:
``` python
from pynamodb.models import Model
from pynamodb.attributes import NumberAttribute, UnicodeSetAttribute
class UserModel(Model):
class Meta:
table_name = 'User'
region = 'us-west-2'
read_capacity_units = 1
write_capacity_units = 1
host = 'http://localhost:8000'
id = NumberAttribute(hash_key=True)
nicknames = UnicodeSetAttribute(null=True)
UserModel.create_table(wait=True)
try:
original_nicknames = {'name1', 'name2', 'name3'}
nicknames_to_remove = {'name2'}
expected_result_nicknames = original_nicknames - nicknames_to_remove
user = UserModel(id=1, nicknames=original_nicknames)
user.save()
user = UserModel.get(1)
print('original nicknames:', user.nicknames)
assert original_nicknames == user.nicknames
print('nicknames to remove:', nicknames_to_remove)
user.update_item('nicknames', value=nicknames_to_remove, action='delete')
user = UserModel.get(1)
print('expected result nicknames:', expected_result_nicknames)
print(' actual result nicknames:', user.nicknames)
print()
assert expected_result_nicknames == user.nicknames
finally:
UserModel.delete_table()
```
When running this test case, the output is:
```
original nicknames: {'name2', 'name3', 'name1'}
nicknames to remove: {'name2'}
expected result nicknames: {'name3', 'name1'}
actual result nicknames: None
Traceback (most recent call last):
File "mytest.py", line 34, in <module>
assert expected_result_nicknames == user.nicknames
AssertionError
```
I expected 'name2' to be removed from the `{'name1', 'name2', 'name3'}` set, but instead all the values were removed.
This bug exists since 1.4.4 (1.4.3 works fine), and is still present in 1.6.0. The associated pull request has been updated to be based on the post 1.6.0 devel branch, and it includes a unit test that fails with only the 'pynamodb/tests' commits applied but succeeds with the changes to connection/base.py and models.py (for update_item of connection and model, respectively).
| pynamodb/PynamoDB | diff --git a/pynamodb/tests/test_model.py b/pynamodb/tests/test_model.py
index 571c1e3..2688fb4 100644
--- a/pynamodb/tests/test_model.py
+++ b/pynamodb/tests/test_model.py
@@ -1080,6 +1080,44 @@ class ModelTestCase(TestCase):
assert item.views is None
self.assertEquals(set(['bob']), item.custom_aliases)
+ # Reproduces https://github.com/pynamodb/PynamoDB/issues/132
+ with patch(PATCH_METHOD) as req:
+ req.return_value = {
+ ATTRIBUTES: {
+ "aliases": {
+ "SS": set(["alias1", "alias3"])
+ }
+ }
+ }
+ item.update({
+ 'custom_aliases': {'value': set(['alias2']), 'action': 'delete'},
+ })
+
+ args = req.call_args[0][1]
+ params = {
+ 'TableName': 'SimpleModel',
+ 'ReturnValues': 'ALL_NEW',
+ 'Key': {
+ 'user_name': {
+ 'S': 'foo'
+ }
+ },
+ 'UpdateExpression': 'DELETE #0 :0',
+ 'ExpressionAttributeNames': {
+ '#0': 'aliases'
+ },
+ 'ExpressionAttributeValues': {
+ ':0': {
+ 'SS': set(['alias2'])
+ }
+ },
+ 'ReturnConsumedCapacity': 'TOTAL'
+ }
+ deep_eq(args, params, _assert=True)
+
+ assert item.views is None
+ self.assertEquals(set(['alias1', 'alias3']), item.custom_aliases)
+
def test_update_item(self):
"""
Model.update_item
@@ -1577,6 +1615,38 @@ class ModelTestCase(TestCase):
}
deep_eq(args, params, _assert=True)
+ # Reproduces https://github.com/pynamodb/PynamoDB/issues/132
+ with patch(PATCH_METHOD) as req:
+ req.return_value = {
+ ATTRIBUTES: {
+ "aliases": {
+ "SS": set(["alias1", "alias3"])
+ }
+ }
+ }
+ item.update_item('custom_aliases', set(['alias2']), action='delete')
+ args = req.call_args[0][1]
+ params = {
+ 'TableName': 'SimpleModel',
+ 'ReturnValues': 'ALL_NEW',
+ 'Key': {
+ 'user_name': {
+ 'S': 'foo'
+ }
+ },
+ 'UpdateExpression': 'DELETE #0 :0',
+ 'ExpressionAttributeNames': {
+ '#0': 'aliases'
+ },
+ 'ExpressionAttributeValues': {
+ ':0': {
+ 'SS': set(['alias2'])
+ }
+ },
+ 'ReturnConsumedCapacity': 'TOTAL'
+ }
+ deep_eq(args, params, _assert=True)
+ self.assertEqual(set(["alias1", "alias3"]), item.custom_aliases)
def test_save(self):
"""
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 1
} | 3.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
botocore==1.2.0
certifi==2021.5.30
docutils==0.18.1
importlib-metadata==4.8.3
iniconfig==1.1.1
jmespath==0.7.1
packaging==21.3
pluggy==1.0.0
py==1.11.0
-e git+https://github.com/pynamodb/PynamoDB.git@c4d89e95bc747173651d5a529992a2ca8f03bddb#egg=pynamodb
pyparsing==3.1.4
pytest==7.0.1
python-dateutil==2.9.0.post0
six==1.9.0
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: PynamoDB
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- botocore==1.2.0
- docutils==0.18.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- jmespath==0.7.1
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- python-dateutil==2.9.0.post0
- six==1.9.0
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/PynamoDB
| [
"pynamodb/tests/test_model.py::ModelTestCase::test_update",
"pynamodb/tests/test_model.py::ModelTestCase::test_update_item"
] | [] | [
"pynamodb/tests/test_model.py::ModelTestCase::test_batch_get",
"pynamodb/tests/test_model.py::ModelTestCase::test_batch_write",
"pynamodb/tests/test_model.py::ModelTestCase::test_batch_write_with_unprocessed",
"pynamodb/tests/test_model.py::ModelTestCase::test_car_model_retrieve_from_db",
"pynamodb/tests/test_model.py::ModelTestCase::test_car_model_with_null_retrieve_from_db",
"pynamodb/tests/test_model.py::ModelTestCase::test_complex_key",
"pynamodb/tests/test_model.py::ModelTestCase::test_complex_model_is_complex",
"pynamodb/tests/test_model.py::ModelTestCase::test_complex_model_retrieve_from_db",
"pynamodb/tests/test_model.py::ModelTestCase::test_conditional_operator_map_attribute",
"pynamodb/tests/test_model.py::ModelTestCase::test_count",
"pynamodb/tests/test_model.py::ModelTestCase::test_count_no_hash_key",
"pynamodb/tests/test_model.py::ModelTestCase::test_create_model",
"pynamodb/tests/test_model.py::ModelTestCase::test_delete",
"pynamodb/tests/test_model.py::ModelTestCase::test_delete_doesnt_do_validation_on_null_attributes",
"pynamodb/tests/test_model.py::ModelTestCase::test_deserializing_map_four_layers_deep_works",
"pynamodb/tests/test_model.py::ModelTestCase::test_deserializing_new_style_bool_false_works",
"pynamodb/tests/test_model.py::ModelTestCase::test_deserializing_new_style_bool_true_works",
"pynamodb/tests/test_model.py::ModelTestCase::test_deserializing_old_style_bool_false_works",
"pynamodb/tests/test_model.py::ModelTestCase::test_deserializing_old_style_bool_true_works",
"pynamodb/tests/test_model.py::ModelTestCase::test_dumps",
"pynamodb/tests/test_model.py::ModelTestCase::test_explicit_raw_map_serialize_pass",
"pynamodb/tests/test_model.py::ModelTestCase::test_filter_count",
"pynamodb/tests/test_model.py::ModelTestCase::test_get",
"pynamodb/tests/test_model.py::ModelTestCase::test_global_index",
"pynamodb/tests/test_model.py::ModelTestCase::test_index_count",
"pynamodb/tests/test_model.py::ModelTestCase::test_index_multipage_count",
"pynamodb/tests/test_model.py::ModelTestCase::test_index_queries",
"pynamodb/tests/test_model.py::ModelTestCase::test_invalid_car_model_with_null_retrieve_from_db",
"pynamodb/tests/test_model.py::ModelTestCase::test_invalid_map_model_raises",
"pynamodb/tests/test_model.py::ModelTestCase::test_list_of_map_works_like_list_of_map",
"pynamodb/tests/test_model.py::ModelTestCase::test_list_works_like_list",
"pynamodb/tests/test_model.py::ModelTestCase::test_loads",
"pynamodb/tests/test_model.py::ModelTestCase::test_local_index",
"pynamodb/tests/test_model.py::ModelTestCase::test_model_attrs",
"pynamodb/tests/test_model.py::ModelTestCase::test_model_subclass_attributes_inherited_on_create",
"pynamodb/tests/test_model.py::ModelTestCase::test_model_with_invalid_data_does_not_validate",
"pynamodb/tests/test_model.py::ModelTestCase::test_model_with_list",
"pynamodb/tests/test_model.py::ModelTestCase::test_model_with_list_of_map",
"pynamodb/tests/test_model.py::ModelTestCase::test_model_with_list_of_map_retrieve_from_db",
"pynamodb/tests/test_model.py::ModelTestCase::test_model_with_list_retrieve_from_db",
"pynamodb/tests/test_model.py::ModelTestCase::test_model_with_maps",
"pynamodb/tests/test_model.py::ModelTestCase::test_model_with_maps_retrieve_from_db",
"pynamodb/tests/test_model.py::ModelTestCase::test_model_with_maps_with_nulls_retrieve_from_db",
"pynamodb/tests/test_model.py::ModelTestCase::test_model_with_maps_with_pythonic_attributes",
"pynamodb/tests/test_model.py::ModelTestCase::test_model_with_nulls_validates",
"pynamodb/tests/test_model.py::ModelTestCase::test_model_works_like_model",
"pynamodb/tests/test_model.py::ModelTestCase::test_multiple_indices_share_non_key_attribute",
"pynamodb/tests/test_model.py::ModelTestCase::test_new_style_boolean_serializes_as_bool",
"pynamodb/tests/test_model.py::ModelTestCase::test_old_style_boolean_serializes_as_bool",
"pynamodb/tests/test_model.py::ModelTestCase::test_old_style_model_exception",
"pynamodb/tests/test_model.py::ModelTestCase::test_overidden_defaults",
"pynamodb/tests/test_model.py::ModelTestCase::test_overidden_session",
"pynamodb/tests/test_model.py::ModelTestCase::test_overridden_attr_name",
"pynamodb/tests/test_model.py::ModelTestCase::test_projections",
"pynamodb/tests/test_model.py::ModelTestCase::test_query",
"pynamodb/tests/test_model.py::ModelTestCase::test_query_limit_greater_than_available_items_and_page_size",
"pynamodb/tests/test_model.py::ModelTestCase::test_query_limit_greater_than_available_items_multiple_page",
"pynamodb/tests/test_model.py::ModelTestCase::test_query_limit_greater_than_available_items_single_page",
"pynamodb/tests/test_model.py::ModelTestCase::test_query_limit_identical_to_available_items_single_page",
"pynamodb/tests/test_model.py::ModelTestCase::test_query_limit_less_than_available_and_page_size",
"pynamodb/tests/test_model.py::ModelTestCase::test_query_limit_less_than_available_items_multiple_page",
"pynamodb/tests/test_model.py::ModelTestCase::test_rate_limited_scan",
"pynamodb/tests/test_model.py::ModelTestCase::test_raw_map_as_sub_map",
"pynamodb/tests/test_model.py::ModelTestCase::test_raw_map_as_sub_map_deserialize",
"pynamodb/tests/test_model.py::ModelTestCase::test_raw_map_as_sub_map_from_raw_data_works",
"pynamodb/tests/test_model.py::ModelTestCase::test_raw_map_as_sub_map_serialize_pass",
"pynamodb/tests/test_model.py::ModelTestCase::test_raw_map_deserializes",
"pynamodb/tests/test_model.py::ModelTestCase::test_raw_map_from_raw_data_works",
"pynamodb/tests/test_model.py::ModelTestCase::test_raw_map_serialize_fun_one",
"pynamodb/tests/test_model.py::ModelTestCase::test_refresh",
"pynamodb/tests/test_model.py::ModelTestCase::test_result_set_init",
"pynamodb/tests/test_model.py::ModelTestCase::test_result_set_iter",
"pynamodb/tests/test_model.py::ModelTestCase::test_save",
"pynamodb/tests/test_model.py::ModelTestCase::test_scan",
"pynamodb/tests/test_model.py::ModelTestCase::test_scan_limit",
"pynamodb/tests/test_model.py::ModelTestCase::test_scan_limit_with_page_size",
"pynamodb/tests/test_model.py::ModelTestCase::test_throttle",
"pynamodb/tests/test_model.py::ModelInitTestCase::test_raw_map_attribute_with_dict_init",
"pynamodb/tests/test_model.py::ModelInitTestCase::test_raw_map_attribute_with_initialized_instance_init",
"pynamodb/tests/test_model.py::ModelInitTestCase::test_subclassed_map_attribute_with_dict_init",
"pynamodb/tests/test_model.py::ModelInitTestCase::test_subclassed_map_attribute_with_initialized_instance_init",
"pynamodb/tests/test_model.py::ModelInitTestCase::test_subclassed_map_attribute_with_map_attribute_member_with_initialized_instance_init",
"pynamodb/tests/test_model.py::ModelInitTestCase::test_subclassed_map_attribute_with_map_attributes_member_with_dict_init"
] | [] | MIT License | 1,759 | 384 | [
"pynamodb/models.py"
] |
|
knknkn1162__anyloadump-17 | 30965896a2f42ac3fa92a7013f0936008e02ad5e | 2017-10-15 05:59:47 | 30965896a2f42ac3fa92a7013f0936008e02ad5e | diff --git a/anyloadump/__init__.py b/anyloadump/__init__.py
index 67c25d0..8e3dd12 100644
--- a/anyloadump/__init__.py
+++ b/anyloadump/__init__.py
@@ -1,2 +1,3 @@
__all__ = ["dump", "load"]
-from . import dump, load
+from .dump import *
+from .load import *
call as anyloadump.load(...) or anyloadump.dump(...)
Modify import in __init__.py. | knknkn1162/anyloadump | diff --git a/tests/test_dump.py b/tests/test_dump.py
index 11cd4ee..0596cbc 100644
--- a/tests/test_dump.py
+++ b/tests/test_dump.py
@@ -1,5 +1,5 @@
import unittest
-from anyloadump import dump
+import anyloadump as ald
import os
class DumpTests(unittest.TestCase):
@@ -14,7 +14,7 @@ class DumpTests(unittest.TestCase):
# test json-format
json_file = self._get_path("data/out.json")
- dump.dump(lst, self._get_path(json_file))
+ ald.dump(lst, self._get_path(json_file))
## confirm
with open(json_file, "r") as fi:
obj = json.load(fi)
@@ -23,7 +23,7 @@ class DumpTests(unittest.TestCase):
# test pickle-format
pickle_file = self._get_path("data/out.pickle")
- dump.dump(lst, self._get_path(pickle_file))
+ ald.dump(lst, self._get_path(pickle_file))
with open(pickle_file, "rb") as fi:
obj = pickle.load(fi)
self.assertEqual(lst, obj)
@@ -35,13 +35,13 @@ class DumpTests(unittest.TestCase):
lst = [1,2,3]
# test json-format
- s = dump.dumps(lst, fmt="json")
+ s = ald.dumps(lst, fmt="json")
## confirm
obj = json.loads(s)
self.assertEqual(lst, obj)
# test pickle-format
- s = dump.dumps(lst, "pickle")
+ s = ald.dumps(lst, "pickle")
## confirm
obj = pickle.loads(s)
self.assertEqual(lst, obj)
diff --git a/tests/test_load.py b/tests/test_load.py
index 1b19de1..d76de0d 100644
--- a/tests/test_load.py
+++ b/tests/test_load.py
@@ -1,5 +1,5 @@
import unittest
-from anyloadump import load
+import anyloadump as ald
import os
class LoadTests(unittest.TestCase):
@@ -16,7 +16,7 @@ class LoadTests(unittest.TestCase):
# test text_file(json)
json_file = self._get_path("data/sample.json")
- res = load.load(json_file)
+ res = ald.load(json_file)
with open(json_file, "r") as fi:
obj = json.load(fi)
@@ -24,7 +24,7 @@ class LoadTests(unittest.TestCase):
# test binary_file(pickle)
pickle_file = self._get_path("data/sample.pickle")
- res = load.load(
+ res = ald.load(
filename=pickle_file,
)
@@ -41,9 +41,9 @@ class LoadTests(unittest.TestCase):
sample = [1,2,3]
s = json.dumps(sample)
- res = load.loads(s, fmt="json") # test
+ res = ald.loads(s, fmt="json") # test
self.assertEqual(res, sample)
b = pickle.dumps(sample)
- res = load.loads(b, fmt="pickle")
+ res = ald.loads(b, fmt="pickle")
self.assertEqual(res, sample)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 3,
"test_score": 0
},
"num_modified_files": 1
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/knknkn1162/anyloadump.git@30965896a2f42ac3fa92a7013f0936008e02ad5e#egg=anyloadump
coverage==7.8.0
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
packaging @ file:///croot/packaging_1734472117206/work
pluggy @ file:///croot/pluggy_1733169602837/work
pytest @ file:///croot/pytest_1738938843180/work
pytest-cov==6.0.0
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
| name: anyloadump
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- coverage==7.8.0
- pytest-cov==6.0.0
prefix: /opt/conda/envs/anyloadump
| [
"tests/test_dump.py::DumpTests::test_dump",
"tests/test_dump.py::DumpTests::test_dumps",
"tests/test_load.py::LoadTests::test_load",
"tests/test_load.py::LoadTests::test_loads"
] | [] | [
"tests/test_dump.py::DumpTests::test_adump",
"tests/test_dump.py::DumpTests::test_adumps",
"tests/test_dump.py::DumpTests::test_xdump",
"tests/test_dump.py::DumpTests::test_xdumps"
] | [] | null | 1,763 | 107 | [
"anyloadump/__init__.py"
] |
|
networkx__networkx-2713 | 9f6c9cd6a561d41192bc29f14fd9bc16bcaad919 | 2017-10-15 17:09:15 | 93b4b9227aa8a7ac4cbd946cf3dae3b168e17b45 | diff --git a/networkx/algorithms/community/quality.py b/networkx/algorithms/community/quality.py
index 7de690af7..e04ff260d 100644
--- a/networkx/algorithms/community/quality.py
+++ b/networkx/algorithms/community/quality.py
@@ -114,7 +114,10 @@ def inter_community_edges(G, partition):
# for block in partition))
# return sum(1 for u, v in G.edges() if aff[u] != aff[v])
#
- return nx.quotient_graph(G, partition, create_using=nx.MultiGraph()).size()
+ if G.is_directed():
+ return nx.quotient_graph(G, partition, create_using=nx.MultiDiGraph()).size()
+ else:
+ return nx.quotient_graph(G, partition, create_using=nx.MultiGraph()).size()
def inter_community_non_edges(G, partition):
diff --git a/networkx/algorithms/simple_paths.py b/networkx/algorithms/simple_paths.py
index 763fa24d7..a2ef79671 100644
--- a/networkx/algorithms/simple_paths.py
+++ b/networkx/algorithms/simple_paths.py
@@ -333,7 +333,6 @@ def shortest_simple_paths(G, source, target, weight=None):
for path in listA:
if path[:i] == root:
ignore_edges.add((path[i - 1], path[i]))
- ignore_nodes.add(root[-1])
try:
length, spur = shortest_path_func(G, root[-1], target,
ignore_nodes=ignore_nodes,
@@ -343,6 +342,7 @@ def shortest_simple_paths(G, source, target, weight=None):
listB.push(root_length + length, path)
except nx.NetworkXNoPath:
pass
+ ignore_nodes.add(root[-1])
if listB:
path = listB.pop()
@@ -447,6 +447,8 @@ def _bidirectional_pred_succ(G, source, target, ignore_nodes=None, ignore_edges=
succ is a dictionary of successors from w to the target.
"""
# does BFS from both source and target and meets in the middle
+ if ignore_nodes and (source in ignore_nodes or target in ignore_nodes):
+ raise nx.NetworkXNoPath("No path between %s and %s." % (source, target))
if target == source:
return ({target: None}, {source: None}, source)
@@ -605,6 +607,8 @@ def _bidirectional_dijkstra(G, source, target, weight='weight',
shortest_path
shortest_path_length
"""
+ if ignore_nodes and (source in ignore_nodes or target in ignore_nodes):
+ raise nx.NetworkXNoPath("No path between %s and %s." % (source, target))
if source == target:
return (0, [source])
| inter_community_non_edges ignore directionality
Hi,
I think the function:
nx.algorithms.community.quality.inter_community_non_edges()
does not work properly for directed graph. It always return the non-edge of a undirected graph, basically halving the number of edges. This mean that the performance function (nx.algorithms.community.performance) will never by higher than 50% for a directed graph.
I'm using version '2.0.dev_20170801111157', python 3.5.1
Best,
Nicolas | networkx/networkx | diff --git a/networkx/algorithms/community/tests/test_quality.py b/networkx/algorithms/community/tests/test_quality.py
index 0c5b94c5a..79ce7e7f6 100644
--- a/networkx/algorithms/community/tests/test_quality.py
+++ b/networkx/algorithms/community/tests/test_quality.py
@@ -12,6 +12,7 @@ module.
"""
from __future__ import division
+from nose.tools import assert_equal
from nose.tools import assert_almost_equal
import networkx as nx
@@ -19,6 +20,7 @@ from networkx import barbell_graph
from networkx.algorithms.community import coverage
from networkx.algorithms.community import modularity
from networkx.algorithms.community import performance
+from networkx.algorithms.community.quality import inter_community_edges
class TestPerformance(object):
@@ -61,3 +63,17 @@ def test_modularity():
assert_almost_equal(-16 / (14 ** 2), modularity(G, C))
C = [{0, 1, 2}, {3, 4, 5}]
assert_almost_equal((35 * 2) / (14 ** 2), modularity(G, C))
+
+
+def test_inter_community_edges_with_digraphs():
+ G = nx.complete_graph(2, create_using = nx.DiGraph())
+ partition = [{0}, {1}]
+ assert_equal(inter_community_edges(G, partition), 2)
+
+ G = nx.complete_graph(10, create_using = nx.DiGraph())
+ partition = [{0}, {1, 2}, {3, 4, 5}, {6, 7, 8, 9}]
+ assert_equal(inter_community_edges(G, partition), 70)
+
+ G = nx.cycle_graph(4, create_using = nx.DiGraph())
+ partition = [{0, 1}, {2, 3}]
+ assert_equal(inter_community_edges(G, partition), 2)
diff --git a/networkx/algorithms/tests/test_simple_paths.py b/networkx/algorithms/tests/test_simple_paths.py
index e29255c32..4c701e487 100644
--- a/networkx/algorithms/tests/test_simple_paths.py
+++ b/networkx/algorithms/tests/test_simple_paths.py
@@ -220,6 +220,40 @@ def test_directed_weighted_shortest_simple_path():
cost = this_cost
+def test_weighted_shortest_simple_path_issue2427():
+ G = nx.Graph()
+ G.add_edge('IN', 'OUT', weight = 2)
+ G.add_edge('IN', 'A', weight = 1)
+ G.add_edge('IN', 'B', weight = 2)
+ G.add_edge('B', 'OUT', weight = 2)
+ assert_equal(list(nx.shortest_simple_paths(G, 'IN', 'OUT', weight = "weight")),
+ [['IN', 'OUT'], ['IN', 'B', 'OUT']])
+ G = nx.Graph()
+ G.add_edge('IN', 'OUT', weight = 10)
+ G.add_edge('IN', 'A', weight = 1)
+ G.add_edge('IN', 'B', weight = 1)
+ G.add_edge('B', 'OUT', weight = 1)
+ assert_equal(list(nx.shortest_simple_paths(G, 'IN', 'OUT', weight = "weight")),
+ [['IN', 'B', 'OUT'], ['IN', 'OUT']])
+
+
+def test_directed_weighted_shortest_simple_path_issue2427():
+ G = nx.DiGraph()
+ G.add_edge('IN', 'OUT', weight = 2)
+ G.add_edge('IN', 'A', weight = 1)
+ G.add_edge('IN', 'B', weight = 2)
+ G.add_edge('B', 'OUT', weight = 2)
+ assert_equal(list(nx.shortest_simple_paths(G, 'IN', 'OUT', weight = "weight")),
+ [['IN', 'OUT'], ['IN', 'B', 'OUT']])
+ G = nx.DiGraph()
+ G.add_edge('IN', 'OUT', weight = 10)
+ G.add_edge('IN', 'A', weight = 1)
+ G.add_edge('IN', 'B', weight = 1)
+ G.add_edge('B', 'OUT', weight = 1)
+ assert_equal(list(nx.shortest_simple_paths(G, 'IN', 'OUT', weight = "weight")),
+ [['IN', 'B', 'OUT'], ['IN', 'OUT']])
+
+
def test_weight_name():
G = nx.cycle_graph(7)
nx.set_edge_attributes(G, 1, 'weight')
@@ -303,6 +337,38 @@ def test_bidirectional_shortest_path_restricted_directed_cycle():
)
+def test_bidirectional_shortest_path_ignore():
+ G = nx.Graph()
+ nx.add_path(G, [1, 2])
+ nx.add_path(G, [1, 3])
+ nx.add_path(G, [1, 4])
+ assert_raises(
+ nx.NetworkXNoPath,
+ _bidirectional_shortest_path,
+ G,
+ 1, 2,
+ ignore_nodes=[1],
+ )
+ assert_raises(
+ nx.NetworkXNoPath,
+ _bidirectional_shortest_path,
+ G,
+ 1, 2,
+ ignore_nodes=[2],
+ )
+ G = nx.Graph()
+ nx.add_path(G, [1, 3])
+ nx.add_path(G, [1, 4])
+ nx.add_path(G, [3, 2])
+ assert_raises(
+ nx.NetworkXNoPath,
+ _bidirectional_shortest_path,
+ G,
+ 1, 2,
+ ignore_nodes=[1, 2],
+ )
+
+
def validate_path(G, s, t, soln_len, path):
assert_equal(path[0], s)
assert_equal(path[-1], t)
@@ -362,3 +428,30 @@ def test_bidirectional_dijkstra_no_path():
nx.add_path(G, [1, 2, 3])
nx.add_path(G, [4, 5, 6])
path = _bidirectional_dijkstra(G, 1, 6)
+
+
+def test_bidirectional_dijkstra_ignore():
+ G = nx.Graph()
+ nx.add_path(G, [1, 2, 10])
+ nx.add_path(G, [1, 3, 10])
+ assert_raises(
+ nx.NetworkXNoPath,
+ _bidirectional_dijkstra,
+ G,
+ 1, 2,
+ ignore_nodes=[1],
+ )
+ assert_raises(
+ nx.NetworkXNoPath,
+ _bidirectional_dijkstra,
+ G,
+ 1, 2,
+ ignore_nodes=[2],
+ )
+ assert_raises(
+ nx.NetworkXNoPath,
+ _bidirectional_dijkstra,
+ G,
+ 1, 2,
+ ignore_nodes=[1, 2],
+ )
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 2
} | 2.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y libgdal-dev graphviz"
],
"python": "3.6",
"reqs_path": [
"requirements/default.txt",
"requirements/test.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
charset-normalizer==2.0.12
codecov==2.1.13
coverage==6.2
decorator==5.1.1
idna==3.10
importlib-metadata==4.8.3
iniconfig==1.1.1
-e git+https://github.com/networkx/networkx.git@9f6c9cd6a561d41192bc29f14fd9bc16bcaad919#egg=networkx
nose==1.3.7
nose-ignore-docstring==0.2
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
requests==2.27.1
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
zipp==3.6.0
| name: networkx
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- charset-normalizer==2.0.12
- codecov==2.1.13
- coverage==6.2
- decorator==5.1.1
- idna==3.10
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- nose==1.3.7
- nose-ignore-docstring==0.2
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- requests==2.27.1
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- zipp==3.6.0
prefix: /opt/conda/envs/networkx
| [
"networkx/algorithms/community/tests/test_quality.py::test_inter_community_edges_with_digraphs",
"networkx/algorithms/tests/test_simple_paths.py::test_weighted_shortest_simple_path_issue2427",
"networkx/algorithms/tests/test_simple_paths.py::test_directed_weighted_shortest_simple_path_issue2427",
"networkx/algorithms/tests/test_simple_paths.py::test_bidirectional_shortest_path_ignore",
"networkx/algorithms/tests/test_simple_paths.py::test_bidirectional_dijkstra_ignore"
] | [] | [
"networkx/algorithms/community/tests/test_quality.py::TestPerformance::test_bad_partition",
"networkx/algorithms/community/tests/test_quality.py::TestPerformance::test_good_partition",
"networkx/algorithms/community/tests/test_quality.py::TestCoverage::test_bad_partition",
"networkx/algorithms/community/tests/test_quality.py::TestCoverage::test_good_partition",
"networkx/algorithms/community/tests/test_quality.py::test_modularity",
"networkx/algorithms/tests/test_simple_paths.py::TestIsSimplePath::test_empty_list",
"networkx/algorithms/tests/test_simple_paths.py::TestIsSimplePath::test_trivial_path",
"networkx/algorithms/tests/test_simple_paths.py::TestIsSimplePath::test_trivial_nonpath",
"networkx/algorithms/tests/test_simple_paths.py::TestIsSimplePath::test_simple_path",
"networkx/algorithms/tests/test_simple_paths.py::TestIsSimplePath::test_non_simple_path",
"networkx/algorithms/tests/test_simple_paths.py::TestIsSimplePath::test_cycle",
"networkx/algorithms/tests/test_simple_paths.py::TestIsSimplePath::test_missing_node",
"networkx/algorithms/tests/test_simple_paths.py::TestIsSimplePath::test_directed_path",
"networkx/algorithms/tests/test_simple_paths.py::TestIsSimplePath::test_directed_non_path",
"networkx/algorithms/tests/test_simple_paths.py::TestIsSimplePath::test_directed_cycle",
"networkx/algorithms/tests/test_simple_paths.py::TestIsSimplePath::test_multigraph",
"networkx/algorithms/tests/test_simple_paths.py::TestIsSimplePath::test_multidigraph",
"networkx/algorithms/tests/test_simple_paths.py::test_all_simple_paths",
"networkx/algorithms/tests/test_simple_paths.py::test_all_simple_paths_cutoff",
"networkx/algorithms/tests/test_simple_paths.py::test_all_simple_paths_multigraph",
"networkx/algorithms/tests/test_simple_paths.py::test_all_simple_paths_multigraph_with_cutoff",
"networkx/algorithms/tests/test_simple_paths.py::test_all_simple_paths_directed",
"networkx/algorithms/tests/test_simple_paths.py::test_all_simple_paths_empty",
"networkx/algorithms/tests/test_simple_paths.py::test_hamiltonian_path",
"networkx/algorithms/tests/test_simple_paths.py::test_cutoff_zero",
"networkx/algorithms/tests/test_simple_paths.py::test_source_missing",
"networkx/algorithms/tests/test_simple_paths.py::test_target_missing",
"networkx/algorithms/tests/test_simple_paths.py::test_shortest_simple_paths",
"networkx/algorithms/tests/test_simple_paths.py::test_shortest_simple_paths_directed",
"networkx/algorithms/tests/test_simple_paths.py::test_Greg_Bernstein",
"networkx/algorithms/tests/test_simple_paths.py::test_weighted_shortest_simple_path",
"networkx/algorithms/tests/test_simple_paths.py::test_directed_weighted_shortest_simple_path",
"networkx/algorithms/tests/test_simple_paths.py::test_weight_name",
"networkx/algorithms/tests/test_simple_paths.py::test_ssp_source_missing",
"networkx/algorithms/tests/test_simple_paths.py::test_ssp_target_missing",
"networkx/algorithms/tests/test_simple_paths.py::test_ssp_multigraph",
"networkx/algorithms/tests/test_simple_paths.py::test_bidirectional_shortest_path_restricted_cycle",
"networkx/algorithms/tests/test_simple_paths.py::test_bidirectional_shortest_path_restricted_wheel",
"networkx/algorithms/tests/test_simple_paths.py::test_bidirectional_shortest_path_restricted_directed_cycle",
"networkx/algorithms/tests/test_simple_paths.py::test_bidirectional_dijksta_restricted",
"networkx/algorithms/tests/test_simple_paths.py::test_bidirectional_dijkstra_no_path"
] | [] | BSD 3-Clause | 1,766 | 664 | [
"networkx/algorithms/community/quality.py",
"networkx/algorithms/simple_paths.py"
] |
|
smarkets__marge-bot-59 | 48d0576a978af8b71f4971926e345d7d1425a8c0 | 2017-10-15 18:24:53 | 48d0576a978af8b71f4971926e345d7d1425a8c0 | diff --git a/marge/app.py b/marge/app.py
index 576ade9..29b524e 100644
--- a/marge/app.py
+++ b/marge/app.py
@@ -178,7 +178,7 @@ def main(args=sys.argv[1:]):
add_reviewers=options.add_reviewers,
reapprove=options.impersonate_approvers,
embargo=options.embargo,
- ci_timeout=timedelta(seconds=options.ci_timeout),
+ ci_timeout=options.ci_timeout,
)
)
diff --git a/marge/job.py b/marge/job.py
index b2d69fe..ae2b251 100644
--- a/marge/job.py
+++ b/marge/job.py
@@ -63,7 +63,7 @@ class MergeJob(object):
log.exception('Unexpected Git error')
merge_request.comment('Something seems broken on my local git repo; check my logs!')
raise
- except Exception:
+ except Exception as _ex:
log.exception('Unexpected Exception')
merge_request.comment("I'm broken on the inside, please somebody fix me... :cry:")
self.unassign_from_mr(merge_request)
@@ -119,11 +119,6 @@ class MergeJob(object):
log.info('Commit id to merge %r (into: %r)', actual_sha, target_sha)
time.sleep(5)
- if source_project.only_allow_merge_if_pipeline_succeeds:
- self.wait_for_ci_to_pass(source_project.id, actual_sha)
- log.info('CI passed!')
- time.sleep(2)
-
sha_now = Commit.last_on_branch(source_project.id, merge_request.source_branch, api).id
# Make sure no-one managed to race and push to the branch in the
# meantime, because we're about to impersonate the approvers, and
@@ -133,13 +128,18 @@ class MergeJob(object):
# Re-approve the merge request, in case us pushing it has removed
# approvals. Note that there is a bit of a race; effectively
# approval can't be withdrawn after we've pushed (resetting
- # approvals) and CI runs.
+ # approvals)
if self.opts.reapprove:
# approving is not idempotent, so we need to check first that there are no approvals,
# otherwise we'll get a failure on trying to re-instate the previous approvals
current_approvals = merge_request.fetch_approvals()
if not current_approvals.sufficient:
approvals.reapprove()
+
+ if source_project.only_allow_merge_if_pipeline_succeeds:
+ self.wait_for_ci_to_pass(source_project.id, actual_sha)
+ log.info('CI passed!')
+ time.sleep(2)
try:
merge_request.accept(remove_branch=True, sha=actual_sha)
except gitlab.NotAcceptable as err:
| Re-approvals only applied after successful CI run
There is a comment related to this within `marge/job.py`, including for context:
```
# Re-approve the merge request, in case us pushing it has removed
# approvals. Note that there is a bit of a race; effectively
# approval can't be withdrawn after we've pushed (resetting
# approvals) and CI runs.
```
Occasionally CI may fail due to transient network issues that are unrelated to the change made. In this case, Marge will error out and not bother attempting to reapply any approvals. GitLab doesn't remove approvals on CI failure, so it doesn't quite make sense that this happens with Marge.
This also applies to any potential exception that might occur between the force push and applying approvals, we need to restart marge and then manually approve again.
I'm unaware as to whether there is a historical reason for why approvals are reapplied when they are, but could they no be applied immediately after the rebase? | smarkets/marge-bot | diff --git a/tests/test_app.py b/tests/test_app.py
index d8a4705..ed8e64b 100644
--- a/tests/test_app.py
+++ b/tests/test_app.py
@@ -1,10 +1,141 @@
+import contextlib
import datetime
+import os
+import re
+import shlex
+import unittest.mock as mock
+from functools import wraps
-from marge.app import time_interval
+import pytest
+
+import marge.app as app
+import marge.bot as bot
+import marge.interval as interval
+import marge.job as job
+
+import tests.gitlab_api_mock as gitlab_mock
+from tests.test_user import INFO as user_info
+
+
[email protected]
+def env(**kwargs):
+ original = os.environ.copy()
+
+ os.environ.clear()
+ for k, v in kwargs.items():
+ os.environ[k] = v
+
+ yield
+
+ os.environ.clear()
+ for k, v in original.items():
+ os.environ[k] = v
+
+
[email protected]
+def main(cmdline=''):
+ def api_mock(gitlab_url, auth_token):
+ assert gitlab_url == 'http://foo.com'
+ assert auth_token in ('NON-ADMIN-TOKEN', 'ADMIN-TOKEN')
+ api = gitlab_mock.Api(gitlab_url=gitlab_url, auth_token=auth_token, initial_state='initial')
+ user_info_for_token = dict(user_info, is_admin=auth_token == 'ADMIN-TOKEN')
+ api.add_user(user_info_for_token, is_current=True)
+ return api
+
+ class DoNothingBot(bot.Bot):
+ instance = None
+
+ def start(self):
+ assert self.__class__.instance is None
+ self.__class__.instance = self
+
+ @property
+ def config(self):
+ return self._config
+
+ with mock.patch('marge.bot.Bot', new=DoNothingBot), mock.patch('marge.gitlab.Api', new=api_mock):
+ app.main(args=shlex.split(cmdline))
+ the_bot = DoNothingBot.instance
+ assert the_bot is not None
+ yield the_bot
+
+
+def test_default_values():
+ with env(MARGE_AUTH_TOKEN="NON-ADMIN-TOKEN", MARGE_SSH_KEY="KEY", MARGE_GITLAB_URL='http://foo.com'):
+ with main() as bot:
+ assert bot.user.info == user_info
+ assert bot.config.project_regexp == re.compile('.*')
+ assert bot.config.git_timeout == datetime.timedelta(seconds=120)
+ assert bot.config.merge_opts == job.MergeJobOptions.default()
+
+def test_embargo():
+ with env(MARGE_AUTH_TOKEN="NON-ADMIN-TOKEN", MARGE_SSH_KEY="KEY", MARGE_GITLAB_URL='http://foo.com'):
+ with main('--embargo="Fri 1pm-Mon 7am"') as bot:
+ assert bot.config.merge_opts == job.MergeJobOptions.default(
+ embargo=interval.IntervalUnion.from_human('Fri 1pm-Mon 7am'),
+ )
+
+def test_add_tested():
+ with env(MARGE_AUTH_TOKEN="NON-ADMIN-TOKEN", MARGE_SSH_KEY="KEY", MARGE_GITLAB_URL='http://foo.com'):
+ with main('--add-tested') as bot:
+ assert bot.config.merge_opts != job.MergeJobOptions.default()
+ assert bot.config.merge_opts == job.MergeJobOptions.default(add_tested=True)
+
+def test_add_part_of():
+ with env(MARGE_AUTH_TOKEN="NON-ADMIN-TOKEN", MARGE_SSH_KEY="KEY", MARGE_GITLAB_URL='http://foo.com'):
+ with main('--add-part-of') as bot:
+ assert bot.config.merge_opts != job.MergeJobOptions.default()
+ assert bot.config.merge_opts == job.MergeJobOptions.default(add_part_of=True)
+
+def test_add_reviewers():
+ with env(MARGE_AUTH_TOKEN="NON-ADMIN-TOKEN", MARGE_SSH_KEY="KEY", MARGE_GITLAB_URL='http://foo.com'):
+ with pytest.raises(AssertionError):
+ with main('--add-reviewers') as bot:
+ pass
+
+ with env(MARGE_AUTH_TOKEN="ADMIN-TOKEN", MARGE_SSH_KEY="KEY", MARGE_GITLAB_URL='http://foo.com'):
+ with main('--add-reviewers') as bot:
+ assert bot.config.merge_opts != job.MergeJobOptions.default()
+ assert bot.config.merge_opts == job.MergeJobOptions.default(add_reviewers=True)
+
+
+def test_impersonate_approvers():
+ with env(MARGE_AUTH_TOKEN="NON-ADMIN-TOKEN", MARGE_SSH_KEY="KEY", MARGE_GITLAB_URL='http://foo.com'):
+ with pytest.raises(AssertionError):
+ with main('--impersonate-approvers') as bot:
+ pass
+
+ with env(MARGE_AUTH_TOKEN="ADMIN-TOKEN", MARGE_SSH_KEY="KEY", MARGE_GITLAB_URL='http://foo.com'):
+ with main('--impersonate-approvers') as bot:
+ assert bot.config.merge_opts != job.MergeJobOptions.default()
+ assert bot.config.merge_opts == job.MergeJobOptions.default(reapprove=True)
+
+
+def test_project_regexp():
+ with env(MARGE_AUTH_TOKEN="NON-ADMIN-TOKEN", MARGE_SSH_KEY="KEY", MARGE_GITLAB_URL='http://foo.com'):
+ with main("--project-regexp='foo.*bar'") as bot:
+ assert bot.config.project_regexp == re.compile('foo.*bar')
+
+def test_ci_timeout():
+ with env(MARGE_AUTH_TOKEN="NON-ADMIN-TOKEN", MARGE_SSH_KEY="KEY", MARGE_GITLAB_URL='http://foo.com'):
+ with main("--ci-timeout 5m") as bot:
+ assert bot.config.merge_opts != job.MergeJobOptions.default()
+ assert bot.config.merge_opts == job.MergeJobOptions.default(ci_timeout=datetime.timedelta(seconds=5*60))
+
+def test_deprecated_max_ci_time_in_minutes():
+ with env(MARGE_AUTH_TOKEN="NON-ADMIN-TOKEN", MARGE_SSH_KEY="KEY", MARGE_GITLAB_URL='http://foo.com'):
+ with main("--max-ci-time-in-minutes=5") as bot:
+ assert bot.config.merge_opts != job.MergeJobOptions.default()
+ assert bot.config.merge_opts == job.MergeJobOptions.default(ci_timeout=datetime.timedelta(seconds=5*60))
+
+def test_git_timeout():
+ with env(MARGE_AUTH_TOKEN="NON-ADMIN-TOKEN", MARGE_SSH_KEY="KEY", MARGE_GITLAB_URL='http://foo.com'):
+ with main("--git-timeout '150 s'") as bot:
+ assert bot.config.git_timeout == datetime.timedelta(seconds=150)
# FIXME: I'd reallly prefer this to be a doctest, but adding --doctest-modules
# seems to seriously mess up the test run
def test_time_interval():
_900s = datetime.timedelta(0, 900)
- assert [time_interval(x) for x in ['15min', '15min', '.25h', '900s']] == [_900s] * 4
+ assert [app.time_interval(x) for x in ['15min', '15m', '.25h', '900s']] == [_900s] * 4
diff --git a/tests/test_job.py b/tests/test_job.py
index e9b0639..4e9031b 100644
--- a/tests/test_job.py
+++ b/tests/test_job.py
@@ -88,6 +88,11 @@ class MockLab(object):
Ok(_commit(id=rewritten_sha, status='success')),
from_state=['passed', 'merged'],
)
+ api.add_transition(
+ GET('/projects/1234/repository/branches/useless_new_feature'),
+ Ok({'commit': _commit(id=rewritten_sha, status='running')}),
+ from_state='pushed',
+ )
api.add_transition(
GET('/projects/1234/repository/branches/useless_new_feature'),
Ok({'commit': _commit(id=rewritten_sha, status='success')}),
@@ -192,14 +197,14 @@ class TestRebaseAndAccept(object):
api.add_transition(
GET('/projects/1234/repository/branches/useless_new_feature'),
Ok({'commit': _commit(id=new_branch_head_sha, status='success')}),
- from_state='passed', to_state='passed_but_head_changed'
+ from_state='pushed', to_state='pushed_but_head_changed'
)
with patch('marge.job.push_rebased_and_rewritten_version', side_effect=mocklab.push_rebased):
with mocklab.expected_failure("Someone pushed to branch while we were trying to merge"):
job = self.make_job(marge.job.MergeJobOptions.default(add_tested=True, add_reviewers=False))
job.execute()
- assert api.state == 'passed_but_head_changed'
+ assert api.state == 'pushed_but_head_changed'
assert api.notes == ["I couldn't merge this branch: Someone pushed to branch while we were trying to merge"]
def test_succeeds_second_time_if_master_moved(self, time_sleep):
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 2
} | 0.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | astroid==2.11.7
attrs==22.2.0
backports.zoneinfo==0.2.1
certifi==2021.5.30
charset-normalizer==2.0.12
coverage==6.2
dateparser==1.1.3
dill==0.3.4
humanize==3.14.0
idna==3.10
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig==1.1.1
isort==5.10.1
lazy-object-proxy==1.7.1
-e git+https://github.com/smarkets/marge-bot.git@48d0576a978af8b71f4971926e345d7d1425a8c0#egg=marge
maya==0.6.1
mccabe==0.7.0
packaging==21.3
pendulum==2.1.2
platformdirs==2.4.0
pluggy==1.0.0
py==1.11.0
pylint==2.13.9
pyparsing==3.1.4
pytest==7.0.1
pytest-cov==4.0.0
python-dateutil==2.9.0.post0
pytz==2025.2
pytz-deprecation-shim==0.1.0.post0
pytzdata==2020.1
regex==2022.3.2
requests==2.27.1
six==1.17.0
snaptime==0.2.4
tomli==1.2.3
typed-ast==1.5.5
typing_extensions==4.1.1
tzdata==2025.2
tzlocal==4.2
urllib3==1.26.20
wrapt==1.16.0
zipp==3.6.0
| name: marge-bot
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- astroid==2.11.7
- attrs==22.2.0
- backports-zoneinfo==0.2.1
- charset-normalizer==2.0.12
- coverage==6.2
- dateparser==1.1.3
- dill==0.3.4
- humanize==3.14.0
- idna==3.10
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- iniconfig==1.1.1
- isort==5.10.1
- lazy-object-proxy==1.7.1
- maya==0.6.1
- mccabe==0.7.0
- packaging==21.3
- pendulum==2.1.2
- platformdirs==2.4.0
- pluggy==1.0.0
- py==1.11.0
- pylint==2.13.9
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-cov==4.0.0
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pytz-deprecation-shim==0.1.0.post0
- pytzdata==2020.1
- regex==2022.3.2
- requests==2.27.1
- six==1.17.0
- snaptime==0.2.4
- tomli==1.2.3
- typed-ast==1.5.5
- typing-extensions==4.1.1
- tzdata==2025.2
- tzlocal==4.2
- urllib3==1.26.20
- wrapt==1.16.0
- zipp==3.6.0
prefix: /opt/conda/envs/marge-bot
| [
"tests/test_app.py::test_default_values",
"tests/test_app.py::test_embargo",
"tests/test_app.py::test_add_tested",
"tests/test_app.py::test_add_part_of",
"tests/test_app.py::test_add_reviewers",
"tests/test_app.py::test_impersonate_approvers",
"tests/test_app.py::test_project_regexp",
"tests/test_app.py::test_ci_timeout",
"tests/test_app.py::test_deprecated_max_ci_time_in_minutes",
"tests/test_app.py::test_git_timeout",
"tests/test_job.py::TestRebaseAndAccept::test_fails_on_not_acceptable_if_master_did_not_move"
] | [] | [
"tests/test_app.py::test_time_interval",
"tests/test_job.py::TestRebaseAndAccept::test_succeeds_first_time",
"tests/test_job.py::TestRebaseAndAccept::test_succeeds_second_time_if_master_moved",
"tests/test_job.py::TestRebaseAndAccept::test_handles_races_for_merging",
"tests/test_job.py::TestRebaseAndAccept::test_handles_request_becoming_wip_after_push",
"tests/test_job.py::TestRebaseAndAccept::test_guesses_git_hook_error_on_merge_refusal",
"tests/test_job.py::TestRebaseAndAccept::test_tells_explicitly_that_gitlab_refused_to_merge",
"tests/test_job.py::TestRebaseAndAccept::test_wont_merge_wip_stuff",
"tests/test_job.py::TestRebaseAndAccept::test_wont_merge_branches_with_autosquash_if_rewriting",
"tests/test_job.py::TestMergeJobOptions::test_default",
"tests/test_job.py::TestMergeJobOptions::test_default_ci_time"
] | [] | BSD 3-Clause "New" or "Revised" License | 1,767 | 645 | [
"marge/app.py",
"marge/job.py"
] |
|
stfc__fparser-47 | 108c0f201abbabbde3ceb9fcc3ff7ffa4b0c3dbd | 2017-10-18 12:31:33 | d2feb470be5b707937c384b074f92762df0a2481 | coveralls:
[](https://coveralls.io/builds/13772596)
Coverage increased (+0.04%) to 82.144% when pulling **39d093df3ba0182d045aea2f651a512d9e8f59aa on open_wo_named_unit** into **3cc5c52f00654edb58b6a7f8bfba6e7225818045 on master**.
arporter: I've just tested this branch with the ORCA2_LIM configuration of NEMO. Modulo the problem with INCLUDE statements (they get removed), fparser2 successfully parses and re-generates the whole code base which I've then compiled (with the Intel compiler).
arporter: In trying to get test coverage for the missed lines highlighted in the review I discovered that the parser failed if a keyword is misspelt (e.g "aunit=23" instead of "unit=23"). I've fixed that bug and obtained coverage of the modified lines. It's probable that this bug will exist in other places in the code but I haven't checked. This possibly points to the need to have a KeywordValueList base class that would cut down on code duplication. | diff --git a/src/fparser/Fortran2003.py b/src/fparser/Fortran2003.py
index 429ca80..20f3dca 100644
--- a/src/fparser/Fortran2003.py
+++ b/src/fparser/Fortran2003.py
@@ -161,8 +161,16 @@ class Base(ComparableMixin):
subclasses = {}
@show_result
- def __new__(cls, string, parent_cls = None):
+ def __new__(cls, string, parent_cls=None):
"""
+ Create a new instance of this object.
+
+ :param cls: the class of object to create
+ :type cls: :py:type:`type`
+ :param string: (source of) Fortran string to parse
+ :type string: str or :py:class:`FortranReaderBase`
+ :param parent_cls: the parent class of this object
+ :type parent_cls: :py:type:`type`
"""
if parent_cls is None:
parent_cls = [cls]
@@ -218,12 +226,9 @@ class Base(ComparableMixin):
obj = None
if obj is not None:
return obj
-
else:
raise AssertionError(repr(result))
errmsg = '%s: %r' % (cls.__name__, string)
- #if isinstance(string, FortranReaderBase) and string.fifo_item:
- # errmsg += ' while reaching %s' % (string.fifo_item[-1])
raise NoMatchError(errmsg)
## def restore_reader(self):
@@ -5379,6 +5384,7 @@ class Internal_File_Variable(Base): # R903
"""
subclass_names = ['Char_Variable']
+
class Open_Stmt(StmtBase, CALLBase): # R904
"""
<open-stmt> = OPEN ( <connect-spec-list> )
@@ -5387,10 +5393,15 @@ class Open_Stmt(StmtBase, CALLBase): # R904
use_names = ['Connect_Spec_List']
@staticmethod
def match(string):
- return CALLBase.match('OPEN', Connect_Spec_List, string, require_rhs=True)
+ # The Connect_Spec_List class is generated automatically
+ # by code at the end of this module
+ return CALLBase.match('OPEN', Connect_Spec_List, string,
+ require_rhs=True)
-class Connect_Spec(KeywordValueBase): # R905
+
+class Connect_Spec(KeywordValueBase):
"""
+ R905
<connect-spec> = [ UNIT = ] <file-unit-number>
| ACCESS = <scalar-default-char-expr>
| ACTION = <scalar-default-char-expr>
@@ -5412,26 +5423,40 @@ class Connect_Spec(KeywordValueBase): # R905
| STATUS = <scalar-default-char-expr>
"""
subclass_names = []
- use_names = ['File_Unit_Number', 'Scalar_Default_Char_Expr', 'Label', 'File_Name_Expr', 'Iomsg_Variable',
+ use_names = ['File_Unit_Number', 'Scalar_Default_Char_Expr', 'Label',
+ 'File_Name_Expr', 'Iomsg_Variable',
'Scalar_Int_Expr', 'Scalar_Int_Variable']
+
+ @staticmethod
def match(string):
- for (k,v) in [\
- (['ACCESS','ACTION','ASYNCHRONOUS','BLANK','DECIMAL','DELIM','ENCODING',
- 'FORM','PAD','POSITION','ROUND','SIGN','STATUS'], Scalar_Default_Char_Expr),
- ('ERR', Label),
- ('FILE',File_Name_Expr),
- ('IOSTAT', Scalar_Int_Variable),
- ('IOMSG', Iomsg_Variable),
- ('RECL', Scalar_Int_Expr),
- ('UNIT', File_Unit_Number),
- ]:
+ '''
+ :param str string: Fortran code to check for a match
+ :return: 2-tuple containing the keyword and value or None if the
+ supplied string is not a match
+ :rtype: 2-tuple containing keyword (e.g. "UNIT") and associated value
+ '''
+ if "=" not in string:
+ # The only argument which need not be named is the unit number
+ return 'UNIT', File_Unit_Number(string)
+ # We have a keyword-value pair. Check whether it is valid...
+ for (keyword, value) in [
+ (['ACCESS', 'ACTION', 'ASYNCHRONOUS', 'BLANK', 'DECIMAL',
+ 'DELIM', 'ENCODING', 'FORM', 'PAD', 'POSITION', 'ROUND',
+ 'SIGN', 'STATUS'], Scalar_Default_Char_Expr),
+ ('ERR', Label),
+ ('FILE', File_Name_Expr),
+ ('IOSTAT', Scalar_Int_Variable),
+ ('IOMSG', Iomsg_Variable),
+ ('RECL', Scalar_Int_Expr),
+ ('UNIT', File_Unit_Number)]:
try:
- obj = KeywordValueBase.match(k, v, string, upper_lhs = True)
+ obj = KeywordValueBase.match(keyword, value, string,
+ upper_lhs=True)
except NoMatchError:
obj = None
- if obj is not None: return obj
- return 'UNIT', File_Unit_Number
- match = staticmethod(match)
+ if obj is not None:
+ return obj
+ return None
class File_Name_Expr(Base): # R906
@@ -6027,7 +6052,7 @@ items : (Inquire_Spec_List, Scalar_Int_Variable, Output_Item_List)
class Inquire_Spec(KeywordValueBase): # R930
"""
-:F03R:`930`::
+ :F03R:`930`::
<inquire-spec> = [ UNIT = ] <file-unit-number>
| FILE = <file-name-expr>
| ACCESS = <scalar-default-char-variable>
@@ -6065,9 +6090,9 @@ class Inquire_Spec(KeywordValueBase): # R930
| UNFORMATTED = <scalar-default-char-variable>
| WRITE = <scalar-default-char-variable>
-Attributes
-----------
-items : (str, instance)
+ Attributes
+ ----------
+ items : (str, instance)
"""
subclass_names = []
use_names = ['File_Unit_Number', 'File_Name_Expr',
@@ -6077,6 +6102,18 @@ items : (str, instance)
@staticmethod
def match(string):
+ '''
+ :param str string: The string to check for conformance with an
+ Inquire_Spec
+ :return: 2-tuple of name (e.g. "UNIT") and value or None if
+ string is not a valid Inquire_Spec
+ :rtype: 2-tuple where first object represents the name and the
+ second the value.
+ '''
+ if "=" not in string:
+ # The only argument which need not be named is the unit number
+ return 'UNIT', File_Unit_Number(string)
+ # We have a keyword-value pair. Check whether it is valid...
for (keyword, value) in [
(['ACCESS', 'ACTION', 'ASYNCHRONOUS', 'BLANK', 'DECIMAL',
'DELIM', 'DIRECT', 'ENCODING', 'FORM', 'NAME', 'PAD',
@@ -6092,11 +6129,14 @@ items : (str, instance)
('IOMSG', Iomsg_Variable),
('FILE', File_Name_Expr),
('UNIT', File_Unit_Number)]:
- obj = KeywordValueBase.match(keyword, value, string,
- upper_lhs=True)
+ try:
+ obj = KeywordValueBase.match(keyword, value, string,
+ upper_lhs=True)
+ except NoMatchError:
+ obj = None
if obj is not None:
return obj
- return 'UNIT', File_Unit_Number(string)
+ return None
###############################################################################
############################### SECTION 10 ####################################
@@ -7561,14 +7601,16 @@ ClassType = type(Base)
_names = dir()
for clsname in _names:
cls = eval(clsname)
- if not (isinstance(cls, ClassType) and issubclass(cls, Base) and not cls.__name__.endswith('Base')): continue
+ if not (isinstance(cls, ClassType) and issubclass(cls, Base) and
+ not cls.__name__.endswith('Base')):
+ continue
names = getattr(cls, 'subclass_names', []) + getattr(cls, 'use_names', [])
for n in names:
if n in _names: continue
if n.endswith('_List'):
_names.append(n)
n = n[:-5]
- #print 'Generating %s_List' % (n)
+ # Generate 'list' class
exec('''\
class %s_List(SequenceBase):
subclass_names = [\'%s\']
| fparser2: generate correct OPEN call when unit number argument is not named
When parsing the following OPEN call:
OPEN( idrst, FILE = TRIM(cdname), FORM = 'unformatted', ACCESS = 'direct' &
& , RECL = 8, STATUS = 'old', ACTION = 'read', IOSTAT = ios, ERR = 987 )
fparser2 generates:
OPEN(UNIT = <class 'fparser.Fortran2003.File_Unit_Number'>, FILE = TRIM(cdname), FORM = 'unformatted', ACCESS = 'direct', RECL = 8, STATUS = 'old', ACTION = 'read', IOSTAT = ios, ERR = 987)
i.e. the fact that the unit number argument isn't named in the original call appears to cause problems. | stfc/fparser | diff --git a/src/fparser/tests/fparser2/test_Fortran2003.py b/src/fparser/tests/fparser2/test_Fortran2003.py
index 9b33289..e373b81 100644
--- a/src/fparser/tests/fparser2/test_Fortran2003.py
+++ b/src/fparser/tests/fparser2/test_Fortran2003.py
@@ -2811,7 +2811,7 @@ def test_Inquire_Stmt(): # R929
def test_Inquire_Spec(): # R930
''' Test that we recognise the various possible forms of
- inquire list '''
+ entries in an inquire list '''
cls = Inquire_Spec
obj = cls('1')
assert isinstance(obj, cls), repr(obj)
@@ -2837,6 +2837,128 @@ def test_Inquire_Spec(): # R930
assert_equal(str(obj), 'DIRECT = a')
+def test_Inquire_Spec_List(): # pylint: disable=invalid-name
+ ''' Test that we recognise the various possible forms of
+ inquire list - R930
+ '''
+ # Inquire_Spec_List is generated at runtime in Fortran2003.py
+ cls = Inquire_Spec_List
+
+ obj = cls('unit=23, file="a_file.dat"')
+ assert isinstance(obj, cls)
+ assert str(obj) == 'UNIT = 23, FILE = "a_file.dat"'
+
+ # Invalid list (afile= instead of file=)
+ with pytest.raises(NoMatchError) as excinfo:
+ _ = cls('unit=23, afile="a_file.dat"')
+ assert "NoMatchError: Inquire_Spec_List: 'unit=23, afile=" in str(excinfo)
+
+
+def test_Open_Stmt():
+ ''' Check that we correctly parse and re-generate the various forms
+ of OPEN statement (R904)'''
+ cls = Open_Stmt
+ obj = cls("open(23, file='some_file.txt')")
+ assert isinstance(obj, cls)
+ assert str(obj) == "OPEN(UNIT = 23, FILE = 'some_file.txt')"
+ obj = cls("open(unit=23, file='some_file.txt')")
+ assert isinstance(obj, cls)
+ assert str(obj) == "OPEN(UNIT = 23, FILE = 'some_file.txt')"
+
+
+def test_Connect_Spec():
+ ''' Tests for individual elements of Connect_Spec (R905) '''
+ cls = Connect_Spec
+ # Incorrect name for a member of the list
+ with pytest.raises(NoMatchError) as excinfo:
+ _ = cls("afile='a_file.dat'")
+ assert 'NoMatchError: Connect_Spec: "afile=' in str(excinfo)
+
+
+def test_Connect_Spec_List(): # pylint: disable=invalid-name
+ '''
+ Check that we correctly parse the various valid forms of
+ connect specification (R905)
+ '''
+ cls = Connect_Spec_List
+ obj = cls("22, access='direct'")
+ assert isinstance(obj, cls)
+ assert str(obj) == "UNIT = 22, ACCESS = 'direct'"
+
+ obj = cls("22, action='read'")
+ assert isinstance(obj, cls)
+ assert str(obj) == "UNIT = 22, ACTION = 'read'"
+
+ obj = cls("22, asynchronous='YES'")
+ assert isinstance(obj, cls)
+ assert str(obj) == "UNIT = 22, ASYNCHRONOUS = 'YES'"
+
+ obj = cls("22, blank='NULL'")
+ assert isinstance(obj, cls)
+ assert str(obj) == "UNIT = 22, BLANK = 'NULL'"
+
+ obj = cls("22, decimal='COMMA'")
+ assert isinstance(obj, cls)
+ assert str(obj) == "UNIT = 22, DECIMAL = 'COMMA'"
+
+ obj = cls("22, delim='APOSTROPHE'")
+ assert isinstance(obj, cls)
+ assert str(obj) == "UNIT = 22, DELIM = 'APOSTROPHE'"
+
+ obj = cls("22, err=109")
+ assert isinstance(obj, cls)
+ assert str(obj) == "UNIT = 22, ERR = 109"
+
+ obj = cls("22, encoding='DEFAULT'")
+ assert isinstance(obj, cls)
+ assert str(obj) == "UNIT = 22, ENCODING = 'DEFAULT'"
+
+ obj = cls("22, file='a_file.dat'")
+ assert isinstance(obj, cls)
+ assert str(obj) == "UNIT = 22, FILE = 'a_file.dat'"
+
+ obj = cls("22, file='a_file.dat', form='FORMATTED'")
+ assert isinstance(obj, cls)
+ assert str(obj) == "UNIT = 22, FILE = 'a_file.dat', FORM = 'FORMATTED'"
+
+ obj = cls("22, file='a_file.dat', iomsg=my_string")
+ assert isinstance(obj, cls)
+ assert str(obj) == "UNIT = 22, FILE = 'a_file.dat', IOMSG = my_string"
+
+ obj = cls("22, file='a_file.dat', iostat=ierr")
+ assert isinstance(obj, cls)
+ assert str(obj) == "UNIT = 22, FILE = 'a_file.dat', IOSTAT = ierr"
+
+ obj = cls("22, file='a_file.dat', pad='YES'")
+ assert isinstance(obj, cls)
+ assert str(obj) == "UNIT = 22, FILE = 'a_file.dat', PAD = 'YES'"
+
+ obj = cls("22, file='a_file.dat', position='APPEND'")
+ assert isinstance(obj, cls)
+ assert str(obj) == "UNIT = 22, FILE = 'a_file.dat', POSITION = 'APPEND'"
+
+ obj = cls("22, file='a_file.dat', recl=100")
+ assert isinstance(obj, cls)
+ assert str(obj) == "UNIT = 22, FILE = 'a_file.dat', RECL = 100"
+
+ obj = cls("22, file='a_file.dat', round='UP'")
+ assert isinstance(obj, cls)
+ assert str(obj) == "UNIT = 22, FILE = 'a_file.dat', ROUND = 'UP'"
+
+ obj = cls("22, file='a_file.dat', sign='PLUS'")
+ assert isinstance(obj, cls)
+ assert str(obj) == "UNIT = 22, FILE = 'a_file.dat', SIGN = 'PLUS'"
+
+ obj = cls("22, file='a_file.dat', sign='PLUS', status='OLD'")
+ assert isinstance(obj, cls)
+ assert str(obj) == ("UNIT = 22, FILE = 'a_file.dat', SIGN = 'PLUS', "
+ "STATUS = 'OLD'")
+
+ # Incorrect name for a member of the list
+ with pytest.raises(NoMatchError) as excinfo:
+ _ = cls("unit=22, afile='a_file.dat', sign='PLUS', status='OLD'")
+ assert 'NoMatchError: Connect_Spec_List: "unit=22, afile=' in str(excinfo)
+
###############################################################################
############################### SECTION 10 ####################################
@@ -3664,42 +3786,43 @@ def test_Contains(): # R1237
if 0:
- nof_needed_tests = 0
- nof_needed_match = 0
- total_needs = 0
- total_classes = 0
- for name in dir():
- obj = eval(name)
- if not isinstance(obj, ClassType): continue
- if not issubclass(obj, Base): continue
- clsname = obj.__name__
- if clsname.endswith('Base'): continue
- total_classes += 1
- subclass_names = obj.__dict__.get('subclass_names',None)
- use_names = obj.__dict__.get('use_names',None)
- if not use_names: continue
- match = obj.__dict__.get('match',None)
+ NOF_NEEDED_TESTS = 0
+ NOF_NEEDED_MATCH = 0
+ TOTAL_NEEDS = 0
+ TOTAL_CLASSES = 0
+ for NAME in dir():
+ OBJ = eval(NAME)
+ if not isinstance(OBJ, ClassType): continue
+ if not issubclass(OBJ, Base): continue
+ CLSNAME = OBJ.__name__
+ if CLSNAME.endswith('Base'): continue
+ TOTAL_CLASSES += 1
+ SUBCLASS_NAMES = OBJ.__dict__.get('subclass_names', None)
+ USE_NAMES = OBJ.__dict__.get('use_names', None)
+ if not USE_NAMES: continue
+ MATCH = OBJ.__dict__.get('match', None)
try:
- test_cls = eval('test_%s' % (clsname))
+ TEST_CLS = eval('test_{0}'.format(CLSNAME))
except NameError:
- test_cls = None
- total_needs += 1
- if match is None:
- if test_cls is None:
- print('Needs tests:', clsname)
- print('Needs match implementation:', clsname)
- nof_needed_tests += 1
- nof_needed_match += 1
+ TEST_CLS = None
+ TOTAL_NEEDS += 1
+ if MATCH is None:
+ if TEST_CLS is None:
+ print('Needs tests:', CLSNAME)
+ print('Needs match implementation:', CLSNAME)
+ NOF_NEEDED_TESTS += 1
+ NOF_NEEDED_MATCH += 1
else:
- print('Needs match implementation:', clsname)
- nof_needed_match += 1
+ print('Needs match implementation:', CLSNAME)
+ NOF_NEEDED_MATCH += 1
else:
- if test_cls is None:
- print('Needs tests:', clsname)
- nof_needed_tests += 1
+ if TEST_CLS is None:
+ print('Needs tests:', CLSNAME)
+ NOF_NEEDED_TESTS += 1
continue
print('-----')
- print('Nof match implementation needs:',nof_needed_match,'out of',total_needs)
- print('Nof tests needs:',nof_needed_tests,'out of',total_needs)
- print('Total number of classes:',total_classes)
+ print('Nof match implementation needs:', NOF_NEEDED_MATCH,
+ 'out of', TOTAL_NEEDS)
+ print('Nof tests needs:', NOF_NEEDED_TESTS, 'out of', TOTAL_NEEDS)
+ print('Total number of classes:', TOTAL_CLASSES)
print('-----')
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 3
},
"num_modified_files": 1
} | 0.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"numpy>=1.16.0",
"pandas>=1.0.0",
"six",
"pytest-asyncio"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup==1.2.2
-e git+https://github.com/stfc/fparser.git@108c0f201abbabbde3ceb9fcc3ff7ffa4b0c3dbd#egg=fparser
iniconfig==2.1.0
nose==1.3.7
numpy==2.0.2
packaging==24.2
pandas==2.2.3
pluggy==1.5.0
pytest==8.3.5
pytest-asyncio==0.26.0
python-dateutil==2.9.0.post0
pytz==2025.2
six==1.17.0
tomli==2.2.1
typing_extensions==4.13.0
tzdata==2025.2
| name: fparser
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- nose==1.3.7
- numpy==2.0.2
- packaging==24.2
- pandas==2.2.3
- pluggy==1.5.0
- pytest==8.3.5
- pytest-asyncio==0.26.0
- python-dateutil==2.9.0.post0
- pytz==2025.2
- six==1.17.0
- tomli==2.2.1
- typing-extensions==4.13.0
- tzdata==2025.2
prefix: /opt/conda/envs/fparser
| [
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Open_Stmt"
] | [
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Inquire_Spec_List",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Connect_Spec",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Connect_Spec_List",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Edit_Desc"
] | [
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Program",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Specification_Part",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Name",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Literal_Constant",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Type_Param_Value",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Intrinsic_Type_Spec",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Kind_Selector",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Signed_Int_Literal_Constant",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Int_Literal_Constant",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Binary_Constant",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Octal_Constant",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Hex_Constant",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Signed_Real_Literal_Constant",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Real_Literal_Constant",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Char_Selector",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Complex_Literal_Constant",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Type_Name",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Length_Selector",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Char_Length",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Char_Literal_Constant",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Logical_Literal_Constant",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Derived_Type_Stmt",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Type_Attr_Spec",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_End_Type_Stmt",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Sequence_Stmt",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Type_Param_Def_Stmt",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Type_Param_Decl",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Type_Param_Attr_Spec",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Component_Attr_Spec",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Component_Decl",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Proc_Component_Def_Stmt",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Type_Bound_Procedure_Part",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Proc_Binding_Stmt",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Specific_Binding",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Generic_Binding",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Final_Binding",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Derived_Type_Spec",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Type_Param_Spec",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Type_Param_Spec_List",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Structure_Constructor_2",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Structure_Constructor",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Component_Spec",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Component_Spec_List",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Enum_Def",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Enum_Def_Stmt",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Array_Constructor",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Ac_Spec",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Ac_Value_List",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Ac_Implied_Do",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Ac_Implied_Do_Control",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Type_Declaration_Stmt",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Declaration_Type_Spec",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Attr_Spec",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Dimension_Attr_Spec",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Intent_Attr_Spec",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Entity_Decl",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Target_Entity_Decl",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Access_Spec",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Language_Binding_Spec",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Explicit_Shape_Spec",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Upper_Bound",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Assumed_Shape_Spec",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Deferred_Shape_Spec",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Assumed_Size_Spec",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Access_Stmt",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Data_Stmt",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Data_Stmt_Set",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Data_Implied_Do",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Dimension_Stmt",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Intent_Stmt",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Optional_Stmt",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Parameter_Stmt",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Named_Constant_Def",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Pointer_Stmt",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Pointer_Decl",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Protected_Stmt",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Save_Stmt",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Saved_Entity",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Target_Stmt",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Value_Stmt",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Volatile_Stmt",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Implicit_Stmt",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Implicit_Spec",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Letter_Spec",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Namelist_Stmt",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Equivalence_Stmt",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Common_Stmt",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Common_Block_Object",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Substring",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Substring_Range",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Data_Ref",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Part_Ref",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Type_Param_Inquiry",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Array_Section",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Section_Subscript",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Section_Subscript_List",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Subscript_Triplet",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Allocate_Stmt",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Alloc_Opt",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Nullify_Stmt",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Deallocate_Stmt",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Primary",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Parenthesis",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Level_1_Expr",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Mult_Operand",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Add_Operand",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Level_2_Expr",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Level_2_Unary_Expr",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Level_3_Expr",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Level_4_Expr",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_And_Operand",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Or_Operand",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Equiv_Operand",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Level_5_Expr",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Expr",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Logical_Expr",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Logical_Initialization_Expr",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Assignment_Stmt",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Pointer_Assignment_Stmt",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Proc_Component_Ref",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Where_Stmt",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Where_Construct",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Where_Construct_Stmt",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Forall_Construct",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Forall_Header",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Forall_Triplet_Spec",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_If_Construct",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_if_nonblock_do",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Case_Construct",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Case_Selector",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Associate_Construct",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Select_Type_Construct",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Select_Type_Stmt",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Type_Guard_Stmt",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Block_Label_Do_Construct",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Block_Nonlabel_Do_Construct",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Label_Do_Stmt",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Nonblock_Do_Construct",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Continue_Stmt",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Stop_Stmt",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Io_Unit",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_read_stmt",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_write_stmt",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Print_Stmt",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Io_Control_Spec",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Io_Control_Spec_List",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Format",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Io_Implied_Do",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Io_Implied_Do_Control",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Wait_Stmt",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Wait_Spec",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Backspace_Stmt",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Endfile_Stmt",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Rewind_Stmt",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Position_Spec",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Flush_Stmt",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Flush_Spec",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Inquire_Stmt",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Inquire_Spec",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Format_Stmt",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Format_Specification",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Format_Item",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Format_Item_List",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Main_Program",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Module",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Module_Subprogram_Part",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Use_Stmt",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Module_Nature",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Rename",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Block_Data",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Interface_Block",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Interface_Specification",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Interface_Stmt",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_End_Interface_Stmt",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Interface_Body",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Subroutine_Body",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Function_Body",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Procedure_Stmt",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Generic_Spec",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Dtio_Generic_Spec",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Import_Stmt",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_External_Stmt",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Procedure_Declaration_Stmt",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Proc_Attr_Spec",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Proc_Decl",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Intrinsic_Stmt",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Function_Reference",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Call_Stmt",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Procedure_Designator",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Actual_Arg_Spec",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Actual_Arg_Spec_List",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Alt_Return_Spec",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Function_Subprogram",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Function_Stmt",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Dummy_Arg_Name",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Prefix",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Prefix_Spec",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Suffix",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_End_Function_Stmt",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Subroutine_Subprogram",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Subroutine_Stmt",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Dummy_Arg",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_End_Subroutine_Stmt",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Entry_Stmt",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Return_Stmt",
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Contains"
] | [] | BSD License | 1,778 | 2,081 | [
"src/fparser/Fortran2003.py"
] |
jupyter__nbgrader-895 | fc13b045cc085bb2a5355131b8e0f20dd7607884 | 2017-10-18 20:34:17 | 5bc6f37c39c8b10b8f60440b2e6d9487e63ef3f1 | mpacer: Also, my tests pass on both py2 and py3, not sure why they would be failing on Appveyor. The errors also seem unreleated to what I touched.
mpacer: So much code sharing led me to think this should be genericised.
The method for defining the gradebook update method seemed a little hacky, but I couldn't think of any other way to do pass through the gradebook instance from within the context manager.
jhamrick: Unfortunately it does look like replacing the traitlets with properties causes issues when building the docs 😞
mpacer: I can use traitlet's dynamic defaults instead :).
mpacer: I actually don't think that name was ever a traitlet, or we wouldn't be able to run into this problem.
mpacer: also… looking at the doc generation code, it needs to be a class attribute, not an instance attribute, so traitlets couldn't work.
```python
name = cls.name.replace(" ", "-")
``` | diff --git a/nbgrader/apps/dbapp.py b/nbgrader/apps/dbapp.py
index 8bab3443..ee9a543c 100644
--- a/nbgrader/apps/dbapp.py
+++ b/nbgrader/apps/dbapp.py
@@ -6,11 +6,11 @@ import os
import shutil
from textwrap import dedent
-from traitlets import default, Unicode, Bool
+from traitlets import default, Unicode, Bool, List
from datetime import datetime
from . import NbGrader
-from ..api import Gradebook, MissingEntry
+from ..api import Gradebook, MissingEntry, Student, Assignment
from .. import dbutil
aliases = {
@@ -116,16 +116,43 @@ class DbStudentRemoveApp(NbGrader):
gb.remove_student(student_id)
-class DbStudentImportApp(NbGrader):
-
- name = 'nbgrader-db-student-import'
- description = 'Import students into the nbgrader database from a CSV file'
+class DbGenericImportApp(NbGrader):
aliases = aliases
flags = flags
+ expected_keys = List(help="These are the keys expected by the database")
+
+ def db_update_method_name(self):
+ """
+ Name of the update method used on the Gradebook for this import app.
+ It is expected to have the signature:
+ * instance_id : string, identifies which instance you are updating based on self.primary_key
+ * instance : dictionary, contents for the update from the parsed csv rows; unpacked as kwargs
+ """
+ raise NotImplementedError
+
+ name = ""
+ description = ""
+
+
+ @property
+ def table_class(self):
+ raise NotImplementedError
+
+ @property
+ def primary_key_default(self):
+ """
+ The key for the instance_id passed to the get_db_update_method.
+ """
+ raise NotImplementedError
+
+ @default("expected_keys")
+ def expected_keys_default(self):
+ return self.table_class.__table__.c.keys()
+
def start(self):
- super(DbStudentImportApp, self).start()
+ super(DbGenericImportApp, self).start()
if len(self.extra_args) != 1:
self.fail("Path to CSV file not provided.")
@@ -133,31 +160,69 @@ class DbStudentImportApp(NbGrader):
path = self.extra_args[0]
if not os.path.exists(path):
self.fail("No such file: '%s'", path)
- self.log.info("Importing students from: '%s'", path)
+ self.log.info("Importing from: '%s'", path)
- allowed_keys = ["last_name", "first_name", "email", "id"]
with Gradebook(self.coursedir.db_url) as gb:
with open(path, 'r') as fh:
reader = csv.DictReader(fh)
+ reader.fieldnames = self._preprocess_keys(reader.fieldnames)
for row in reader:
- if "id" not in row:
- self.fail("Malformatted CSV file: must contain a column for 'id'")
+ if self.primary_key not in row:
+ self.fail("Malformatted CSV file: must contain a column for '%s'" % self.primary_key)
# make sure all the keys are actually allowed in the database,
# and that any empty strings are parsed as None
- student = {}
+ instance = {}
for key, val in row.items():
- if key not in allowed_keys:
+ if key not in self.expected_keys:
continue
if val == '':
- student[key] = None
+ instance[key] = None
else:
- student[key] = val
- student_id = student.pop("id")
+ instance[key] = val
+ instance_primary_key = instance.pop(self.primary_key)
+
+
+ self.log.info("Creating/updating %s with %s '%s': %s",
+ self.table_class.__name__,
+ self.primary_key,
+ instance_primary_key,
+ instance)
+ db_update_method = getattr(gb, self.db_update_method_name)
+ db_update_method(instance_primary_key, **instance)
- self.log.info("Creating/updating student with ID '%s': %s", student_id, student)
- gb.update_or_create_student(student_id, **student)
+
+ def _preprocess_keys(self, keys):
+ """
+ Helper function for preprocessing keys
+ """
+ proposed_keys = [key.strip() for key in keys]
+ unknown_keys = [k for k in proposed_keys if k not in self.expected_keys]
+ if unknown_keys:
+ self.log.info("Unknown keys in csv: '%s'",
+ (', '.join(unknown_keys[:-1])
+ + 'and '
+ + unknown_keys[-1]))
+ return proposed_keys
+
+
+class DbStudentImportApp(DbGenericImportApp):
+
+ name = 'nbgrader-db-student-import'
+ description = 'Import students into the nbgrader database from a CSV file'
+
+ @property
+ def table_class(self):
+ return Student
+
+ @property
+ def primary_key(self):
+ return "id"
+
+ @property
+ def db_update_method_name(self):
+ return "update_or_create_student"
class DbStudentListApp(NbGrader):
@@ -258,49 +323,22 @@ class DbAssignmentRemoveApp(NbGrader):
gb.remove_assignment(assignment_id)
-class DbAssignmentImportApp(NbGrader):
+class DbAssignmentImportApp(DbGenericImportApp):
name = 'nbgrader-db-assignment-import'
description = 'Import assignments into the nbgrader database from a CSV file'
- aliases = aliases
- flags = flags
-
- def start(self):
- super(DbAssignmentImportApp, self).start()
-
- if len(self.extra_args) != 1:
- self.fail("Path to CSV file not provided.")
-
- path = self.extra_args[0]
- if not os.path.exists(path):
- self.fail("No such file: '%s'", path)
- self.log.info("Importing assignments from: '%s'", path)
-
- allowed_keys = ["duedate", "name"]
-
- with Gradebook(self.coursedir.db_url) as gb:
- with open(path, 'r') as fh:
- reader = csv.DictReader(fh)
- for row in reader:
- if "name" not in row:
- self.fail("Malformatted CSV file: must contain a column for 'name'")
-
- # make sure all the keys are actually allowed in the database,
- # and that any empty strings are parsed as None
- assignment = {}
- for key, val in row.items():
- if key not in allowed_keys:
- continue
- if val == '':
- assignment[key] = None
- else:
- assignment[key] = val
- assignment_id = assignment.pop("name")
+ @property
+ def table_class(self):
+ return Assignment
- self.log.info("Creating/updating assignment with name '%s': %s", assignment_id, assignment)
- gb.update_or_create_assignment(assignment_id, **assignment)
+ @property
+ def primary_key(self):
+ return "name"
+ @property
+ def db_update_method_name(self):
+ return "update_or_create_assignment"
class DbAssignmentListApp(NbGrader):
| Not importing emails when Importing students from .csv
### Operating system
`Ubuntu 16.04`
### `nbgrader --version`
`nbgrader version 0.5.2`
### Expected behavior
I'm creating the students database from a `csv` file. My csv files has:
`id,last_name,first_name,email` so I expect when I do `nbgrader db student list` to have all the fields
for each student.
### Actual behavior
For some reason the field corresponding to email it's not being import. For example in the csv file I have:
`id,last_name,first_name,email`
`stud1, smith, blabla, [email protected]`
and when creating the database I got:
`Creating/updating student with ID 'stud1': {'last_name': 'smith', 'first_name': 'blabla'}`
when listing students I see:
`stud1 (smith, blabla) -- None`
Any clue what is going on?
| jupyter/nbgrader | diff --git a/nbgrader/tests/apps/test_nbgrader_db.py b/nbgrader/tests/apps/test_nbgrader_db.py
index fd0803cb..5b7789da 100644
--- a/nbgrader/tests/apps/test_nbgrader_db.py
+++ b/nbgrader/tests/apps/test_nbgrader_db.py
@@ -176,6 +176,28 @@ class TestNbGraderDb(BaseTestApp):
assert student.first_name is None
assert student.email is None
+
+ def test_student_import_csv_spaces(self, db, temp_cwd):
+ with open("students.csv", "w") as fh:
+ fh.write(dedent(
+ """
+ id,first_name,last_name, email
+ foo,abc,xyz,[email protected]
+ bar,,,
+ """
+ ).strip())
+
+ run_nbgrader(["db", "student", "import", "students.csv", "--db", db])
+ with Gradebook(db) as gb:
+ student = gb.find_student("foo")
+ assert student.last_name == "xyz"
+ assert student.first_name == "abc"
+ assert student.email == "[email protected]"
+ student = gb.find_student("bar")
+ assert student.last_name is None
+ assert student.first_name is None
+ assert student.email is None
+
def test_assignment_add(self, db):
run_nbgrader(["db", "assignment", "add", "foo", "--db", db])
with Gradebook(db) as gb:
@@ -256,6 +278,24 @@ class TestNbGraderDb(BaseTestApp):
assignment = gb.find_assignment("bar")
assert assignment.duedate is None
+
+ def test_assignment_import_csv_spaces(self, db, temp_cwd):
+ with open("assignments.csv", "w") as fh:
+ fh.write(dedent(
+ """
+ name, duedate
+ foo,Sun Jan 8 2017 4:31:22 PM
+ bar,
+ """
+ ).strip())
+
+ run_nbgrader(["db", "assignment", "import", "assignments.csv", "--db", db])
+ with Gradebook(db) as gb:
+ assignment = gb.find_assignment("foo")
+ assert assignment.duedate == datetime.datetime(2017, 1, 8, 16, 31, 22)
+ assignment = gb.find_assignment("bar")
+ assert assignment.duedate is None
+
# check that it fails when no id column is given
with open("assignments.csv", "w") as fh:
fh.write(dedent(
@@ -284,7 +324,6 @@ class TestNbGraderDb(BaseTestApp):
assert assignment.duedate == datetime.datetime(2017, 1, 8, 16, 31, 22)
assignment = gb.find_assignment("bar")
assert assignment.duedate is None
-
def test_upgrade_nodb(self, temp_cwd):
# test upgrading without a database
run_nbgrader(["db", "upgrade"])
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 1
},
"num_modified_files": 1
} | 0.5 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -r dev-requirements.txt -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-rerunfailures",
"coverage",
"selenium",
"invoke",
"sphinx",
"codecov",
"cov-core",
"nbval"
],
"pre_install": [
"pip install -U pip wheel setuptools"
],
"python": "3.5",
"reqs_path": [
"dev-requirements.txt",
"dev-requirements-windows.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
alembic==1.7.7
anyio==3.6.2
argon2-cffi==21.3.0
argon2-cffi-bindings==21.2.0
async-generator==1.10
attrs==22.2.0
Babel==2.11.0
backcall==0.2.0
bleach==4.1.0
certifi==2021.5.30
cffi==1.15.1
charset-normalizer==2.0.12
codecov==2.1.13
comm==0.1.4
contextvars==2.4
cov-core==1.15.0
coverage==6.2
dataclasses==0.8
decorator==5.1.1
defusedxml==0.7.1
docutils==0.18.1
entrypoints==0.4
greenlet==2.0.2
idna==3.10
imagesize==1.4.1
immutables==0.19
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig==1.1.1
invoke==2.2.0
ipykernel==5.5.6
ipython==7.16.3
ipython-genutils==0.2.0
ipywidgets==7.8.5
jedi==0.17.2
Jinja2==3.0.3
json5==0.9.16
jsonschema==3.2.0
jupyter==1.1.1
jupyter-client==7.1.2
jupyter-console==6.4.3
jupyter-core==4.9.2
jupyter-server==1.13.1
jupyterlab==3.2.9
jupyterlab-pygments==0.1.2
jupyterlab-server==2.10.3
jupyterlab_widgets==1.1.11
Mako==1.1.6
MarkupSafe==2.0.1
mistune==0.8.4
nbclassic==0.3.5
nbclient==0.5.9
nbconvert==6.0.7
nbformat==5.1.3
-e git+https://github.com/jupyter/nbgrader.git@fc13b045cc085bb2a5355131b8e0f20dd7607884#egg=nbgrader
nbval==0.10.0
nest-asyncio==1.6.0
notebook==6.4.10
packaging==21.3
pandocfilters==1.5.1
parso==0.7.1
pexpect==4.9.0
pickleshare==0.7.5
pluggy==1.0.0
prometheus-client==0.17.1
prompt-toolkit==3.0.36
ptyprocess==0.7.0
py==1.11.0
pycparser==2.21
pyenchant==3.2.2
Pygments==2.14.0
pyparsing==3.1.4
pyrsistent==0.18.0
pytest==7.0.1
pytest-cov==4.0.0
pytest-rerunfailures==10.3
python-dateutil==2.9.0.post0
pytz==2025.2
pyzmq==25.1.2
requests==2.27.1
selenium==3.141.0
Send2Trash==1.8.3
six==1.17.0
sniffio==1.2.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinx-rtd-theme==2.0.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jquery==4.1
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
sphinxcontrib-spelling==7.7.0
SQLAlchemy==1.4.54
terminado==0.12.1
testpath==0.6.0
tomli==1.2.3
tornado==6.1
traitlets==4.3.3
typing_extensions==4.1.1
urllib3==1.26.20
wcwidth==0.2.13
webencodings==0.5.1
websocket-client==1.3.1
widgetsnbextension==3.6.10
zipp==3.6.0
| name: nbgrader
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- alembic==1.7.7
- anyio==3.6.2
- argon2-cffi==21.3.0
- argon2-cffi-bindings==21.2.0
- async-generator==1.10
- attrs==22.2.0
- babel==2.11.0
- backcall==0.2.0
- bleach==4.1.0
- cffi==1.15.1
- charset-normalizer==2.0.12
- codecov==2.1.13
- comm==0.1.4
- contextvars==2.4
- cov-core==1.15.0
- coverage==6.2
- dataclasses==0.8
- decorator==5.1.1
- defusedxml==0.7.1
- docutils==0.18.1
- entrypoints==0.4
- greenlet==2.0.2
- idna==3.10
- imagesize==1.4.1
- immutables==0.19
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- iniconfig==1.1.1
- invoke==2.2.0
- ipykernel==5.5.6
- ipython==7.16.3
- ipython-genutils==0.2.0
- ipywidgets==7.8.5
- jedi==0.17.2
- jinja2==3.0.3
- json5==0.9.16
- jsonschema==3.2.0
- jupyter==1.1.1
- jupyter-client==7.1.2
- jupyter-console==6.4.3
- jupyter-core==4.9.2
- jupyter-server==1.13.1
- jupyterlab==3.2.9
- jupyterlab-pygments==0.1.2
- jupyterlab-server==2.10.3
- jupyterlab-widgets==1.1.11
- mako==1.1.6
- markupsafe==2.0.1
- mistune==0.8.4
- nbclassic==0.3.5
- nbclient==0.5.9
- nbconvert==6.0.7
- nbformat==5.1.3
- nbval==0.10.0
- nest-asyncio==1.6.0
- notebook==6.4.10
- packaging==21.3
- pandocfilters==1.5.1
- parso==0.7.1
- pexpect==4.9.0
- pickleshare==0.7.5
- pip==21.3.1
- pluggy==1.0.0
- prometheus-client==0.17.1
- prompt-toolkit==3.0.36
- ptyprocess==0.7.0
- py==1.11.0
- pycparser==2.21
- pyenchant==3.2.2
- pygments==2.14.0
- pyparsing==3.1.4
- pyrsistent==0.18.0
- pytest==7.0.1
- pytest-cov==4.0.0
- pytest-rerunfailures==10.3
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyzmq==25.1.2
- requests==2.27.1
- selenium==3.141.0
- send2trash==1.8.3
- setuptools==59.6.0
- six==1.17.0
- sniffio==1.2.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinx-rtd-theme==2.0.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jquery==4.1
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- sphinxcontrib-spelling==7.7.0
- sqlalchemy==1.4.54
- terminado==0.12.1
- testpath==0.6.0
- tomli==1.2.3
- tornado==6.1
- traitlets==4.3.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- wcwidth==0.2.13
- webencodings==0.5.1
- websocket-client==1.3.1
- widgetsnbextension==3.6.10
- zipp==3.6.0
prefix: /opt/conda/envs/nbgrader
| [
"nbgrader/tests/apps/test_nbgrader_db.py::TestNbGraderDb::test_student_import_csv_spaces",
"nbgrader/tests/apps/test_nbgrader_db.py::TestNbGraderDb::test_assignment_import_csv_spaces"
] | [] | [
"nbgrader/tests/apps/test_nbgrader_db.py::TestNbGraderDb::test_help",
"nbgrader/tests/apps/test_nbgrader_db.py::TestNbGraderDb::test_no_args",
"nbgrader/tests/apps/test_nbgrader_db.py::TestNbGraderDb::test_student_add",
"nbgrader/tests/apps/test_nbgrader_db.py::TestNbGraderDb::test_student_remove",
"nbgrader/tests/apps/test_nbgrader_db.py::TestNbGraderDb::test_student_remove_with_submissions",
"nbgrader/tests/apps/test_nbgrader_db.py::TestNbGraderDb::test_student_list",
"nbgrader/tests/apps/test_nbgrader_db.py::TestNbGraderDb::test_student_import",
"nbgrader/tests/apps/test_nbgrader_db.py::TestNbGraderDb::test_assignment_add",
"nbgrader/tests/apps/test_nbgrader_db.py::TestNbGraderDb::test_assignment_remove",
"nbgrader/tests/apps/test_nbgrader_db.py::TestNbGraderDb::test_assignment_remove_with_submissions",
"nbgrader/tests/apps/test_nbgrader_db.py::TestNbGraderDb::test_assignment_list",
"nbgrader/tests/apps/test_nbgrader_db.py::TestNbGraderDb::test_assignment_import",
"nbgrader/tests/apps/test_nbgrader_db.py::TestNbGraderDb::test_upgrade_nodb",
"nbgrader/tests/apps/test_nbgrader_db.py::TestNbGraderDb::test_upgrade_current_db",
"nbgrader/tests/apps/test_nbgrader_db.py::TestNbGraderDb::test_upgrade_old_db"
] | [] | BSD 3-Clause "New" or "Revised" License | 1,779 | 1,696 | [
"nbgrader/apps/dbapp.py"
] |
peter-wangxu__persist-queue-28 | 8cd900781aa449d2e921bf5db953d02815110646 | 2017-10-20 14:55:36 | 7a2c4d3768dfd6528cc8c1599ef773ebf310697b | codecov[bot]: # [Codecov](https://codecov.io/gh/peter-wangxu/persist-queue/pull/28?src=pr&el=h1) Report
> Merging [#28](https://codecov.io/gh/peter-wangxu/persist-queue/pull/28?src=pr&el=desc) into [master](https://codecov.io/gh/peter-wangxu/persist-queue/commit/8cd900781aa449d2e921bf5db953d02815110646?src=pr&el=desc) will **increase** coverage by `0.04%`.
> The diff coverage is `100%`.
[](https://codecov.io/gh/peter-wangxu/persist-queue/pull/28?src=pr&el=tree)
```diff
@@ Coverage Diff @@
## master #28 +/- ##
==========================================
+ Coverage 93.89% 93.94% +0.04%
==========================================
Files 6 6
Lines 377 380 +3
Branches 44 44
==========================================
+ Hits 354 357 +3
Misses 11 11
Partials 12 12
```
| Flag | Coverage Δ | |
|---|---|---|
| #python | `93.94% <100%> (+0.04%)` | :arrow_up: |
| [Impacted Files](https://codecov.io/gh/peter-wangxu/persist-queue/pull/28?src=pr&el=tree) | Coverage Δ | |
|---|---|---|
| [persistqueue/sqlbase.py](https://codecov.io/gh/peter-wangxu/persist-queue/pull/28?src=pr&el=tree#diff-cGVyc2lzdHF1ZXVlL3NxbGJhc2UucHk=) | `94.5% <100%> (+0.18%)` | :arrow_up: |
------
[Continue to review full report at Codecov](https://codecov.io/gh/peter-wangxu/persist-queue/pull/28?src=pr&el=continue).
> **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta)
> `Δ = absolute <relative> (impact)`, `ø = not affected`, `? = missing data`
> Powered by [Codecov](https://codecov.io/gh/peter-wangxu/persist-queue/pull/28?src=pr&el=footer). Last update [8cd9007...b5fa7cf](https://codecov.io/gh/peter-wangxu/persist-queue/pull/28?src=pr&el=lastupdated). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments).
| diff --git a/persistqueue/sqlbase.py b/persistqueue/sqlbase.py
index 48955f7..e0a7672 100644
--- a/persistqueue/sqlbase.py
+++ b/persistqueue/sqlbase.py
@@ -79,6 +79,7 @@ class SQLiteBase(object):
def _init(self):
"""Initialize the tables in DB."""
+
if self.path == self._MEMORY:
self.memory_sql = True
log.debug("Initializing Sqlite3 Queue in memory.")
@@ -99,19 +100,26 @@ class SQLiteBase(object):
if not self.memory_sql:
self._putter = self._new_db_connection(
self.path, self.multithreading, self.timeout)
-
+ if self.auto_commit is False:
+ log.warning('auto_commit=False is still experimental,'
+ 'only use it with care.')
+ self._getter.isolation_level = "DEFERRED"
+ self._putter.isolation_level = "DEFERRED"
# SQLite3 transaction lock
self.tran_lock = threading.Lock()
self.put_event = threading.Event()
def _new_db_connection(self, path, multithreading, timeout):
+ conn = None
if path == self._MEMORY:
- return sqlite3.connect(path,
+ conn = sqlite3.connect(path,
check_same_thread=not multithreading)
else:
- return sqlite3.connect('{}/data.db'.format(path),
+ conn = sqlite3.connect('{}/data.db'.format(path),
timeout=timeout,
check_same_thread=not multithreading)
+ conn.execute('PRAGMA journal_mode=WAL;')
+ return conn
@with_conditional_transaction
def _insert_into(self, *record):
@@ -134,7 +142,7 @@ class SQLiteBase(object):
def _count(self):
sql = 'SELECT COUNT({}) FROM {}'.format(self._key_column,
self._table_name)
- row = self._putter.execute(sql).fetchone()
+ row = self._getter.execute(sql).fetchone()
return row[0] if row else 0
def _task_done(self):
diff --git a/persistqueue/sqlqueue.py b/persistqueue/sqlqueue.py
index 6c86f2f..2a53cfe 100644
--- a/persistqueue/sqlqueue.py
+++ b/persistqueue/sqlqueue.py
@@ -15,6 +15,9 @@ sqlite3.enable_callback_tracebacks(True)
log = logging.getLogger(__name__)
+# 10 seconds internal for `wait` of event
+TICK_FOR_WAIT = 10
+
class SQLiteQueue(sqlbase.SQLiteBase):
"""SQLite3 based FIFO queue."""
@@ -44,7 +47,7 @@ class SQLiteQueue(sqlbase.SQLiteBase):
def _pop(self):
with self.action_lock:
row = self._select()
- # Perhaps a sqilite bug, sometimes (None, None) is returned
+ # Perhaps a sqlite3 bug, sometimes (None, None) is returned
# by select, below can avoid these invalid records.
if row and row[0] is not None:
self._delete(row[0])
@@ -54,23 +57,31 @@ class SQLiteQueue(sqlbase.SQLiteBase):
return row[1] # pickled data
return None
- def get(self, block=False):
- unpickled = self._pop()
- item = None
- if unpickled:
- item = pickle.loads(unpickled)
+ def get(self, block=True, timeout=None):
+ if not block:
+ pickled = self._pop()
+ if not pickled:
+ raise Empty
+ elif timeout is None:
+ # block until a put event.
+ pickled = self._pop()
+ while not pickled:
+ self.put_event.wait(TICK_FOR_WAIT)
+ pickled = self._pop()
+ elif timeout < 0:
+ raise ValueError("'timeout' must be a non-negative number")
else:
- if block:
- end = _time.time() + 10.0
- while not unpickled:
- remaining = end - _time.time()
- if remaining <= 0.0:
- raise Empty
- # wait for no more than 10 seconds
- self.put_event.wait(remaining)
- unpickled = self._pop()
- item = pickle.loads(unpickled)
-
+ # block until the timeout reached
+ endtime = _time.time() + timeout
+ pickled = self._pop()
+ while not pickled:
+ remaining = endtime - _time.time()
+ if remaining <= 0.0:
+ raise Empty
+ self.put_event.wait(
+ TICK_FOR_WAIT if TICK_FOR_WAIT < remaining else remaining)
+ pickled = self._pop()
+ item = pickle.loads(pickled)
return item
def task_done(self):
| FIFOSQLiteQueue: the get() method returns None instead of blocking
and if I specify get(block=True) it raises the empty exception | peter-wangxu/persist-queue | diff --git a/tests/test_sqlqueue.py b/tests/test_sqlqueue.py
index 1e63431..fe00f42 100644
--- a/tests/test_sqlqueue.py
+++ b/tests/test_sqlqueue.py
@@ -18,7 +18,7 @@ def task_done_if_required(queue):
class SQLite3QueueTest(unittest.TestCase):
def setUp(self):
self.path = tempfile.mkdtemp(suffix='sqlqueue')
- self.auto_commit = False
+ self.auto_commit = True
def tearDown(self):
shutil.rmtree(self.path, ignore_errors=True)
@@ -30,7 +30,12 @@ class SQLite3QueueTest(unittest.TestCase):
task_done_if_required(q)
d = q.get()
self.assertEqual('first', d)
- self.assertRaises(Empty, q.get, block=True)
+ self.assertRaises(Empty, q.get, block=False)
+
+ # assert with timeout
+ self.assertRaises(Empty, q.get, block=True, timeout=1.0)
+ # assert with negative timeout
+ self.assertRaises(ValueError, q.get, block=True, timeout=-1.0)
def test_open_close_single(self):
"""Write 1 item, close, reopen checking if same item is there"""
@@ -75,7 +80,7 @@ class SQLite3QueueTest(unittest.TestCase):
q.get()
n -= 1
else:
- self.assertEqual(None, q.get())
+ self.assertRaises(Empty, q.get, block=False)
else:
q.put('var%d' % random.getrandbits(16))
task_done_if_required(q)
@@ -108,7 +113,7 @@ class SQLite3QueueTest(unittest.TestCase):
c.join()
self.assertEqual(0, m_queue.size)
self.assertEqual(0, len(m_queue))
- self.assertIsNone(m_queue.get(block=False))
+ self.assertRaises(Empty, m_queue.get, block=False)
def test_multi_threaded_multi_producer(self):
"""Test sqlqueue can be used by multiple producers."""
@@ -175,19 +180,35 @@ class SQLite3QueueTest(unittest.TestCase):
self.assertEqual(0, queue.qsize())
for x in range(1000):
- self.assertNotEqual(0, counter[x], "0 for counter's index %s" % x)
+ self.assertNotEqual(0, counter[x],
+ "not 0 for counter's index %s" % x)
-class SQLite3QueueAutoCommitTest(SQLite3QueueTest):
+class SQLite3QueueNoAutoCommitTest(SQLite3QueueTest):
def setUp(self):
self.path = tempfile.mkdtemp(suffix='sqlqueue_auto_commit')
- self.auto_commit = True
+ self.auto_commit = False
+
+ def test_multiple_consumers(self):
+ """
+ FAIL: test_multiple_consumers (
+ -tests.test_sqlqueue.SQLite3QueueNoAutoCommitTest)
+ Test sqlqueue can be used by multiple consumers.
+ ----------------------------------------------------------------------
+ Traceback (most recent call last):
+ File "persist-queue\tests\test_sqlqueue.py", line 183,
+ -in test_multiple_consumers
+ self.assertEqual(0, queue.qsize())
+ AssertionError: 0 != 72
+ :return:
+ """
+ self.skipTest('Skipped due to a known bug above.')
class SQLite3QueueInMemory(SQLite3QueueTest):
def setUp(self):
self.path = ":memory:"
- self.auto_commit = False
+ self.auto_commit = True
def test_open_close_1000(self):
self.skipTest('Memory based sqlite is not persistent.')
@@ -196,16 +217,22 @@ class SQLite3QueueInMemory(SQLite3QueueTest):
self.skipTest('Memory based sqlite is not persistent.')
def test_multiple_consumers(self):
- # TODO(peter) when the shared-cache feature is available in default
- # Python of most Linux distros, this should be easy:).
- self.skipTest('In-memory based sqlite needs the support '
- 'of shared-cache')
+ self.skipTest('Skipped due to occasional crash during '
+ 'multithreading mode.')
+
+ def test_multi_threaded_multi_producer(self):
+ self.skipTest('Skipped due to occasional crash during '
+ 'multithreading mode.')
+
+ def test_multi_threaded_parallel(self):
+ self.skipTest('Skipped due to occasional crash during '
+ 'multithreading mode.')
class FILOSQLite3QueueTest(unittest.TestCase):
def setUp(self):
self.path = tempfile.mkdtemp(suffix='filo_sqlqueue')
- self.auto_commit = False
+ self.auto_commit = True
def tearDown(self):
shutil.rmtree(self.path, ignore_errors=True)
@@ -230,7 +257,7 @@ class FILOSQLite3QueueTest(unittest.TestCase):
self.assertEqual('foobar', data)
-class FILOSQLite3QueueAutoCommitTest(FILOSQLite3QueueTest):
+class FILOSQLite3QueueNoAutoCommitTest(FILOSQLite3QueueTest):
def setUp(self):
self.path = tempfile.mkdtemp(suffix='filo_sqlqueue_auto_commit')
- self.auto_commit = True
+ self.auto_commit = False
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 1
},
"num_modified_files": 2
} | 0.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose2",
"pytest"
],
"pre_install": null,
"python": "3.6",
"reqs_path": [
"requirements.txt",
"test-requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
cov-core==1.15.0
coverage==6.2
distlib==0.3.9
dnspython==2.2.1
eventlet==0.33.3
filelock==3.4.1
flake8==5.0.4
greenlet==2.0.2
importlib-metadata==4.2.0
importlib-resources==5.4.0
iniconfig==1.1.1
mccabe==0.7.0
mock==5.2.0
nose2==0.13.0
packaging==21.3
-e git+https://github.com/peter-wangxu/persist-queue.git@8cd900781aa449d2e921bf5db953d02815110646#egg=persist_queue
platformdirs==2.4.0
pluggy==1.0.0
py==1.11.0
pycodestyle==2.9.1
pyflakes==2.5.0
pyparsing==3.1.4
pytest==7.0.1
six==1.17.0
tomli==1.2.3
typing_extensions==4.1.1
virtualenv==20.16.2
zipp==3.6.0
| name: persist-queue
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- cov-core==1.15.0
- coverage==6.2
- distlib==0.3.9
- dnspython==2.2.1
- eventlet==0.33.3
- filelock==3.4.1
- flake8==5.0.4
- greenlet==2.0.2
- importlib-metadata==4.2.0
- importlib-resources==5.4.0
- iniconfig==1.1.1
- mccabe==0.7.0
- mock==5.2.0
- nose2==0.13.0
- packaging==21.3
- platformdirs==2.4.0
- pluggy==1.0.0
- py==1.11.0
- pycodestyle==2.9.1
- pyflakes==2.5.0
- pyparsing==3.1.4
- pytest==7.0.1
- six==1.17.0
- tomli==1.2.3
- typing-extensions==4.1.1
- virtualenv==20.16.2
- zipp==3.6.0
prefix: /opt/conda/envs/persist-queue
| [
"tests/test_sqlqueue.py::SQLite3QueueTest::test_multi_threaded_parallel",
"tests/test_sqlqueue.py::SQLite3QueueTest::test_raise_empty",
"tests/test_sqlqueue.py::SQLite3QueueTest::test_random_read_write",
"tests/test_sqlqueue.py::SQLite3QueueNoAutoCommitTest::test_multi_threaded_parallel",
"tests/test_sqlqueue.py::SQLite3QueueNoAutoCommitTest::test_raise_empty",
"tests/test_sqlqueue.py::SQLite3QueueNoAutoCommitTest::test_random_read_write",
"tests/test_sqlqueue.py::SQLite3QueueInMemory::test_raise_empty",
"tests/test_sqlqueue.py::SQLite3QueueInMemory::test_random_read_write"
] | [] | [
"tests/test_sqlqueue.py::SQLite3QueueTest::test_multi_threaded_multi_producer",
"tests/test_sqlqueue.py::SQLite3QueueTest::test_multiple_consumers",
"tests/test_sqlqueue.py::SQLite3QueueTest::test_open_close_1000",
"tests/test_sqlqueue.py::SQLite3QueueTest::test_open_close_single",
"tests/test_sqlqueue.py::SQLite3QueueNoAutoCommitTest::test_multi_threaded_multi_producer",
"tests/test_sqlqueue.py::SQLite3QueueNoAutoCommitTest::test_open_close_1000",
"tests/test_sqlqueue.py::SQLite3QueueNoAutoCommitTest::test_open_close_single",
"tests/test_sqlqueue.py::FILOSQLite3QueueTest::test_open_close_1000",
"tests/test_sqlqueue.py::FILOSQLite3QueueNoAutoCommitTest::test_open_close_1000"
] | [] | BSD 3-Clause "New" or "Revised" License | 1,783 | 1,134 | [
"persistqueue/sqlbase.py",
"persistqueue/sqlqueue.py"
] |
stummjr__scrapy-fieldstats-7 | 476b8a2bdb01ab3f77a6dc5af936284f41a20272 | 2017-10-21 16:38:01 | 476b8a2bdb01ab3f77a6dc5af936284f41a20272 | diff --git a/scrapy_fieldstats/fieldstats.py b/scrapy_fieldstats/fieldstats.py
index 703b2dc..83d9860 100644
--- a/scrapy_fieldstats/fieldstats.py
+++ b/scrapy_fieldstats/fieldstats.py
@@ -1,7 +1,6 @@
# -*- coding:utf-8 -*-
import logging
import pprint
-from collections import defaultdict
from scrapy import signals
from scrapy.exceptions import NotConfigured
@@ -10,12 +9,12 @@ logger = logging.getLogger(__name__)
class FieldStatsExtension(object):
- """ When enabled, the FieldStats extensions logs the percentage of
+ """ When enabled, the FieldStats extension logs the percentage of
items coverage for a crawl.
"""
def __init__(self):
self.item_count = 0
- self.field_counts = defaultdict(int)
+ self.field_counts = {}
@classmethod
def from_crawler(cls, crawler):
@@ -28,20 +27,46 @@ class FieldStatsExtension(object):
return ext
def item_scraped(self, item, spider):
+ self.compute_item(item)
+
+ def spider_closed(self, spider):
+ fields_summary = self.build_fields_summary()
+ logger.info('Field stats:\n{}'.format(pprint.pformat(fields_summary)))
+
+ def compute_item(self, item):
self.item_count += 1
+ self.count_item_fields(item)
+
+ def count_item_fields(self, item, current_node=None):
+ if current_node is None:
+ current_node = self.field_counts
+
for name, value in item.items():
if not value:
continue
- self.field_counts[name] += 1
- def spider_closed(self, spider):
- field_stats = self.compute_fieldstats()
- logger.info('Field stats:\n{}'.format(pprint.pformat(field_stats)))
+ if isinstance(value, dict):
+ # recurse into nested items
+ if name not in current_node:
+ current_node[name] = {}
+ self.count_item_fields(value, current_node=current_node[name])
+ continue
+
+ if name not in current_node:
+ current_node[name] = 0
+ current_node[name] += 1
+
+ def build_fields_summary(self, field_counts=None, fields_summary=None):
+ if field_counts is None:
+ field_counts = self.field_counts
+ fields_summary = {}
- def compute_fieldstats(self):
- field_stats = {}
- for name, count in self.field_counts.items():
- field_coverage = int(count) * 100 / self.item_count
- field_stats[name] = "{}%".format(field_coverage)
+ for name, value in field_counts.items():
+ if isinstance(value, dict):
+ fields_summary[name] = {}
+ self.build_fields_summary(field_counts[name], fields_summary[name])
+ else:
+ field_percentage = int(value) * 100 / self.item_count
+ fields_summary[name] = "{}%".format(field_percentage)
- return field_stats
+ return fields_summary
| Support nested items
Currently, the extension only counts shallow items in dictionaries.
However, it's quite common to have nested items such as:
```json
{
"title": "Animal Farm",
"author": {
"name": "George Orwell",
"birth_location": "Motihari, India",
"birth_date": "1903-06-25"
},
"ratings": 4.9
}
```
In this case, the extension doesn't generate coverage stats for the nested `name`, `birth_location` and `birth_date` fields.
| stummjr/scrapy-fieldstats | diff --git a/tests/test_scrapy_fieldstats.py b/tests/test_scrapy_fieldstats.py
index 2873777..24b749f 100644
--- a/tests/test_scrapy_fieldstats.py
+++ b/tests/test_scrapy_fieldstats.py
@@ -3,18 +3,20 @@
from scrapy_fieldstats.fieldstats import FieldStatsExtension
-def fake_extract_items(fake_items, extension):
+def extract_fake_items_and_compute_stats(fake_items):
+ ext = FieldStatsExtension()
for item in fake_items:
- extension.item_scraped(item, None)
+ ext.compute_item(item)
+ field_stats = ext.build_fields_summary()
+ return field_stats
def test_single_item():
fake_items = [{"field1": "value1"}]
- ext = FieldStatsExtension()
- fake_extract_items(fake_items, ext)
- field_stats = ext.compute_fieldstats()
+ field_stats = extract_fake_items_and_compute_stats(fake_items)
+
assert len(field_stats) == 1
- assert field_stats.get('field1') == '100.0%'
+ assert field_stats['field1'] == '100.0%'
def test_single_item_many_fields():
@@ -24,21 +26,19 @@ def test_single_item_many_fields():
"field2": "value2",
}
]
- ext = FieldStatsExtension()
- fake_extract_items(fake_items, ext)
- field_stats = ext.compute_fieldstats()
+ field_stats = extract_fake_items_and_compute_stats(fake_items)
+
assert len(field_stats) == 2
- assert field_stats.get('field1') == '100.0%'
- assert field_stats.get('field2') == '100.0%'
+ assert field_stats['field1'] == '100.0%'
+ assert field_stats['field2'] == '100.0%'
def test_many_items():
fake_items = [{"field1": "value1"}, {"field1": "value1"}]
- ext = FieldStatsExtension()
- fake_extract_items(fake_items, ext)
- field_stats = ext.compute_fieldstats()
+ field_stats = extract_fake_items_and_compute_stats(fake_items)
+
assert len(field_stats) == 1
- assert field_stats.get('field1') == '100.0%'
+ assert field_stats['field1'] == '100.0%'
def test_many_items_many_fields():
@@ -52,12 +52,11 @@ def test_many_items_many_fields():
"field2": "value2",
}
]
- ext = FieldStatsExtension()
- fake_extract_items(fake_items, ext)
- field_stats = ext.compute_fieldstats()
+ field_stats = extract_fake_items_and_compute_stats(fake_items)
+
assert len(field_stats) == 2
- assert field_stats.get('field1') == '100.0%'
- assert field_stats.get('field2') == '100.0%'
+ assert field_stats['field1'] == '100.0%'
+ assert field_stats['field2'] == '100.0%'
def test_many_items_many_fields_missing_field():
@@ -70,12 +69,11 @@ def test_many_items_many_fields_missing_field():
"field2": "value2",
}
]
- ext = FieldStatsExtension()
- fake_extract_items(fake_items, ext)
- field_stats = ext.compute_fieldstats()
+ field_stats = extract_fake_items_and_compute_stats(fake_items)
+
assert len(field_stats) == 2
- assert field_stats.get('field1') == '100.0%'
- assert field_stats.get('field2') == '50.0%'
+ assert field_stats['field1'] == '100.0%'
+ assert field_stats['field2'] == '50.0%'
def test_many_items_many_fields_empty_field():
@@ -89,9 +87,41 @@ def test_many_items_many_fields_empty_field():
"field2": "value2",
}
]
- ext = FieldStatsExtension()
- fake_extract_items(fake_items, ext)
- field_stats = ext.compute_fieldstats()
+ field_stats = extract_fake_items_and_compute_stats(fake_items)
+
assert len(field_stats) == 2
- assert field_stats.get('field1') == '100.0%'
- assert field_stats.get('field2') == '50.0%'
+ assert field_stats['field1'] == '100.0%'
+ assert field_stats['field2'] == '50.0%'
+
+
+def test_nested_items():
+ fake_items = [
+ {
+ "field1": "value1",
+ "field2": {
+ "field2.1": "value2.1",
+ "field2.2": "value2.2",
+ "field2.3": {
+ "field2.3.1": "value2.3.1",
+ "field2.3.2": "value2.3.2",
+ },
+ }
+ },
+ {
+ "field1": "value1",
+ "field2": {
+ "field2.1": "value2.1",
+ "field2.3": {
+ "field2.3.1": "value2.3.1",
+ "field2.3.2": "",
+ },
+ "field2.4": "value2.2",
+ }
+ }
+ ]
+ field_stats = extract_fake_items_and_compute_stats(fake_items)
+ assert field_stats['field1'] == '100.0%'
+ assert field_stats['field2']['field2.1'] == '100.0%'
+ assert field_stats['field2']['field2.2'] == '50.0%'
+ assert field_stats['field2']['field2.2'] == '50.0%'
+ assert field_stats['field2']['field2.3']['field2.3.2'] == '50.0%'
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 1
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "scrapy",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | appdirs==1.4.4
attrs @ file:///croot/attrs_1734533101012/work
Automat @ file:///tmp/build/80754af9/automat_1600298431173/work
bcrypt @ file:///croot/bcrypt_1736182451882/work
Brotli @ file:///croot/brotli-split_1736182456865/work
certifi @ file:///croot/certifi_1738623731865/work/certifi
cffi @ file:///croot/cffi_1736182485317/work
charset-normalizer @ file:///croot/charset-normalizer_1721748349566/work
constantly @ file:///croot/constantly_1703165600746/work
cryptography @ file:///croot/cryptography_1740577825284/work
cssselect @ file:///croot/cssselect_1707339882883/work
defusedxml @ file:///tmp/build/80754af9/defusedxml_1615228127516/work
exceptiongroup==1.2.2
filelock @ file:///croot/filelock_1700591183607/work
hyperlink @ file:///tmp/build/80754af9/hyperlink_1610130746837/work
idna @ file:///croot/idna_1714398848350/work
incremental @ file:///croot/incremental_1708639938299/work
iniconfig==2.1.0
itemadapter @ file:///tmp/build/80754af9/itemadapter_1626442940632/work
itemloaders @ file:///croot/itemloaders_1708639918324/work
jmespath @ file:///croot/jmespath_1700144569655/work
lxml @ file:///croot/lxml_1737039601731/work
packaging @ file:///croot/packaging_1734472117206/work
parsel @ file:///croot/parsel_1707503445438/work
pluggy==1.5.0
Protego @ file:///tmp/build/80754af9/protego_1598657180827/work
pyasn1 @ file:///croot/pyasn1_1729239786406/work
pyasn1_modules @ file:///home/conda/feedstock_root/build_artifacts/pyasn1-modules_1733324602540/work
pycparser @ file:///tmp/build/80754af9/pycparser_1636541352034/work
PyDispatcher==2.0.5
pyOpenSSL @ file:///croot/pyopenssl_1741343803032/work
PySocks @ file:///tmp/build/80754af9/pysocks_1605305812635/work
pytest==8.3.5
queuelib @ file:///croot/queuelib_1696950067631/work
requests @ file:///croot/requests_1730999120400/work
requests-file @ file:///Users/ktietz/demo/mc3/conda-bld/requests-file_1629455781986/work
Scrapy @ file:///croot/scrapy_1733166797775/work
-e git+https://github.com/stummjr/scrapy-fieldstats.git@476b8a2bdb01ab3f77a6dc5af936284f41a20272#egg=scrapy_fieldstats
service-identity @ file:///Users/ktietz/demo/mc3/conda-bld/service_identity_1629460757137/work
six @ file:///tmp/build/80754af9/six_1644875935023/work
tldextract @ file:///croot/tldextract_1723064386918/work
tomli==2.2.1
Twisted @ file:///croot/twisted_1708702809815/work
typing_extensions @ file:///croot/typing_extensions_1734714854207/work
urllib3 @ file:///croot/urllib3_1737133630106/work
w3lib @ file:///croot/w3lib_1708639924738/work
zope.interface @ file:///croot/zope.interface_1731939362051/work
| name: scrapy-fieldstats
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- appdirs=1.4.4=pyhd3eb1b0_0
- attrs=24.3.0=py39h06a4308_0
- automat=20.2.0=py_0
- bcrypt=3.2.0=py39h5eee18b_2
- brotli-python=1.0.9=py39h6a678d5_9
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2025.1.31=py39h06a4308_0
- cffi=1.17.1=py39h1fdaa30_1
- charset-normalizer=3.3.2=pyhd3eb1b0_0
- constantly=23.10.4=py39h06a4308_0
- cryptography=44.0.1=py39h7825ff9_0
- cssselect=1.2.0=py39h06a4308_0
- defusedxml=0.7.1=pyhd3eb1b0_0
- filelock=3.13.1=py39h06a4308_0
- hyperlink=21.0.0=pyhd3eb1b0_0
- icu=73.1=h6a678d5_0
- idna=3.7=py39h06a4308_0
- incremental=22.10.0=pyhd3eb1b0_0
- itemadapter=0.3.0=pyhd3eb1b0_0
- itemloaders=1.1.0=py39h06a4308_0
- jmespath=1.0.1=py39h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- libxml2=2.13.5=hfdd30dd_0
- libxslt=1.1.41=h097e994_0
- lxml=5.3.0=py39h57af460_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- parsel=1.8.1=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- protego=0.1.16=py_0
- pyasn1=0.6.1=py39h06a4308_0
- pyasn1-modules=0.4.1=pyhd8ed1ab_1
- pycparser=2.21=pyhd3eb1b0_0
- pydispatcher=2.0.5=py39h06a4308_2
- pyopenssl=25.0.0=py39h06a4308_0
- pysocks=1.7.1=py39h06a4308_0
- python=3.9.21=he870216_1
- queuelib=1.6.2=py39h06a4308_0
- readline=8.2=h5eee18b_0
- requests=2.32.3=py39h06a4308_1
- requests-file=1.5.1=pyhd3eb1b0_0
- scrapy=2.12.0=py39h06a4308_0
- service_identity=18.1.0=pyhd3eb1b0_1
- setuptools=75.8.0=py39h06a4308_0
- six=1.16.0=pyhd3eb1b0_1
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tldextract=5.1.2=py39h06a4308_0
- twisted=23.10.0=py39h06a4308_0
- typing_extensions=4.12.2=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- urllib3=2.3.0=py39h06a4308_0
- w3lib=2.1.2=py39h06a4308_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- zope=1.0=py39h06a4308_1
- zope.interface=7.1.1=py39h5eee18b_0
- pip:
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- pluggy==1.5.0
- pytest==8.3.5
- tomli==2.2.1
prefix: /opt/conda/envs/scrapy-fieldstats
| [
"tests/test_scrapy_fieldstats.py::test_single_item",
"tests/test_scrapy_fieldstats.py::test_single_item_many_fields",
"tests/test_scrapy_fieldstats.py::test_many_items",
"tests/test_scrapy_fieldstats.py::test_many_items_many_fields",
"tests/test_scrapy_fieldstats.py::test_many_items_many_fields_missing_field",
"tests/test_scrapy_fieldstats.py::test_many_items_many_fields_empty_field",
"tests/test_scrapy_fieldstats.py::test_nested_items"
] | [] | [] | [] | MIT License | 1,792 | 695 | [
"scrapy_fieldstats/fieldstats.py"
] |
|
stitchfix__nodebook-8 | b6e1ec614fd39acb740b04e99ee7e97d99122420 | 2017-10-21 23:04:18 | 46211e90955f3388a22e2a2132bb895814260f9a | diff --git a/nodebook/nodebookcore.py b/nodebook/nodebookcore.py
index 98b5cdc..ddae374 100644
--- a/nodebook/nodebookcore.py
+++ b/nodebook/nodebookcore.py
@@ -46,6 +46,9 @@ class ReferenceFinder(ast.NodeVisitor):
self.locals.add(node.name)
self.generic_visit(node)
+ def visit_arg(self, node):
+ self.locals.add(node.arg)
+
def visit_AugAssign(self, node):
target = node.target
while (type(target) is ast.Subscript):
diff --git a/setup.py b/setup.py
index 11d6a77..adc7229 100644
--- a/setup.py
+++ b/setup.py
@@ -5,7 +5,7 @@ import sys
setup(
name='nodebook',
- version='0.2.0',
+ version='0.2.1',
author='Kevin Zielnicki',
author_email='[email protected]',
license='Stitch Fix 2017',
| Functions don't work in nodebook in py3
Because of changes to the ast in Python 3, functions no longer are parsed correctly. Eg, from @hacktuarial:
```
def add(a, b):
return a + b
```
nodebook throws an error:
```
KeyError: "name 'a' is not defined"
``` | stitchfix/nodebook | diff --git a/tests/test_nodebookcore.py b/tests/test_nodebookcore.py
index cfd9646..470121c 100644
--- a/tests/test_nodebookcore.py
+++ b/tests/test_nodebookcore.py
@@ -42,6 +42,16 @@ class TestReferenceFinder(object):
assert rf.locals == {'pd', 'y'}
assert rf.imports == {'pandas'}
+ def test_function(self, rf):
+ code_tree = ast.parse(
+ "def add(x,y):\n"
+ " return x+y\n"
+ )
+ rf.visit(code_tree)
+ assert rf.inputs == set()
+ assert rf.locals == {'add', 'x', 'y'}
+ assert rf.imports == set()
+
class TestNodebook(object):
@pytest.fixture()
diff --git a/tests/test_pickledict.py b/tests/test_pickledict.py
index ef35fdd..90b7088 100644
--- a/tests/test_pickledict.py
+++ b/tests/test_pickledict.py
@@ -33,6 +33,12 @@ class TestPickleDict(object):
df = pd.DataFrame({'a': [0, 1, 2], 'b': ['foo', 'bar', 'baz']})
mydict['test_df'] = df
assert mydict['test_df'].equals(df)
+
+ def test_func(self, mydict):
+ def add(a, b):
+ return a + b
+ mydict['test_func'] = add
+ assert mydict['test_func'](3,5) == 8
def test_immutability(self, mydict):
l = [1, 2, 3]
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 2
} | 0.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | anyio==4.9.0
argon2-cffi==23.1.0
argon2-cffi-bindings==21.2.0
arrow==1.3.0
asttokens==3.0.0
async-lru==2.0.5
attrs==25.3.0
babel==2.17.0
beautifulsoup4==4.13.3
bleach==6.2.0
certifi==2025.1.31
cffi==1.17.1
charset-normalizer==3.4.1
click==8.1.8
comm==0.2.2
debugpy==1.8.13
decorator==5.2.1
defusedxml==0.7.1
dill==0.3.9
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
executing==2.2.0
fastjsonschema==2.21.1
fqdn==1.5.1
h11==0.14.0
httpcore==1.0.7
httpx==0.28.1
idna==3.10
importlib_metadata==8.6.1
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
ipykernel==6.29.5
ipython==8.18.1
ipywidgets==8.1.5
isoduration==20.11.0
jedi==0.19.2
Jinja2==3.1.6
json5==0.10.0
jsonpointer==3.0.0
jsonschema==4.23.0
jsonschema-specifications==2024.10.1
jupyter==1.1.1
jupyter-console==6.6.3
jupyter-events==0.12.0
jupyter-lsp==2.2.5
jupyter_client==8.6.3
jupyter_core==5.7.2
jupyter_server==2.15.0
jupyter_server_terminals==0.5.3
jupyterlab==4.3.6
jupyterlab_pygments==0.3.0
jupyterlab_server==2.27.3
jupyterlab_widgets==3.0.13
MarkupSafe==3.0.2
matplotlib-inline==0.1.7
mistune==3.1.3
msgpack-python==0.5.6
nbclient==0.10.2
nbconvert==7.16.6
nbformat==5.10.4
nest-asyncio==1.6.0
-e git+https://github.com/stitchfix/nodebook.git@b6e1ec614fd39acb740b04e99ee7e97d99122420#egg=nodebook
notebook==7.3.3
notebook_shim==0.2.4
numpy==2.0.2
overrides==7.7.0
packaging @ file:///croot/packaging_1734472117206/work
pandas==2.2.3
pandocfilters==1.5.1
parso==0.8.4
pexpect==4.9.0
platformdirs==4.3.7
pluggy @ file:///croot/pluggy_1733169602837/work
prometheus_client==0.21.1
prompt_toolkit==3.0.50
psutil==7.0.0
ptyprocess==0.7.0
pure_eval==0.2.3
pycparser==2.22
Pygments==2.19.1
pytest @ file:///croot/pytest_1738938843180/work
pytest-runner==6.0.1
python-dateutil==2.9.0.post0
python-json-logger==3.3.0
pytz==2025.2
PyYAML==6.0.2
pyzmq==26.3.0
referencing==0.36.2
requests==2.32.3
rfc3339-validator==0.1.4
rfc3986-validator==0.1.1
rpds-py==0.24.0
Send2Trash==1.8.3
six==1.17.0
sniffio==1.3.1
soupsieve==2.6
stack-data==0.6.3
terminado==0.18.1
tinycss2==1.4.0
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
tornado==6.4.2
traitlets==5.14.3
types-python-dateutil==2.9.0.20241206
typing_extensions==4.13.0
tzdata==2025.2
uri-template==1.3.0
urllib3==2.3.0
wcwidth==0.2.13
webcolors==24.11.1
webencodings==0.5.1
websocket-client==1.8.0
widgetsnbextension==4.0.13
zipp==3.21.0
| name: nodebook
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- anyio==4.9.0
- argon2-cffi==23.1.0
- argon2-cffi-bindings==21.2.0
- arrow==1.3.0
- asttokens==3.0.0
- async-lru==2.0.5
- attrs==25.3.0
- babel==2.17.0
- beautifulsoup4==4.13.3
- bleach==6.2.0
- certifi==2025.1.31
- cffi==1.17.1
- charset-normalizer==3.4.1
- click==8.1.8
- comm==0.2.2
- debugpy==1.8.13
- decorator==5.2.1
- defusedxml==0.7.1
- dill==0.3.9
- executing==2.2.0
- fastjsonschema==2.21.1
- fqdn==1.5.1
- h11==0.14.0
- httpcore==1.0.7
- httpx==0.28.1
- idna==3.10
- importlib-metadata==8.6.1
- ipykernel==6.29.5
- ipython==8.18.1
- ipywidgets==8.1.5
- isoduration==20.11.0
- jedi==0.19.2
- jinja2==3.1.6
- json5==0.10.0
- jsonpointer==3.0.0
- jsonschema==4.23.0
- jsonschema-specifications==2024.10.1
- jupyter==1.1.1
- jupyter-client==8.6.3
- jupyter-console==6.6.3
- jupyter-core==5.7.2
- jupyter-events==0.12.0
- jupyter-lsp==2.2.5
- jupyter-server==2.15.0
- jupyter-server-terminals==0.5.3
- jupyterlab==4.3.6
- jupyterlab-pygments==0.3.0
- jupyterlab-server==2.27.3
- jupyterlab-widgets==3.0.13
- markupsafe==3.0.2
- matplotlib-inline==0.1.7
- mistune==3.1.3
- msgpack-python==0.5.6
- nbclient==0.10.2
- nbconvert==7.16.6
- nbformat==5.10.4
- nest-asyncio==1.6.0
- notebook==7.3.3
- notebook-shim==0.2.4
- numpy==2.0.2
- overrides==7.7.0
- pandas==2.2.3
- pandocfilters==1.5.1
- parso==0.8.4
- pexpect==4.9.0
- platformdirs==4.3.7
- prometheus-client==0.21.1
- prompt-toolkit==3.0.50
- psutil==7.0.0
- ptyprocess==0.7.0
- pure-eval==0.2.3
- pycparser==2.22
- pygments==2.19.1
- pytest-runner==6.0.1
- python-dateutil==2.9.0.post0
- python-json-logger==3.3.0
- pytz==2025.2
- pyyaml==6.0.2
- pyzmq==26.3.0
- referencing==0.36.2
- requests==2.32.3
- rfc3339-validator==0.1.4
- rfc3986-validator==0.1.1
- rpds-py==0.24.0
- send2trash==1.8.3
- six==1.17.0
- sniffio==1.3.1
- soupsieve==2.6
- stack-data==0.6.3
- terminado==0.18.1
- tinycss2==1.4.0
- tornado==6.4.2
- traitlets==5.14.3
- types-python-dateutil==2.9.0.20241206
- typing-extensions==4.13.0
- tzdata==2025.2
- uri-template==1.3.0
- urllib3==2.3.0
- wcwidth==0.2.13
- webcolors==24.11.1
- webencodings==0.5.1
- websocket-client==1.8.0
- widgetsnbextension==4.0.13
- zipp==3.21.0
prefix: /opt/conda/envs/nodebook
| [
"tests/test_nodebookcore.py::TestReferenceFinder::test_function"
] | [] | [
"tests/test_nodebookcore.py::TestReferenceFinder::test_assign",
"tests/test_nodebookcore.py::TestReferenceFinder::test_augassign",
"tests/test_nodebookcore.py::TestReferenceFinder::test_import",
"tests/test_nodebookcore.py::TestReferenceFinder::test_multiline",
"tests/test_nodebookcore.py::TestNodebook::test_single_node",
"tests/test_nodebookcore.py::TestNodebook::test_node_chain",
"tests/test_pickledict.py::TestPickleDict::test_int[mode_memory]",
"tests/test_pickledict.py::TestPickleDict::test_int[mode_disk]",
"tests/test_pickledict.py::TestPickleDict::test_string[mode_memory]",
"tests/test_pickledict.py::TestPickleDict::test_string[mode_disk]",
"tests/test_pickledict.py::TestPickleDict::test_bytes[mode_memory]",
"tests/test_pickledict.py::TestPickleDict::test_bytes[mode_disk]",
"tests/test_pickledict.py::TestPickleDict::test_df[mode_memory]",
"tests/test_pickledict.py::TestPickleDict::test_df[mode_disk]",
"tests/test_pickledict.py::TestPickleDict::test_func[mode_memory]",
"tests/test_pickledict.py::TestPickleDict::test_func[mode_disk]",
"tests/test_pickledict.py::TestPickleDict::test_immutability[mode_memory]",
"tests/test_pickledict.py::TestPickleDict::test_immutability[mode_disk]"
] | [] | Apache License 2.0 | 1,794 | 254 | [
"nodebook/nodebookcore.py",
"setup.py"
] |
|
viraptor__phply-37 | 4a21aa038611ab6ebd0bf8ea5d3ee97e624339d3 | 2017-10-24 11:18:45 | 4a21aa038611ab6ebd0bf8ea5d3ee97e624339d3 | diff --git a/phply/phpparse.py b/phply/phpparse.py
index 69d012e..12dc62b 100644
--- a/phply/phpparse.py
+++ b/phply/phpparse.py
@@ -1569,8 +1569,17 @@ def p_encaps_list_string(p):
if p[1] == '':
p[0] = process_php_string_escapes(p[2])
else:
- p[0] = ast.BinaryOp('.', p[1], process_php_string_escapes(p[2]),
- lineno=p.lineno(2))
+ if isinstance(p[1], string_type):
+ # if it's only a string so far, just append the contents
+ p[0] = p[1] + process_php_string_escapes(p[2])
+ elif isinstance(p[1], ast.BinaryOp) and isinstance(p[1].right, string_type):
+ # if the last right leaf is a string, extend previous binop
+ p[0] = ast.BinaryOp('.', p[1].left, p[1].right + process_php_string_escapes(p[2]),
+ lineno=p[1].lineno)
+ else:
+ # worst case - insert a binaryop
+ p[0] = ast.BinaryOp('.', p[1], process_php_string_escapes(p[2]),
+ lineno=p.lineno(2))
def p_encaps_var(p):
'encaps_var : VARIABLE'
| Issue with parsing HERE_DOC syntax
I've got a PHP file that has a big heredoc block in it. When I attempt to parse it, I get a recursion limit error:
```
Traceback (most recent call last):
File "php2json.py", line 31, in <module>
output, indent=2)
File "/Library/Python/2.7/site-packages/simplejson/__init__.py", line 276, in dump
for chunk in iterable:
File "/Library/Python/2.7/site-packages/simplejson/encoder.py", line 665, in _iterencode
for chunk in _iterencode_list(o, _current_indent_level):
File "/Library/Python/2.7/site-packages/simplejson/encoder.py", line 515, in _iterencode_list
for chunk in chunks:
File "/Library/Python/2.7/site-packages/simplejson/encoder.py", line 515, in _iterencode_list
for chunk in chunks:
File "/Library/Python/2.7/site-packages/simplejson/encoder.py", line 634, in _iterencode_dict
for chunk in chunks:
File "/Library/Python/2.7/site-packages/simplejson/encoder.py", line 515, in _iterencode_list
for chunk in chunks:
File "/Library/Python/2.7/site-packages/simplejson/encoder.py", line 515, in _iterencode_list
for chunk in chunks:
File "/Library/Python/2.7/site-packages/simplejson/encoder.py", line 634, in _iterencode_dict
for chunk in chunks:
File "/Library/Python/2.7/site-packages/simplejson/encoder.py", line 515, in _iterencode_list
for chunk in chunks:
File "/Library/Python/2.7/site-packages/simplejson/encoder.py", line 515, in _iterencode_list
for chunk in chunks:
File "/Library/Python/2.7/site-packages/simplejson/encoder.py", line 634, in _iterencode_dict
for chunk in chunks:
```
And it goes on like that for hundreds or thousands of lines. I'm not sure what structure the data was parsed into, but it appears to be a loop of some sort. | viraptor/phply | diff --git a/tests/test_parser.py b/tests/test_parser.py
index 0ecb9a8..62bf2cc 100644
--- a/tests/test_parser.py
+++ b/tests/test_parser.py
@@ -235,18 +235,12 @@ EOT;
BinaryOp('.',
BinaryOp('.',
BinaryOp('.',
- BinaryOp('.',
- BinaryOp('.',
- BinaryOp('.',
- 'This',
- ' is a "'),
- Variable('$heredoc')),
- '" with some '),
- ObjectProperty(Variable('$embedded'),
- 'variables')),
- '.\n'),
- 'This'),
- ' is not the EOT; this is:')]),
+ 'This is a "',
+ Variable('$heredoc')),
+ '" with some '),
+ ObjectProperty(Variable('$embedded'),
+ 'variables')),
+ '.\nThis is not the EOT; this is:')]),
]
eq_ast(input, expected)
if sys.version_info[0] < 3:
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | 1.1 | {
"env_vars": null,
"env_yml_path": [],
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "",
"pip_packages": [
"nose",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
importlib-metadata==4.8.3
iniconfig==1.1.1
nose==1.3.7
packaging==21.3
-e git+https://github.com/viraptor/phply.git@4a21aa038611ab6ebd0bf8ea5d3ee97e624339d3#egg=phply
pluggy==1.0.0
ply==3.11
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: phply
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- nose==1.3.7
- packaging==21.3
- pluggy==1.0.0
- ply==3.11
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/phply
| [
"tests/test_parser.py::test_heredoc"
] | [] | [
"tests/test_parser.py::test_inline_html",
"tests/test_parser.py::test_echo",
"tests/test_parser.py::test_open_tag_with_echo",
"tests/test_parser.py::test_exit",
"tests/test_parser.py::test_isset",
"tests/test_parser.py::test_namespace_names",
"tests/test_parser.py::test_unary_ops",
"tests/test_parser.py::test_assignment_ops",
"tests/test_parser.py::test_object_properties",
"tests/test_parser.py::test_string_unescape",
"tests/test_parser.py::test_string_offset_lookups",
"tests/test_parser.py::test_string_curly_dollar_expressions",
"tests/test_parser.py::test_function_calls",
"tests/test_parser.py::test_method_calls",
"tests/test_parser.py::test_if",
"tests/test_parser.py::test_foreach",
"tests/test_parser.py::test_foreach_with_lists",
"tests/test_parser.py::test_global_variables",
"tests/test_parser.py::test_variable_variables",
"tests/test_parser.py::test_classes",
"tests/test_parser.py::test_new",
"tests/test_parser.py::test_exceptions",
"tests/test_parser.py::test_catch_finally",
"tests/test_parser.py::test_just_finally",
"tests/test_parser.py::test_declare",
"tests/test_parser.py::test_instanceof",
"tests/test_parser.py::test_static_members",
"tests/test_parser.py::test_casts",
"tests/test_parser.py::test_namespaces",
"tests/test_parser.py::test_use_declarations",
"tests/test_parser.py::test_constant_declarations",
"tests/test_parser.py::test_closures",
"tests/test_parser.py::test_magic_constants",
"tests/test_parser.py::test_type_hinting",
"tests/test_parser.py::test_static_scalar_class_constants",
"tests/test_parser.py::test_backtick_shell_exec",
"tests/test_parser.py::test_open_close_tags_ignore",
"tests/test_parser.py::test_ternary",
"tests/test_parser.py::test_array_dereferencing",
"tests/test_parser.py::test_array_literal",
"tests/test_parser.py::test_array_in_default_arg",
"tests/test_parser.py::test_const_heredoc",
"tests/test_parser.py::test_object_property_on_expr",
"tests/test_parser.py::test_binary_string",
"tests/test_parser.py::test_class_trait_use",
"tests/test_parser.py::test_trait",
"tests/test_parser.py::test_trait_renames",
"tests/test_parser.py::test_class_name_as_string",
"tests/test_parser.py::test_static_expressions",
"tests/test_parser.py::test_const_arrays",
"tests/test_parser.py::test_numbers",
"tests/test_parser.py::test_result_multiple_offsets",
"tests/test_parser.py::test_yield",
"tests/test_parser.py::test_static_property_dynamic_access",
"tests/test_parser.py::test_static_property_dynamic_call",
"tests/test_parser.py::test_nowdoc",
"tests/test_parser.py::test_exit_loc"
] | [] | BSD | 1,802 | 338 | [
"phply/phpparse.py"
] |
|
elastic__elasticsearch-dsl-py-759 | 269fef7fa12333f7622c3694df75a1b296d87ae2 | 2017-10-25 12:36:19 | e8906dcd17eb2021bd191325817ff7541d838ea1 | diff --git a/elasticsearch_dsl/analysis.py b/elasticsearch_dsl/analysis.py
index 8424283..c2abd94 100644
--- a/elasticsearch_dsl/analysis.py
+++ b/elasticsearch_dsl/analysis.py
@@ -19,9 +19,9 @@ class AnalysisBase(object):
class CustomAnalysis(object):
name = 'custom'
- def __init__(self, name, builtin_type='custom', **kwargs):
+ def __init__(self, filter_name, builtin_type='custom', **kwargs):
self._builtin_type = builtin_type
- self._name = name
+ self._name = filter_name
super(CustomAnalysis, self).__init__(**kwargs)
def to_dict(self):
| Can't create custom stemming filter
The stemming filter, requires the use a property called `name`, however the the library is using this property.
(https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-stemmer-tokenfilter.html)
This code
```python
light_stemmer_ = token_filter(name="minimal_english",type= "stemmer")
```
will produce this:
```json
"filter" : {
"minimal_english" : {
"type" : "stemmer"
}
}
```
while I would like this
```json
"filter" : {
"light_stemmer_" : {
"type" : "stemmer",
"name" : "minimal_english"
}
}
```
I suggest either changing the name of the variable the user is using, or allowing a variable like `_name` to become `name` when serializing.
as a workaround I am changed this line https://github.com/elastic/elasticsearch-dsl-py/blob/29d28a012a5a3a930e66cee56178208f21cb5fdf/elasticsearch_dsl/analysis.py#L33 to only pop if the type is not stemming.
like this
```python
if self._builtin_type is 'stemmer' and 'name' in d[self.name] :
d['name'] = d[self.name]['name']
d = d.pop(self.name)
```
and them in my code I do
```python
light_stemmer_ = token_filter("light_stemmer_", "stemmer")
light_stemmer_.name = "minimal_english"
```
but I know it is a hacky solution | elastic/elasticsearch-dsl-py | diff --git a/test_elasticsearch_dsl/test_analysis.py b/test_elasticsearch_dsl/test_analysis.py
index 014c43d..6dc3c09 100644
--- a/test_elasticsearch_dsl/test_analysis.py
+++ b/test_elasticsearch_dsl/test_analysis.py
@@ -79,3 +79,11 @@ def test_custom_analyzer_can_collect_custom_items():
}
} == a.get_analysis_definition()
+def test_stemmer_analyzer_can_pass_name():
+ t = analysis.token_filter('my_english_filter', name="minimal_english", type="stemmer")
+ assert t.to_dict() == 'my_english_filter'
+ assert {
+ "type" : "stemmer",
+ "name" : "minimal_english"
+ } == t.get_definition()
+
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 1
} | 5.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e '.[develop]'",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": null,
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
attrs==22.2.0
Babel==2.11.0
certifi==2021.5.30
charset-normalizer==2.0.12
coverage==6.2
docutils==0.18.1
elasticsearch==5.5.3
-e git+https://github.com/elastic/elasticsearch-dsl-py.git@269fef7fa12333f7622c3694df75a1b296d87ae2#egg=elasticsearch_dsl
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
iniconfig==1.1.1
Jinja2==3.0.3
MarkupSafe==2.0.1
mock==5.2.0
packaging==21.3
pluggy==1.0.0
py==1.11.0
Pygments==2.14.0
pyparsing==3.1.4
pytest==7.0.1
pytest-cov==4.0.0
python-dateutil==2.9.0.post0
pytz==2025.2
requests==2.27.1
six==1.17.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinx-rtd-theme==2.0.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jquery==4.1
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
zipp==3.6.0
| name: elasticsearch-dsl-py
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- attrs==22.2.0
- babel==2.11.0
- charset-normalizer==2.0.12
- coverage==6.2
- docutils==0.18.1
- elasticsearch==5.5.3
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- jinja2==3.0.3
- markupsafe==2.0.1
- mock==5.2.0
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pygments==2.14.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-cov==4.0.0
- python-dateutil==2.9.0.post0
- pytz==2025.2
- requests==2.27.1
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinx-rtd-theme==2.0.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jquery==4.1
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- zipp==3.6.0
prefix: /opt/conda/envs/elasticsearch-dsl-py
| [
"test_elasticsearch_dsl/test_analysis.py::test_stemmer_analyzer_can_pass_name"
] | [] | [
"test_elasticsearch_dsl/test_analysis.py::test_analyzer_serializes_as_name",
"test_elasticsearch_dsl/test_analysis.py::test_analyzer_has_definition",
"test_elasticsearch_dsl/test_analysis.py::test_normalizer_serializes_as_name",
"test_elasticsearch_dsl/test_analysis.py::test_normalizer_has_definition",
"test_elasticsearch_dsl/test_analysis.py::test_tokenizer",
"test_elasticsearch_dsl/test_analysis.py::test_custom_analyzer_can_collect_custom_items"
] | [] | Apache License 2.0 | 1,807 | 175 | [
"elasticsearch_dsl/analysis.py"
] |
|
rollbar__pyrollbar-213 | c9d34b1d1544415a17d5a79e90179a763e739bfc | 2017-10-25 21:10:12 | a87ba1887c362cdc911eaa1b1c53311edfbed2c3 | rokob: It turns out that copy/deepcopy go through the pickle machinery via __reduce__ and __reduce_ex__, so things that are not copyable throw TypeErrors from pickling. | diff --git a/rollbar/lib/__init__.py b/rollbar/lib/__init__.py
index 1afaee1..9a3f664 100644
--- a/rollbar/lib/__init__.py
+++ b/rollbar/lib/__init__.py
@@ -173,7 +173,10 @@ def dict_merge(a, b):
if k in result and isinstance(result[k], dict):
result[k] = dict_merge(result[k], v)
else:
- result[k] = copy.deepcopy(v)
+ try:
+ result[k] = copy.deepcopy(v)
+ except:
+ result[k] = '<Uncopyable obj:(%s)>' % (v,)
return result
| Exception while reporting exc_info to Rollbar. TypeError("can't pickle select.epoll objects",)
I'm using `rollbar==0.13.16` with `tornado==4.5.2` and getting the following exception:
```
[E 171025 05:47:48 __init__:411] Exception while reporting message to Rollbar. TypeError("can't pickle select.epoll objects",)
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/tornado/web.py", line 1511, in _execute
result = yield result
File "/usr/local/lib/python3.6/site-packages/tornado/gen.py", line 1055, in run
value = future.result()
File "/usr/local/lib/python3.6/site-packages/tornado/concurrent.py", line 238, in result
raise_exc_info(self._exc_info)
File "<string>", line 4, in raise_exc_info
File "/usr/local/lib/python3.6/site-packages/tornado/gen.py", line 1063, in run
yielded = self.gen.throw(*exc_info)
File "/project-ps-hsm/src/handlers/balance.py", line 21, in get
yield self.check_blacklist()
File "/usr/local/lib/python3.6/site-packages/tornado/gen.py", line 1055, in run
value = future.result()
File "/usr/local/lib/python3.6/site-packages/tornado/concurrent.py", line 238, in result
raise_exc_info(self._exc_info)
File "<string>", line 4, in raise_exc_info
File "/usr/local/lib/python3.6/site-packages/tornado/gen.py", line 1063, in run
yielded = self.gen.throw(*exc_info)
File "/project-ps-hsm/src/handlers/_base.py", line 81, in check_blacklist
reason = yield is_blacklisted(self.can, self.current_user)
File "/usr/local/lib/python3.6/site-packages/tornado/gen.py", line 1055, in run
value = future.result()
File "/usr/local/lib/python3.6/site-packages/tornado/concurrent.py", line 238, in result
raise_exc_info(self._exc_info)
File "<string>", line 4, in raise_exc_info
File "/usr/local/lib/python3.6/site-packages/tornado/gen.py", line 1063, in run
yielded = self.gen.throw(*exc_info)
File "/project-ps-hsm/src/utils/afp_processing.py", line 42, in is_blacklisted
'SECRET': config.INTERNAL_API_SECRET['customer'],
File "/usr/local/lib/python3.6/site-packages/tornado/gen.py", line 1055, in run
value = future.result()
File "/usr/local/lib/python3.6/site-packages/tornado/concurrent.py", line 238, in result
raise_exc_info(self._exc_info)
File "<string>", line 4, in raise_exc_info
tornado.httpclient.HTTPError: HTTP 500: Internal Server Error
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/rollbar/__init__.py", line 409, in report_message
return _report_message(message, level, request, extra_data, payload_data)
File "/usr/local/lib/python3.6/site-packages/rollbar/__init__.py", line 712, in _report_message
data = dict_merge(data, payload_data)
File "/usr/local/lib/python3.6/site-packages/rollbar/lib/__init__.py", line 174, in dict_merge
result[k] = dict_merge(result[k], v)
File "/usr/local/lib/python3.6/site-packages/rollbar/lib/__init__.py", line 176, in dict_merge
result[k] = copy.deepcopy(v)
File "/usr/local/lib/python3.6/copy.py", line 150, in deepcopy
y = copier(x, memo)
File "/usr/local/lib/python3.6/copy.py", line 215, in _deepcopy_list
append(deepcopy(a, memo))
File "/usr/local/lib/python3.6/copy.py", line 150, in deepcopy
y = copier(x, memo)
File "/usr/local/lib/python3.6/copy.py", line 240, in _deepcopy_dict
y[deepcopy(key, memo)] = deepcopy(value, memo)
File "/usr/local/lib/python3.6/copy.py", line 150, in deepcopy
y = copier(x, memo)
File "/usr/local/lib/python3.6/copy.py", line 220, in _deepcopy_tuple
y = [deepcopy(a, memo) for a in x]
File "/usr/local/lib/python3.6/copy.py", line 220, in <listcomp>
y = [deepcopy(a, memo) for a in x]
File "/usr/local/lib/python3.6/copy.py", line 180, in deepcopy
y = _reconstruct(x, memo, *rv)
File "/usr/local/lib/python3.6/copy.py", line 280, in _reconstruct
state = deepcopy(state, memo)
File "/usr/local/lib/python3.6/copy.py", line 150, in deepcopy
y = copier(x, memo)
File "/usr/local/lib/python3.6/copy.py", line 240, in _deepcopy_dict
y[deepcopy(key, memo)] = deepcopy(value, memo)
File "/usr/local/lib/python3.6/copy.py", line 180, in deepcopy
y = _reconstruct(x, memo, *rv)
File "/usr/local/lib/python3.6/copy.py", line 280, in _reconstruct
state = deepcopy(state, memo)
File "/usr/local/lib/python3.6/copy.py", line 150, in deepcopy
y = copier(x, memo)
File "/usr/local/lib/python3.6/copy.py", line 240, in _deepcopy_dict
y[deepcopy(key, memo)] = deepcopy(value, memo)
File "/usr/local/lib/python3.6/copy.py", line 180, in deepcopy
y = _reconstruct(x, memo, *rv)
File "/usr/local/lib/python3.6/copy.py", line 280, in _reconstruct
state = deepcopy(state, memo)
File "/usr/local/lib/python3.6/copy.py", line 150, in deepcopy
y = copier(x, memo)
File "/usr/local/lib/python3.6/copy.py", line 240, in _deepcopy_dict
y[deepcopy(key, memo)] = deepcopy(value, memo)
File "/usr/local/lib/python3.6/copy.py", line 180, in deepcopy
y = _reconstruct(x, memo, *rv)
File "/usr/local/lib/python3.6/copy.py", line 280, in _reconstruct
state = deepcopy(state, memo)
File "/usr/local/lib/python3.6/copy.py", line 150, in deepcopy
y = copier(x, memo)
File "/usr/local/lib/python3.6/copy.py", line 240, in _deepcopy_dict
y[deepcopy(key, memo)] = deepcopy(value, memo)
File "/usr/local/lib/python3.6/copy.py", line 169, in deepcopy
rv = reductor(4)
TypeError: can't pickle select.epoll objects
```
It results in certain errors don't show up in Rollbar which is pretty bad 😞
It seems like not every `exc_info` is pickable and this situation should be handled properly. I suggest still reporting the error message, even without the exception info. | rollbar/pyrollbar | diff --git a/rollbar/test/test_lib.py b/rollbar/test/test_lib.py
new file mode 100644
index 0000000..201a2ed
--- /dev/null
+++ b/rollbar/test/test_lib.py
@@ -0,0 +1,59 @@
+from rollbar.lib import dict_merge
+
+from rollbar.test import BaseTest
+
+class RollbarLibTest(BaseTest):
+ def test_dict_merge_not_dict(self):
+ a = {'a': {'b': 42}}
+ b = 99
+ result = dict_merge(a, b)
+
+ self.assertEqual(99, result)
+
+ def test_dict_merge_dicts_independent(self):
+ a = {'a': {'b': 42}}
+ b = {'x': {'y': 99}}
+ result = dict_merge(a, b)
+
+ self.assertIn('a', result)
+ self.assertIn('b', result['a'])
+ self.assertEqual(42, result['a']['b'])
+ self.assertIn('x', result)
+ self.assertIn('y', result['x'])
+ self.assertEqual(99, result['x']['y'])
+
+ def test_dict_merge_dicts(self):
+ a = {'a': {'b': 42}}
+ b = {'a': {'c': 99}}
+ result = dict_merge(a, b)
+
+ self.assertIn('a', result)
+ self.assertIn('b', result['a'])
+ self.assertIn('c', result['a'])
+ self.assertEqual(42, result['a']['b'])
+ self.assertEqual(99, result['a']['c'])
+
+ def test_dict_merge_dicts_second_wins(self):
+ a = {'a': {'b': 42}}
+ b = {'a': {'b': 99}}
+ result = dict_merge(a, b)
+
+ self.assertIn('a', result)
+ self.assertIn('b', result['a'])
+ self.assertEqual(99, result['a']['b'])
+
+ def test_dict_merge_dicts_select_poll(self):
+ import select
+ poll = getattr(select, 'poll', None)
+ if poll is None:
+ return
+ p = poll()
+ a = {'a': {'b': 42}}
+ b = {'a': {'y': p}}
+ result = dict_merge(a, b)
+
+ self.assertIn('a', result)
+ self.assertIn('b', result['a'])
+ self.assertEqual(42, result['a']['b'])
+ self.assertIn('y', result['a'])
+ self.assertRegex(result['a']['y'], r'Uncopyable obj')
| {
"commit_name": "merge_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 1
},
"num_modified_files": 1
} | 0.13 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "",
"pip_packages": [
"mock",
"webob",
"blinker",
"unittest2",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.4",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
blinker==1.5
certifi==2021.5.30
charset-normalizer==2.0.12
idna==3.10
importlib-metadata==4.8.3
iniconfig==1.1.1
linecache2==1.0.0
mock==5.2.0
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
requests==2.27.1
-e git+https://github.com/rollbar/pyrollbar.git@c9d34b1d1544415a17d5a79e90179a763e739bfc#egg=rollbar
six==1.17.0
tomli==1.2.3
traceback2==1.4.0
typing_extensions==4.1.1
unittest2==1.1.0
urllib3==1.26.20
WebOb==1.8.9
zipp==3.6.0
| name: pyrollbar
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- argparse==1.4.0
- attrs==22.2.0
- blinker==1.5
- charset-normalizer==2.0.12
- idna==3.10
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- linecache2==1.0.0
- mock==5.2.0
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- requests==2.27.1
- six==1.17.0
- tomli==1.2.3
- traceback2==1.4.0
- typing-extensions==4.1.1
- unittest2==1.1.0
- urllib3==1.26.20
- webob==1.8.9
- zipp==3.6.0
prefix: /opt/conda/envs/pyrollbar
| [
"rollbar/test/test_lib.py::RollbarLibTest::test_dict_merge_dicts_select_poll"
] | [] | [
"rollbar/test/test_lib.py::RollbarLibTest::test_dict_merge_dicts",
"rollbar/test/test_lib.py::RollbarLibTest::test_dict_merge_dicts_independent",
"rollbar/test/test_lib.py::RollbarLibTest::test_dict_merge_dicts_second_wins",
"rollbar/test/test_lib.py::RollbarLibTest::test_dict_merge_not_dict"
] | [] | MIT License | 1,808 | 168 | [
"rollbar/lib/__init__.py"
] |
ahawker__ulid-59 | efdac942d7f969c802903f574965ca860882a891 | 2017-10-26 02:56:21 | 64db8e687fcb5faaf68c92dc5d8adef2b4b1bddd | diff --git a/ulid/base32.py b/ulid/base32.py
index 83f8a8a..f7377b6 100644
--- a/ulid/base32.py
+++ b/ulid/base32.py
@@ -31,11 +31,11 @@ DECODING = array.array(
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x01,
0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E,
- 0x0F, 0x10, 0x11, 0xFF, 0x12, 0x13, 0xFF, 0x14, 0x15, 0xFF,
+ 0x0F, 0x10, 0x11, 0x01, 0x12, 0x13, 0x01, 0x14, 0x15, 0x00,
0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C, 0x1D, 0x1E,
0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C,
- 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0xFF, 0x12, 0x13, 0xFF, 0x14,
- 0x15, 0xFF, 0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C,
+ 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x01, 0x12, 0x13, 0x01, 0x14,
+ 0x15, 0x00, 0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C,
0x1D, 0x1E, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
| Non-Crockford's Base32 letters converted differently in Java or Python implementations
Hi Andrew,
first of all, thanks for the amazing library, we've been using a lot!
I have a doubt regarding how we fix the conversion of ULIDs which are not following Crockford's Base32 standard.
We are using Lua to generate some guids (https://github.com/Tieske/ulid.lua) and for some reason, we get from time to time letters outside the Crockford's Base32.
While trying to fix this on our side (we're not sure how this is happening to be honest), we realised that Java and Python implementations silently corrects this issue in different ways:
### Java
```java
ULID.Value ulidValueFromString = ULID.parseULID("01BX73KC0TNH409RTFD1JXKmO0")
--> "01BX73KC0TNH409RTFD1JXKM00"
```
`mO` is silently converted into `M0`
### Python
```python
In [1]: import ulid
In [2]: u = ulid.from_str('01BX73KC0TNH409RTFD1JXKmO0')
In [3]: u
Out[3]: <ULID('01BX73KC0TNH409RTFD1JXKQZ0')>
In [4]: u.str
Out[4]: '01BX73KC0TNH409RTFD1JXKQZ0'
```
`mO` is silently converted into `QZ`
Shouldn't the python library behave as the Java one as per the [Crockford's Base32](http://crockford.com/wrmg/base32.html) spec, converting `L` and `I` to `1` and `O` to `0` and only upper casing lower case letters instead of changing them?
Thanks a lot in advance!
Eddie | ahawker/ulid | diff --git a/tests/test_base32.py b/tests/test_base32.py
index ab2df67..cac8214 100644
--- a/tests/test_base32.py
+++ b/tests/test_base32.py
@@ -9,6 +9,14 @@ import pytest
from ulid import base32
[email protected](scope='session')
+def decoding_alphabet():
+ """
+ Fixture that yields the entire alphabet that is valid for base32 decoding.
+ """
+ return base32.ENCODING + 'lLiIoO'
+
+
def test_encode_handles_ulid_and_returns_26_char_string(valid_bytes_128):
"""
Assert that :func:`~ulid.base32.encode` encodes a valid 128 bit bytes object into a :class:`~str`
@@ -235,3 +243,12 @@ def test_decode_randomness_raises_on_non_ascii_str(invalid_str_encoding):
"""
with pytest.raises(ValueError):
base32.decode_randomness(invalid_str_encoding)
+
+
+def test_decode_table_has_value_for_entire_decoding_alphabet(decoding_alphabet):
+ """
+ Assert that :attr:`~ulid.base32.DECODING` stores a valid value mapping for all characters that
+ can be base32 decoded.
+ """
+ for char in decoding_alphabet:
+ assert base32.DECODING[ord(char)] != 0xFF, 'Character "{}" decoded improperly'.format(char)
diff --git a/tests/test_bugs.py b/tests/test_bugs.py
new file mode 100644
index 0000000..6ab8fcb
--- /dev/null
+++ b/tests/test_bugs.py
@@ -0,0 +1,21 @@
+"""
+ test_bugs
+ ~~~~~~~~~
+
+ Tests for validating reported bugs have been fixed.
+"""
+from ulid import api
+
+
+def test_github_issue_58():
+ """
+ Assert that :func:`~ulid.api.from_str` can properly decode strings that
+ contain Base32 "translate" characters.
+
+ Base32 "translate" characters are: "iI, lL, oO".
+
+ Issue: https://github.com/ahawker/ulid/issues/58
+ """
+ value = '01BX73KC0TNH409RTFD1JXKmO0'
+ instance = api.from_str(value)
+ assert instance.str == '01BX73KC0TNH409RTFD1JXKM00'
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 0.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio"
],
"pre_install": null,
"python": "3.6",
"reqs_path": [
"requirements/base.txt",
"requirements/dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | astroid==2.3.0
attrs==22.2.0
bandit==1.4.0
bumpversion==0.5.3
certifi==2021.5.30
charset-normalizer==2.0.12
click==8.0.4
coverage==6.2
dparse==0.6.3
execnet==1.9.0
gitdb==4.0.9
GitPython==3.1.18
idna==3.10
importlib-metadata==4.8.3
iniconfig==1.1.1
isort==5.10.1
lazy-object-proxy==1.7.1
mccabe==0.7.0
mypy==0.540
packaging==21.3
pbr==6.1.1
pluggy==1.0.0
py==1.11.0
pylint==1.7.4
pyparsing==3.1.4
pytest==7.0.1
pytest-asyncio==0.16.0
pytest-cov==4.0.0
pytest-mock==3.6.1
pytest-xdist==3.0.2
PyYAML==6.0.1
requests==2.27.1
safety==1.6.1
six==1.17.0
smmap==5.0.0
stevedore==3.5.2
tomli==1.2.3
typed-ast==1.1.2
typing_extensions==4.1.1
-e git+https://github.com/ahawker/ulid.git@efdac942d7f969c802903f574965ca860882a891#egg=ulid_py
urllib3==1.26.20
wrapt==1.16.0
zipp==3.6.0
| name: ulid
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- astroid==2.3.0
- attrs==22.2.0
- bandit==1.4.0
- bumpversion==0.5.3
- charset-normalizer==2.0.12
- click==8.0.4
- coverage==6.2
- dparse==0.6.3
- execnet==1.9.0
- gitdb==4.0.9
- gitpython==3.1.18
- idna==3.10
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- isort==5.10.1
- lazy-object-proxy==1.7.1
- mccabe==0.7.0
- mypy==0.540
- packaging==21.3
- pbr==6.1.1
- pluggy==1.0.0
- py==1.11.0
- pylint==1.7.4
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-asyncio==0.16.0
- pytest-cov==4.0.0
- pytest-mock==3.6.1
- pytest-xdist==3.0.2
- pyyaml==6.0.1
- requests==2.27.1
- safety==1.6.1
- six==1.17.0
- smmap==5.0.0
- stevedore==3.5.2
- tomli==1.2.3
- typed-ast==1.1.2
- typing-extensions==4.1.1
- urllib3==1.26.20
- wrapt==1.16.0
- zipp==3.6.0
prefix: /opt/conda/envs/ulid
| [
"tests/test_base32.py::test_decode_table_has_value_for_entire_decoding_alphabet",
"tests/test_bugs.py::test_github_issue_58"
] | [] | [
"tests/test_base32.py::test_encode_handles_ulid_and_returns_26_char_string",
"tests/test_base32.py::test_encode_handles_timestamp_and_returns_10_char_string",
"tests/test_base32.py::test_encode_handles_randomness_and_returns_16_char_string",
"tests/test_base32.py::test_encode_raises_on_bytes_length_mismatch[0]",
"tests/test_base32.py::test_encode_raises_on_bytes_length_mismatch[1]",
"tests/test_base32.py::test_encode_raises_on_bytes_length_mismatch[2]",
"tests/test_base32.py::test_encode_raises_on_bytes_length_mismatch[3]",
"tests/test_base32.py::test_encode_raises_on_bytes_length_mismatch[4]",
"tests/test_base32.py::test_encode_raises_on_bytes_length_mismatch[5]",
"tests/test_base32.py::test_encode_raises_on_bytes_length_mismatch[6]",
"tests/test_base32.py::test_encode_raises_on_bytes_length_mismatch[7]",
"tests/test_base32.py::test_encode_raises_on_bytes_length_mismatch[8]",
"tests/test_base32.py::test_encode_raises_on_bytes_length_mismatch[9]",
"tests/test_base32.py::test_encode_raises_on_bytes_length_mismatch[10]",
"tests/test_base32.py::test_encode_raises_on_bytes_length_mismatch[11]",
"tests/test_base32.py::test_encode_raises_on_bytes_length_mismatch[12]",
"tests/test_base32.py::test_encode_raises_on_bytes_length_mismatch[13]",
"tests/test_base32.py::test_encode_raises_on_bytes_length_mismatch[14]",
"tests/test_base32.py::test_encode_raises_on_bytes_length_mismatch[15]",
"tests/test_base32.py::test_encode_raises_on_bytes_length_mismatch[16]",
"tests/test_base32.py::test_encode_raises_on_bytes_length_mismatch[17]",
"tests/test_base32.py::test_encode_raises_on_bytes_length_mismatch[18]",
"tests/test_base32.py::test_encode_raises_on_bytes_length_mismatch[19]",
"tests/test_base32.py::test_encode_raises_on_bytes_length_mismatch[20]",
"tests/test_base32.py::test_encode_raises_on_bytes_length_mismatch[21]",
"tests/test_base32.py::test_encode_raises_on_bytes_length_mismatch[22]",
"tests/test_base32.py::test_encode_raises_on_bytes_length_mismatch[23]",
"tests/test_base32.py::test_encode_raises_on_bytes_length_mismatch[24]",
"tests/test_base32.py::test_encode_raises_on_bytes_length_mismatch[25]",
"tests/test_base32.py::test_encode_raises_on_bytes_length_mismatch[26]",
"tests/test_base32.py::test_encode_raises_on_bytes_length_mismatch[27]",
"tests/test_base32.py::test_encode_raises_on_bytes_length_mismatch[28]",
"tests/test_base32.py::test_encode_raises_on_bytes_length_mismatch[29]",
"tests/test_base32.py::test_encode_raises_on_bytes_length_mismatch[30]",
"tests/test_base32.py::test_encode_raises_on_bytes_length_mismatch[31]",
"tests/test_base32.py::test_encode_ulid_returns_26_char_string",
"tests/test_base32.py::test_encode_ulid_raises_on_bytes_length_mismatch[0]",
"tests/test_base32.py::test_encode_ulid_raises_on_bytes_length_mismatch[1]",
"tests/test_base32.py::test_encode_ulid_raises_on_bytes_length_mismatch[2]",
"tests/test_base32.py::test_encode_ulid_raises_on_bytes_length_mismatch[3]",
"tests/test_base32.py::test_encode_ulid_raises_on_bytes_length_mismatch[4]",
"tests/test_base32.py::test_encode_ulid_raises_on_bytes_length_mismatch[5]",
"tests/test_base32.py::test_encode_ulid_raises_on_bytes_length_mismatch[6]",
"tests/test_base32.py::test_encode_ulid_raises_on_bytes_length_mismatch[7]",
"tests/test_base32.py::test_encode_ulid_raises_on_bytes_length_mismatch[8]",
"tests/test_base32.py::test_encode_ulid_raises_on_bytes_length_mismatch[9]",
"tests/test_base32.py::test_encode_ulid_raises_on_bytes_length_mismatch[10]",
"tests/test_base32.py::test_encode_ulid_raises_on_bytes_length_mismatch[11]",
"tests/test_base32.py::test_encode_ulid_raises_on_bytes_length_mismatch[12]",
"tests/test_base32.py::test_encode_ulid_raises_on_bytes_length_mismatch[13]",
"tests/test_base32.py::test_encode_ulid_raises_on_bytes_length_mismatch[14]",
"tests/test_base32.py::test_encode_ulid_raises_on_bytes_length_mismatch[15]",
"tests/test_base32.py::test_encode_ulid_raises_on_bytes_length_mismatch[16]",
"tests/test_base32.py::test_encode_ulid_raises_on_bytes_length_mismatch[17]",
"tests/test_base32.py::test_encode_ulid_raises_on_bytes_length_mismatch[18]",
"tests/test_base32.py::test_encode_ulid_raises_on_bytes_length_mismatch[19]",
"tests/test_base32.py::test_encode_ulid_raises_on_bytes_length_mismatch[20]",
"tests/test_base32.py::test_encode_ulid_raises_on_bytes_length_mismatch[21]",
"tests/test_base32.py::test_encode_ulid_raises_on_bytes_length_mismatch[22]",
"tests/test_base32.py::test_encode_ulid_raises_on_bytes_length_mismatch[23]",
"tests/test_base32.py::test_encode_ulid_raises_on_bytes_length_mismatch[24]",
"tests/test_base32.py::test_encode_ulid_raises_on_bytes_length_mismatch[25]",
"tests/test_base32.py::test_encode_ulid_raises_on_bytes_length_mismatch[26]",
"tests/test_base32.py::test_encode_ulid_raises_on_bytes_length_mismatch[27]",
"tests/test_base32.py::test_encode_ulid_raises_on_bytes_length_mismatch[28]",
"tests/test_base32.py::test_encode_ulid_raises_on_bytes_length_mismatch[29]",
"tests/test_base32.py::test_encode_ulid_raises_on_bytes_length_mismatch[30]",
"tests/test_base32.py::test_encode_ulid_raises_on_bytes_length_mismatch[31]",
"tests/test_base32.py::test_encode_timestamp_returns_10_char_string",
"tests/test_base32.py::test_encode_timestamp_raises_on_bytes_length_mismatch[0]",
"tests/test_base32.py::test_encode_timestamp_raises_on_bytes_length_mismatch[1]",
"tests/test_base32.py::test_encode_timestamp_raises_on_bytes_length_mismatch[2]",
"tests/test_base32.py::test_encode_timestamp_raises_on_bytes_length_mismatch[3]",
"tests/test_base32.py::test_encode_timestamp_raises_on_bytes_length_mismatch[4]",
"tests/test_base32.py::test_encode_timestamp_raises_on_bytes_length_mismatch[5]",
"tests/test_base32.py::test_encode_timestamp_raises_on_bytes_length_mismatch[6]",
"tests/test_base32.py::test_encode_timestamp_raises_on_bytes_length_mismatch[7]",
"tests/test_base32.py::test_encode_timestamp_raises_on_bytes_length_mismatch[8]",
"tests/test_base32.py::test_encode_timestamp_raises_on_bytes_length_mismatch[9]",
"tests/test_base32.py::test_encode_timestamp_raises_on_bytes_length_mismatch[10]",
"tests/test_base32.py::test_encode_timestamp_raises_on_bytes_length_mismatch[11]",
"tests/test_base32.py::test_encode_timestamp_raises_on_bytes_length_mismatch[12]",
"tests/test_base32.py::test_encode_timestamp_raises_on_bytes_length_mismatch[13]",
"tests/test_base32.py::test_encode_timestamp_raises_on_bytes_length_mismatch[14]",
"tests/test_base32.py::test_encode_timestamp_raises_on_bytes_length_mismatch[15]",
"tests/test_base32.py::test_encode_timestamp_raises_on_bytes_length_mismatch[16]",
"tests/test_base32.py::test_encode_timestamp_raises_on_bytes_length_mismatch[17]",
"tests/test_base32.py::test_encode_timestamp_raises_on_bytes_length_mismatch[18]",
"tests/test_base32.py::test_encode_timestamp_raises_on_bytes_length_mismatch[19]",
"tests/test_base32.py::test_encode_timestamp_raises_on_bytes_length_mismatch[20]",
"tests/test_base32.py::test_encode_timestamp_raises_on_bytes_length_mismatch[21]",
"tests/test_base32.py::test_encode_timestamp_raises_on_bytes_length_mismatch[22]",
"tests/test_base32.py::test_encode_timestamp_raises_on_bytes_length_mismatch[23]",
"tests/test_base32.py::test_encode_timestamp_raises_on_bytes_length_mismatch[24]",
"tests/test_base32.py::test_encode_timestamp_raises_on_bytes_length_mismatch[25]",
"tests/test_base32.py::test_encode_timestamp_raises_on_bytes_length_mismatch[26]",
"tests/test_base32.py::test_encode_timestamp_raises_on_bytes_length_mismatch[27]",
"tests/test_base32.py::test_encode_timestamp_raises_on_bytes_length_mismatch[28]",
"tests/test_base32.py::test_encode_timestamp_raises_on_bytes_length_mismatch[29]",
"tests/test_base32.py::test_encode_timestamp_raises_on_bytes_length_mismatch[30]",
"tests/test_base32.py::test_encode_timestamp_raises_on_bytes_length_mismatch[31]",
"tests/test_base32.py::test_encode_randomness_returns_16_char_string",
"tests/test_base32.py::test_encode_randomness_raises_on_bytes_length_mismatch[0]",
"tests/test_base32.py::test_encode_randomness_raises_on_bytes_length_mismatch[1]",
"tests/test_base32.py::test_encode_randomness_raises_on_bytes_length_mismatch[2]",
"tests/test_base32.py::test_encode_randomness_raises_on_bytes_length_mismatch[3]",
"tests/test_base32.py::test_encode_randomness_raises_on_bytes_length_mismatch[4]",
"tests/test_base32.py::test_encode_randomness_raises_on_bytes_length_mismatch[5]",
"tests/test_base32.py::test_encode_randomness_raises_on_bytes_length_mismatch[6]",
"tests/test_base32.py::test_encode_randomness_raises_on_bytes_length_mismatch[7]",
"tests/test_base32.py::test_encode_randomness_raises_on_bytes_length_mismatch[8]",
"tests/test_base32.py::test_encode_randomness_raises_on_bytes_length_mismatch[9]",
"tests/test_base32.py::test_encode_randomness_raises_on_bytes_length_mismatch[10]",
"tests/test_base32.py::test_encode_randomness_raises_on_bytes_length_mismatch[11]",
"tests/test_base32.py::test_encode_randomness_raises_on_bytes_length_mismatch[12]",
"tests/test_base32.py::test_encode_randomness_raises_on_bytes_length_mismatch[13]",
"tests/test_base32.py::test_encode_randomness_raises_on_bytes_length_mismatch[14]",
"tests/test_base32.py::test_encode_randomness_raises_on_bytes_length_mismatch[15]",
"tests/test_base32.py::test_encode_randomness_raises_on_bytes_length_mismatch[16]",
"tests/test_base32.py::test_encode_randomness_raises_on_bytes_length_mismatch[17]",
"tests/test_base32.py::test_encode_randomness_raises_on_bytes_length_mismatch[18]",
"tests/test_base32.py::test_encode_randomness_raises_on_bytes_length_mismatch[19]",
"tests/test_base32.py::test_encode_randomness_raises_on_bytes_length_mismatch[20]",
"tests/test_base32.py::test_encode_randomness_raises_on_bytes_length_mismatch[21]",
"tests/test_base32.py::test_encode_randomness_raises_on_bytes_length_mismatch[22]",
"tests/test_base32.py::test_encode_randomness_raises_on_bytes_length_mismatch[23]",
"tests/test_base32.py::test_encode_randomness_raises_on_bytes_length_mismatch[24]",
"tests/test_base32.py::test_encode_randomness_raises_on_bytes_length_mismatch[25]",
"tests/test_base32.py::test_encode_randomness_raises_on_bytes_length_mismatch[26]",
"tests/test_base32.py::test_encode_randomness_raises_on_bytes_length_mismatch[27]",
"tests/test_base32.py::test_encode_randomness_raises_on_bytes_length_mismatch[28]",
"tests/test_base32.py::test_encode_randomness_raises_on_bytes_length_mismatch[29]",
"tests/test_base32.py::test_encode_randomness_raises_on_bytes_length_mismatch[30]",
"tests/test_base32.py::test_encode_randomness_raises_on_bytes_length_mismatch[31]",
"tests/test_base32.py::test_decode_handles_ulid_and_returns_16_bytes",
"tests/test_base32.py::test_decode_handles_timestamp_and_returns_6_bytes",
"tests/test_base32.py::test_decode_handles_randomness_and_returns_10_bytes",
"tests/test_base32.py::test_decode_raises_on_str_length_mismatch[0]",
"tests/test_base32.py::test_decode_raises_on_str_length_mismatch[1]",
"tests/test_base32.py::test_decode_raises_on_str_length_mismatch[2]",
"tests/test_base32.py::test_decode_raises_on_str_length_mismatch[3]",
"tests/test_base32.py::test_decode_raises_on_str_length_mismatch[4]",
"tests/test_base32.py::test_decode_raises_on_str_length_mismatch[5]",
"tests/test_base32.py::test_decode_raises_on_str_length_mismatch[6]",
"tests/test_base32.py::test_decode_raises_on_str_length_mismatch[7]",
"tests/test_base32.py::test_decode_raises_on_str_length_mismatch[8]",
"tests/test_base32.py::test_decode_raises_on_str_length_mismatch[9]",
"tests/test_base32.py::test_decode_raises_on_str_length_mismatch[10]",
"tests/test_base32.py::test_decode_raises_on_str_length_mismatch[11]",
"tests/test_base32.py::test_decode_raises_on_str_length_mismatch[12]",
"tests/test_base32.py::test_decode_raises_on_str_length_mismatch[13]",
"tests/test_base32.py::test_decode_raises_on_str_length_mismatch[14]",
"tests/test_base32.py::test_decode_raises_on_str_length_mismatch[15]",
"tests/test_base32.py::test_decode_raises_on_str_length_mismatch[16]",
"tests/test_base32.py::test_decode_raises_on_str_length_mismatch[17]",
"tests/test_base32.py::test_decode_raises_on_str_length_mismatch[18]",
"tests/test_base32.py::test_decode_raises_on_str_length_mismatch[19]",
"tests/test_base32.py::test_decode_raises_on_str_length_mismatch[20]",
"tests/test_base32.py::test_decode_raises_on_str_length_mismatch[21]",
"tests/test_base32.py::test_decode_raises_on_str_length_mismatch[22]",
"tests/test_base32.py::test_decode_raises_on_str_length_mismatch[23]",
"tests/test_base32.py::test_decode_raises_on_str_length_mismatch[24]",
"tests/test_base32.py::test_decode_raises_on_str_length_mismatch[25]",
"tests/test_base32.py::test_decode_raises_on_str_length_mismatch[26]",
"tests/test_base32.py::test_decode_raises_on_str_length_mismatch[27]",
"tests/test_base32.py::test_decode_raises_on_str_length_mismatch[28]",
"tests/test_base32.py::test_decode_raises_on_str_length_mismatch[29]",
"tests/test_base32.py::test_decode_raises_on_str_length_mismatch[30]",
"tests/test_base32.py::test_decode_raises_on_str_length_mismatch[31]",
"tests/test_base32.py::test_decode_raises_on_non_ascii_str[0]",
"tests/test_base32.py::test_decode_raises_on_non_ascii_str[1]",
"tests/test_base32.py::test_decode_raises_on_non_ascii_str[2]",
"tests/test_base32.py::test_decode_raises_on_non_ascii_str[3]",
"tests/test_base32.py::test_decode_raises_on_non_ascii_str[4]",
"tests/test_base32.py::test_decode_raises_on_non_ascii_str[5]",
"tests/test_base32.py::test_decode_raises_on_non_ascii_str[6]",
"tests/test_base32.py::test_decode_raises_on_non_ascii_str[7]",
"tests/test_base32.py::test_decode_raises_on_non_ascii_str[8]",
"tests/test_base32.py::test_decode_raises_on_non_ascii_str[9]",
"tests/test_base32.py::test_decode_raises_on_non_ascii_str[10]",
"tests/test_base32.py::test_decode_raises_on_non_ascii_str[11]",
"tests/test_base32.py::test_decode_raises_on_non_ascii_str[12]",
"tests/test_base32.py::test_decode_raises_on_non_ascii_str[13]",
"tests/test_base32.py::test_decode_raises_on_non_ascii_str[14]",
"tests/test_base32.py::test_decode_raises_on_non_ascii_str[15]",
"tests/test_base32.py::test_decode_raises_on_non_ascii_str[16]",
"tests/test_base32.py::test_decode_raises_on_non_ascii_str[17]",
"tests/test_base32.py::test_decode_raises_on_non_ascii_str[18]",
"tests/test_base32.py::test_decode_raises_on_non_ascii_str[19]",
"tests/test_base32.py::test_decode_raises_on_non_ascii_str[20]",
"tests/test_base32.py::test_decode_raises_on_non_ascii_str[21]",
"tests/test_base32.py::test_decode_raises_on_non_ascii_str[22]",
"tests/test_base32.py::test_decode_raises_on_non_ascii_str[23]",
"tests/test_base32.py::test_decode_raises_on_non_ascii_str[24]",
"tests/test_base32.py::test_decode_raises_on_non_ascii_str[25]",
"tests/test_base32.py::test_decode_raises_on_non_ascii_str[26]",
"tests/test_base32.py::test_decode_raises_on_non_ascii_str[27]",
"tests/test_base32.py::test_decode_raises_on_non_ascii_str[28]",
"tests/test_base32.py::test_decode_raises_on_non_ascii_str[29]",
"tests/test_base32.py::test_decode_raises_on_non_ascii_str[30]",
"tests/test_base32.py::test_decode_raises_on_non_ascii_str[31]",
"tests/test_base32.py::test_decode_ulid_returns_16_bytes",
"tests/test_base32.py::test_decode_ulid_raises_on_str_length_mismatch[0]",
"tests/test_base32.py::test_decode_ulid_raises_on_str_length_mismatch[1]",
"tests/test_base32.py::test_decode_ulid_raises_on_str_length_mismatch[2]",
"tests/test_base32.py::test_decode_ulid_raises_on_str_length_mismatch[3]",
"tests/test_base32.py::test_decode_ulid_raises_on_str_length_mismatch[4]",
"tests/test_base32.py::test_decode_ulid_raises_on_str_length_mismatch[5]",
"tests/test_base32.py::test_decode_ulid_raises_on_str_length_mismatch[6]",
"tests/test_base32.py::test_decode_ulid_raises_on_str_length_mismatch[7]",
"tests/test_base32.py::test_decode_ulid_raises_on_str_length_mismatch[8]",
"tests/test_base32.py::test_decode_ulid_raises_on_str_length_mismatch[9]",
"tests/test_base32.py::test_decode_ulid_raises_on_str_length_mismatch[10]",
"tests/test_base32.py::test_decode_ulid_raises_on_str_length_mismatch[11]",
"tests/test_base32.py::test_decode_ulid_raises_on_str_length_mismatch[12]",
"tests/test_base32.py::test_decode_ulid_raises_on_str_length_mismatch[13]",
"tests/test_base32.py::test_decode_ulid_raises_on_str_length_mismatch[14]",
"tests/test_base32.py::test_decode_ulid_raises_on_str_length_mismatch[15]",
"tests/test_base32.py::test_decode_ulid_raises_on_str_length_mismatch[16]",
"tests/test_base32.py::test_decode_ulid_raises_on_str_length_mismatch[17]",
"tests/test_base32.py::test_decode_ulid_raises_on_str_length_mismatch[18]",
"tests/test_base32.py::test_decode_ulid_raises_on_str_length_mismatch[19]",
"tests/test_base32.py::test_decode_ulid_raises_on_str_length_mismatch[20]",
"tests/test_base32.py::test_decode_ulid_raises_on_str_length_mismatch[21]",
"tests/test_base32.py::test_decode_ulid_raises_on_str_length_mismatch[22]",
"tests/test_base32.py::test_decode_ulid_raises_on_str_length_mismatch[23]",
"tests/test_base32.py::test_decode_ulid_raises_on_str_length_mismatch[24]",
"tests/test_base32.py::test_decode_ulid_raises_on_str_length_mismatch[25]",
"tests/test_base32.py::test_decode_ulid_raises_on_str_length_mismatch[26]",
"tests/test_base32.py::test_decode_ulid_raises_on_str_length_mismatch[27]",
"tests/test_base32.py::test_decode_ulid_raises_on_str_length_mismatch[28]",
"tests/test_base32.py::test_decode_ulid_raises_on_str_length_mismatch[29]",
"tests/test_base32.py::test_decode_ulid_raises_on_str_length_mismatch[30]",
"tests/test_base32.py::test_decode_ulid_raises_on_str_length_mismatch[31]",
"tests/test_base32.py::test_decode_ulid_raises_on_non_ascii_str[0]",
"tests/test_base32.py::test_decode_ulid_raises_on_non_ascii_str[1]",
"tests/test_base32.py::test_decode_ulid_raises_on_non_ascii_str[2]",
"tests/test_base32.py::test_decode_ulid_raises_on_non_ascii_str[3]",
"tests/test_base32.py::test_decode_ulid_raises_on_non_ascii_str[4]",
"tests/test_base32.py::test_decode_ulid_raises_on_non_ascii_str[5]",
"tests/test_base32.py::test_decode_ulid_raises_on_non_ascii_str[6]",
"tests/test_base32.py::test_decode_ulid_raises_on_non_ascii_str[7]",
"tests/test_base32.py::test_decode_ulid_raises_on_non_ascii_str[8]",
"tests/test_base32.py::test_decode_ulid_raises_on_non_ascii_str[9]",
"tests/test_base32.py::test_decode_ulid_raises_on_non_ascii_str[10]",
"tests/test_base32.py::test_decode_ulid_raises_on_non_ascii_str[11]",
"tests/test_base32.py::test_decode_ulid_raises_on_non_ascii_str[12]",
"tests/test_base32.py::test_decode_ulid_raises_on_non_ascii_str[13]",
"tests/test_base32.py::test_decode_ulid_raises_on_non_ascii_str[14]",
"tests/test_base32.py::test_decode_ulid_raises_on_non_ascii_str[15]",
"tests/test_base32.py::test_decode_ulid_raises_on_non_ascii_str[16]",
"tests/test_base32.py::test_decode_ulid_raises_on_non_ascii_str[17]",
"tests/test_base32.py::test_decode_ulid_raises_on_non_ascii_str[18]",
"tests/test_base32.py::test_decode_ulid_raises_on_non_ascii_str[19]",
"tests/test_base32.py::test_decode_ulid_raises_on_non_ascii_str[20]",
"tests/test_base32.py::test_decode_ulid_raises_on_non_ascii_str[21]",
"tests/test_base32.py::test_decode_ulid_raises_on_non_ascii_str[22]",
"tests/test_base32.py::test_decode_ulid_raises_on_non_ascii_str[23]",
"tests/test_base32.py::test_decode_ulid_raises_on_non_ascii_str[24]",
"tests/test_base32.py::test_decode_ulid_raises_on_non_ascii_str[25]",
"tests/test_base32.py::test_decode_ulid_raises_on_non_ascii_str[26]",
"tests/test_base32.py::test_decode_ulid_raises_on_non_ascii_str[27]",
"tests/test_base32.py::test_decode_ulid_raises_on_non_ascii_str[28]",
"tests/test_base32.py::test_decode_ulid_raises_on_non_ascii_str[29]",
"tests/test_base32.py::test_decode_ulid_raises_on_non_ascii_str[30]",
"tests/test_base32.py::test_decode_ulid_raises_on_non_ascii_str[31]",
"tests/test_base32.py::test_decode_timestamp_returns_6_bytes",
"tests/test_base32.py::test_decode_timestamp_raises_on_str_length_mismatch[0]",
"tests/test_base32.py::test_decode_timestamp_raises_on_str_length_mismatch[1]",
"tests/test_base32.py::test_decode_timestamp_raises_on_str_length_mismatch[2]",
"tests/test_base32.py::test_decode_timestamp_raises_on_str_length_mismatch[3]",
"tests/test_base32.py::test_decode_timestamp_raises_on_str_length_mismatch[4]",
"tests/test_base32.py::test_decode_timestamp_raises_on_str_length_mismatch[5]",
"tests/test_base32.py::test_decode_timestamp_raises_on_str_length_mismatch[6]",
"tests/test_base32.py::test_decode_timestamp_raises_on_str_length_mismatch[7]",
"tests/test_base32.py::test_decode_timestamp_raises_on_str_length_mismatch[8]",
"tests/test_base32.py::test_decode_timestamp_raises_on_str_length_mismatch[9]",
"tests/test_base32.py::test_decode_timestamp_raises_on_str_length_mismatch[10]",
"tests/test_base32.py::test_decode_timestamp_raises_on_str_length_mismatch[11]",
"tests/test_base32.py::test_decode_timestamp_raises_on_str_length_mismatch[12]",
"tests/test_base32.py::test_decode_timestamp_raises_on_str_length_mismatch[13]",
"tests/test_base32.py::test_decode_timestamp_raises_on_str_length_mismatch[14]",
"tests/test_base32.py::test_decode_timestamp_raises_on_str_length_mismatch[15]",
"tests/test_base32.py::test_decode_timestamp_raises_on_str_length_mismatch[16]",
"tests/test_base32.py::test_decode_timestamp_raises_on_str_length_mismatch[17]",
"tests/test_base32.py::test_decode_timestamp_raises_on_str_length_mismatch[18]",
"tests/test_base32.py::test_decode_timestamp_raises_on_str_length_mismatch[19]",
"tests/test_base32.py::test_decode_timestamp_raises_on_str_length_mismatch[20]",
"tests/test_base32.py::test_decode_timestamp_raises_on_str_length_mismatch[21]",
"tests/test_base32.py::test_decode_timestamp_raises_on_str_length_mismatch[22]",
"tests/test_base32.py::test_decode_timestamp_raises_on_str_length_mismatch[23]",
"tests/test_base32.py::test_decode_timestamp_raises_on_str_length_mismatch[24]",
"tests/test_base32.py::test_decode_timestamp_raises_on_str_length_mismatch[25]",
"tests/test_base32.py::test_decode_timestamp_raises_on_str_length_mismatch[26]",
"tests/test_base32.py::test_decode_timestamp_raises_on_str_length_mismatch[27]",
"tests/test_base32.py::test_decode_timestamp_raises_on_str_length_mismatch[28]",
"tests/test_base32.py::test_decode_timestamp_raises_on_str_length_mismatch[29]",
"tests/test_base32.py::test_decode_timestamp_raises_on_str_length_mismatch[30]",
"tests/test_base32.py::test_decode_timestamp_raises_on_str_length_mismatch[31]",
"tests/test_base32.py::test_decode_timestamp_raises_on_non_ascii_str[0]",
"tests/test_base32.py::test_decode_timestamp_raises_on_non_ascii_str[1]",
"tests/test_base32.py::test_decode_timestamp_raises_on_non_ascii_str[2]",
"tests/test_base32.py::test_decode_timestamp_raises_on_non_ascii_str[3]",
"tests/test_base32.py::test_decode_timestamp_raises_on_non_ascii_str[4]",
"tests/test_base32.py::test_decode_timestamp_raises_on_non_ascii_str[5]",
"tests/test_base32.py::test_decode_timestamp_raises_on_non_ascii_str[6]",
"tests/test_base32.py::test_decode_timestamp_raises_on_non_ascii_str[7]",
"tests/test_base32.py::test_decode_timestamp_raises_on_non_ascii_str[8]",
"tests/test_base32.py::test_decode_timestamp_raises_on_non_ascii_str[9]",
"tests/test_base32.py::test_decode_timestamp_raises_on_non_ascii_str[10]",
"tests/test_base32.py::test_decode_timestamp_raises_on_non_ascii_str[11]",
"tests/test_base32.py::test_decode_timestamp_raises_on_non_ascii_str[12]",
"tests/test_base32.py::test_decode_timestamp_raises_on_non_ascii_str[13]",
"tests/test_base32.py::test_decode_timestamp_raises_on_non_ascii_str[14]",
"tests/test_base32.py::test_decode_timestamp_raises_on_non_ascii_str[15]",
"tests/test_base32.py::test_decode_timestamp_raises_on_non_ascii_str[16]",
"tests/test_base32.py::test_decode_timestamp_raises_on_non_ascii_str[17]",
"tests/test_base32.py::test_decode_timestamp_raises_on_non_ascii_str[18]",
"tests/test_base32.py::test_decode_timestamp_raises_on_non_ascii_str[19]",
"tests/test_base32.py::test_decode_timestamp_raises_on_non_ascii_str[20]",
"tests/test_base32.py::test_decode_timestamp_raises_on_non_ascii_str[21]",
"tests/test_base32.py::test_decode_timestamp_raises_on_non_ascii_str[22]",
"tests/test_base32.py::test_decode_timestamp_raises_on_non_ascii_str[23]",
"tests/test_base32.py::test_decode_timestamp_raises_on_non_ascii_str[24]",
"tests/test_base32.py::test_decode_timestamp_raises_on_non_ascii_str[25]",
"tests/test_base32.py::test_decode_timestamp_raises_on_non_ascii_str[26]",
"tests/test_base32.py::test_decode_timestamp_raises_on_non_ascii_str[27]",
"tests/test_base32.py::test_decode_timestamp_raises_on_non_ascii_str[28]",
"tests/test_base32.py::test_decode_timestamp_raises_on_non_ascii_str[29]",
"tests/test_base32.py::test_decode_timestamp_raises_on_non_ascii_str[30]",
"tests/test_base32.py::test_decode_timestamp_raises_on_non_ascii_str[31]",
"tests/test_base32.py::test_decode_randomness_returns_10_bytes",
"tests/test_base32.py::test_decode_randomness_raises_on_str_length_mismatch[0]",
"tests/test_base32.py::test_decode_randomness_raises_on_str_length_mismatch[1]",
"tests/test_base32.py::test_decode_randomness_raises_on_str_length_mismatch[2]",
"tests/test_base32.py::test_decode_randomness_raises_on_str_length_mismatch[3]",
"tests/test_base32.py::test_decode_randomness_raises_on_str_length_mismatch[4]",
"tests/test_base32.py::test_decode_randomness_raises_on_str_length_mismatch[5]",
"tests/test_base32.py::test_decode_randomness_raises_on_str_length_mismatch[6]",
"tests/test_base32.py::test_decode_randomness_raises_on_str_length_mismatch[7]",
"tests/test_base32.py::test_decode_randomness_raises_on_str_length_mismatch[8]",
"tests/test_base32.py::test_decode_randomness_raises_on_str_length_mismatch[9]",
"tests/test_base32.py::test_decode_randomness_raises_on_str_length_mismatch[10]",
"tests/test_base32.py::test_decode_randomness_raises_on_str_length_mismatch[11]",
"tests/test_base32.py::test_decode_randomness_raises_on_str_length_mismatch[12]",
"tests/test_base32.py::test_decode_randomness_raises_on_str_length_mismatch[13]",
"tests/test_base32.py::test_decode_randomness_raises_on_str_length_mismatch[14]",
"tests/test_base32.py::test_decode_randomness_raises_on_str_length_mismatch[15]",
"tests/test_base32.py::test_decode_randomness_raises_on_str_length_mismatch[16]",
"tests/test_base32.py::test_decode_randomness_raises_on_str_length_mismatch[17]",
"tests/test_base32.py::test_decode_randomness_raises_on_str_length_mismatch[18]",
"tests/test_base32.py::test_decode_randomness_raises_on_str_length_mismatch[19]",
"tests/test_base32.py::test_decode_randomness_raises_on_str_length_mismatch[20]",
"tests/test_base32.py::test_decode_randomness_raises_on_str_length_mismatch[21]",
"tests/test_base32.py::test_decode_randomness_raises_on_str_length_mismatch[22]",
"tests/test_base32.py::test_decode_randomness_raises_on_str_length_mismatch[23]",
"tests/test_base32.py::test_decode_randomness_raises_on_str_length_mismatch[24]",
"tests/test_base32.py::test_decode_randomness_raises_on_str_length_mismatch[25]",
"tests/test_base32.py::test_decode_randomness_raises_on_str_length_mismatch[26]",
"tests/test_base32.py::test_decode_randomness_raises_on_str_length_mismatch[27]",
"tests/test_base32.py::test_decode_randomness_raises_on_str_length_mismatch[28]",
"tests/test_base32.py::test_decode_randomness_raises_on_str_length_mismatch[29]",
"tests/test_base32.py::test_decode_randomness_raises_on_str_length_mismatch[30]",
"tests/test_base32.py::test_decode_randomness_raises_on_str_length_mismatch[31]",
"tests/test_base32.py::test_decode_randomness_raises_on_non_ascii_str[0]",
"tests/test_base32.py::test_decode_randomness_raises_on_non_ascii_str[1]",
"tests/test_base32.py::test_decode_randomness_raises_on_non_ascii_str[2]",
"tests/test_base32.py::test_decode_randomness_raises_on_non_ascii_str[3]",
"tests/test_base32.py::test_decode_randomness_raises_on_non_ascii_str[4]",
"tests/test_base32.py::test_decode_randomness_raises_on_non_ascii_str[5]",
"tests/test_base32.py::test_decode_randomness_raises_on_non_ascii_str[6]",
"tests/test_base32.py::test_decode_randomness_raises_on_non_ascii_str[7]",
"tests/test_base32.py::test_decode_randomness_raises_on_non_ascii_str[8]",
"tests/test_base32.py::test_decode_randomness_raises_on_non_ascii_str[9]",
"tests/test_base32.py::test_decode_randomness_raises_on_non_ascii_str[10]",
"tests/test_base32.py::test_decode_randomness_raises_on_non_ascii_str[11]",
"tests/test_base32.py::test_decode_randomness_raises_on_non_ascii_str[12]",
"tests/test_base32.py::test_decode_randomness_raises_on_non_ascii_str[13]",
"tests/test_base32.py::test_decode_randomness_raises_on_non_ascii_str[14]",
"tests/test_base32.py::test_decode_randomness_raises_on_non_ascii_str[15]",
"tests/test_base32.py::test_decode_randomness_raises_on_non_ascii_str[16]",
"tests/test_base32.py::test_decode_randomness_raises_on_non_ascii_str[17]",
"tests/test_base32.py::test_decode_randomness_raises_on_non_ascii_str[18]",
"tests/test_base32.py::test_decode_randomness_raises_on_non_ascii_str[19]",
"tests/test_base32.py::test_decode_randomness_raises_on_non_ascii_str[20]",
"tests/test_base32.py::test_decode_randomness_raises_on_non_ascii_str[21]",
"tests/test_base32.py::test_decode_randomness_raises_on_non_ascii_str[22]",
"tests/test_base32.py::test_decode_randomness_raises_on_non_ascii_str[23]",
"tests/test_base32.py::test_decode_randomness_raises_on_non_ascii_str[24]",
"tests/test_base32.py::test_decode_randomness_raises_on_non_ascii_str[25]",
"tests/test_base32.py::test_decode_randomness_raises_on_non_ascii_str[26]",
"tests/test_base32.py::test_decode_randomness_raises_on_non_ascii_str[27]",
"tests/test_base32.py::test_decode_randomness_raises_on_non_ascii_str[28]",
"tests/test_base32.py::test_decode_randomness_raises_on_non_ascii_str[29]",
"tests/test_base32.py::test_decode_randomness_raises_on_non_ascii_str[30]",
"tests/test_base32.py::test_decode_randomness_raises_on_non_ascii_str[31]"
] | [] | Apache License 2.0 | 1,810 | 832 | [
"ulid/base32.py"
] |
|
sendgrid__sendgrid-python-454 | 718ff6a1f91f7696b25c4ffd5b1dff9892e76f54 | 2017-10-27 15:41:53 | e270db212f25ad8ca4cc48aa17dc30b48cb3758a | diff --git a/sendgrid/helpers/mail/mail.py b/sendgrid/helpers/mail/mail.py
index 116afb4..7d131f4 100644
--- a/sendgrid/helpers/mail/mail.py
+++ b/sendgrid/helpers/mail/mail.py
@@ -293,7 +293,14 @@ class Mail(object):
:type content: Content
"""
- self._contents.append(content)
+ if self._contents is None:
+ self._contents = []
+
+ # Text content should be before HTML content
+ if content._type == "text/plain":
+ self._contents.insert(0, content)
+ else:
+ self._contents.append(content)
@property
def attachments(self):
| text/plain must precede text/html content
#### Issue Summary
Requests to send mail with both plain text and HTML content fail if the HTML content is specified first.
#### Code
```
sg = sendgrid.SendGridAPIClient(apikey=sendgrid_key)
mail = Mail()
from_email = Email("[email protected]")
to_email = Email("[email protected]")
subject = "Sending with SendGrid is Fun"
per = Personalization()
mail.from_email = from_email
mail.subject = subject
html_content = Content("text/html", "<html><body>some text here</body></html>")
plain_content = Content("text/plain", "and easy to do anywhere, even with Python")
### Add plain content first
mail.add_content(plain_content)
### Add HTML content next
mail.add_content(html_content)
per.add_to(to_email)
mail.add_personalization(per)
response = sg.client.mail.send.post(request_body=mail.get())
```
#### Steps to Reproduce
1. The above code works properly, but if you reverse the order of the add_content lines, http-client throws a BadRequestsError
#### Expected Result
The library should sort content into the order that the API expects. (I'm not clear why the order should matter to the API—perhaps this should be fixed there instead.)
#### Technical details:
* sendgrid-python Version: master (latest commit: [b12728a53d4c997832c56289c7559f22acf1ff90])
* Python Version: 2.7.13 | sendgrid/sendgrid-python | diff --git a/test/test_mail.py b/test/test_mail.py
index 8b88f5b..0941fa7 100644
--- a/test/test_mail.py
+++ b/test/test_mail.py
@@ -68,6 +68,39 @@ class UnitTests(unittest.TestCase):
self.assertTrue(isinstance(str(mail), str))
+ def test_helloEmailAdditionalContent(self):
+ """Tests bug found in Issue-451 with Content ordering causing a crash"""
+
+ self.maxDiff = None
+
+ """Minimum required to send an email"""
+ mail = Mail()
+
+ mail.from_email = Email("[email protected]")
+
+ mail.subject = "Hello World from the SendGrid Python Library"
+
+ personalization = Personalization()
+ personalization.add_to(Email("[email protected]"))
+ mail.add_personalization(personalization)
+
+ mail.add_content(Content("text/html", "<html><body>some text here</body></html>"))
+ mail.add_content(Content("text/plain", "some text here"))
+
+ self.assertEqual(
+ json.dumps(
+ mail.get(),
+ sort_keys=True),
+ '{"content": [{"type": "text/plain", "value": "some text here"}, '
+ '{"type": "text/html", '
+ '"value": "<html><body>some text here</body></html>"}], '
+ '"from": {"email": "[email protected]"}, "personalizations": '
+ '[{"to": [{"email": "[email protected]"}]}], '
+ '"subject": "Hello World from the SendGrid Python Library"}'
+ )
+
+ self.assertTrue(isinstance(str(mail), str))
+
def test_kitchenSink(self):
self.maxDiff = None
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_git_commit_hash"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | 5.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
dataclasses==0.8
Flask==0.10.1
importlib-metadata==4.8.3
iniconfig==1.1.1
itsdangerous==2.0.1
Jinja2==3.0.3
MarkupSafe==2.0.1
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
python-http-client==3.3.7
PyYAML==3.11
-e git+https://github.com/sendgrid/sendgrid-python.git@718ff6a1f91f7696b25c4ffd5b1dff9892e76f54#egg=sendgrid
six==1.10.0
tomli==1.2.3
typing_extensions==4.1.1
Werkzeug==2.0.3
zipp==3.6.0
| name: sendgrid-python
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- dataclasses==0.8
- flask==0.10.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- itsdangerous==2.0.1
- jinja2==3.0.3
- markupsafe==2.0.1
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- python-http-client==3.3.7
- pyyaml==3.11
- six==1.10.0
- tomli==1.2.3
- typing-extensions==4.1.1
- werkzeug==2.0.3
- zipp==3.6.0
prefix: /opt/conda/envs/sendgrid-python
| [
"test/test_mail.py::UnitTests::test_helloEmailAdditionalContent"
] | [] | [
"test/test_mail.py::UnitTests::test_asm_display_group_limit",
"test/test_mail.py::UnitTests::test_disable_tracking",
"test/test_mail.py::UnitTests::test_helloEmail",
"test/test_mail.py::UnitTests::test_kitchenSink",
"test/test_mail.py::UnitTests::test_unicode_values_in_substitutions_helper"
] | [] | MIT License | 1,812 | 169 | [
"sendgrid/helpers/mail/mail.py"
] |
|
great-expectations__great_expectations-107 | e4f42b80d95cff33339681464a72d833c692dd65 | 2017-10-27 16:05:27 | c5ba7058a8afc99b7b9ce523d3cb183961a321a3 | diff --git a/great_expectations/dataset/base.py b/great_expectations/dataset/base.py
index 14acf8ecf..47c943cba 100644
--- a/great_expectations/dataset/base.py
+++ b/great_expectations/dataset/base.py
@@ -6,6 +6,7 @@ import traceback
import pandas as pd
import numpy as np
+from collections import defaultdict
from .util import DotDict, ensure_json_serializable
@@ -106,6 +107,12 @@ class DataSet(object):
else:
raise(err)
+ #Add a "success" object to the config
+ if output_format == "BOOLEAN_ONLY":
+ expectation_config["success_on_last_run"] = return_obj
+ else:
+ expectation_config["success_on_last_run"] = return_obj["success"]
+
#Append the expectation to the config.
self.append_expectation(expectation_config)
@@ -187,22 +194,111 @@ class DataSet(object):
self.default_expectation_args[argument] = value
- def get_expectations_config(self):
- return self._expectations_config
-
- def save_expectations_config(self, filepath=None):
+ def get_expectations_config(self,
+ discard_failed_expectations=True,
+ discard_output_format_kwargs=True,
+ discard_include_configs_kwargs=True,
+ discard_catch_exceptions_kwargs=True,
+ suppress_warnings=False
+ ):
+ config = dict(self._expectations_config)
+ config = copy.deepcopy(config)
+ expectations = config["expectations"]
+
+ discards = defaultdict(int)
+
+ if discard_failed_expectations:
+ new_expectations = []
+
+ for expectation in expectations:
+ #Note: This is conservative logic.
+ #Instead of retaining expectations IFF success==True, it discard expectations IFF success==False.
+ #In cases where expectation["success"] is missing or None, expectations are *retained*.
+ #Such a case could occur if expectations were loaded from a config file and never run.
+ if "success_on_last_run" in expectation and expectation["success_on_last_run"] == False:
+ discards["failed_expectations"] += 1
+ else:
+ new_expectations.append(expectation)
+
+ expectations = new_expectations
+
+ for expectation in expectations:
+ if "success_on_last_run" in expectation:
+ del expectation["success_on_last_run"]
+
+ if discard_output_format_kwargs:
+ if "output_format" in expectation["kwargs"]:
+ del expectation["kwargs"]["output_format"]
+ discards["output_format"] += 1
+
+ if discard_include_configs_kwargs:
+ if "include_configs" in expectation["kwargs"]:
+ del expectation["kwargs"]["include_configs"]
+ discards["include_configs"] += 1
+
+ if discard_catch_exceptions_kwargs:
+ if "catch_exceptions" in expectation["kwargs"]:
+ del expectation["kwargs"]["catch_exceptions"]
+ discards["catch_exceptions"] += 1
+
+
+ if not suppress_warnings:
+ """
+WARNING: get_expectations_config discarded
+ 12 failing expectations
+ 44 output_format kwargs
+ 0 include_config kwargs
+ 1 catch_exceptions kwargs
+If you wish to change this behavior, please set discard_failed_expectations, discard_output_format_kwargs, discard_include_configs_kwargs, and discard_catch_exceptions_kwargs appropirately.
+ """
+ if any([discard_failed_expectations, discard_output_format_kwargs, discard_include_configs_kwargs, discard_catch_exceptions_kwargs]):
+ print ("WARNING: get_expectations_config discarded")
+ if discard_failed_expectations:
+ print ("\t%d failing expectations" % discards["failed_expectations"])
+ if discard_output_format_kwargs:
+ print ("\t%d output_format kwargs" % discards["output_format"])
+ if discard_include_configs_kwargs:
+ print ("\t%d include_configs kwargs" % discards["include_configs"])
+ if discard_catch_exceptions_kwargs:
+ print ("\t%d catch_exceptions kwargs" % discards["catch_exceptions"])
+ print ("If you wish to change this behavior, please set discard_failed_expectations, discard_output_format_kwargs, discard_include_configs_kwargs, and discard_catch_exceptions_kwargs appropirately.")
+
+ config["expectations"] = expectations
+ return config
+
+ def save_expectations_config(
+ self,
+ filepath=None,
+ discard_failed_expectations=True,
+ discard_output_format_kwargs=True,
+ discard_include_configs_kwargs=True,
+ discard_catch_exceptions_kwargs=True,
+ suppress_warnings=False
+ ):
if filepath==None:
- #!!! Fetch the proper filepath from the project config
+ #FIXME: Fetch the proper filepath from the project config
pass
- expectation_config_str = json.dumps(self.get_expectations_config(), indent=2)
+ expectations_config = self.get_expectations_config(
+ discard_failed_expectations,
+ discard_output_format_kwargs,
+ discard_include_configs_kwargs,
+ discard_catch_exceptions_kwargs,
+ suppress_warnings
+ )
+ expectation_config_str = json.dumps(expectations_config, indent=2)
open(filepath, 'w').write(expectation_config_str)
def validate(self, expectations_config=None, catch_exceptions=True, output_format=None, include_config=None, only_return_failures=False):
results = []
if expectations_config is None:
- expectations_config = self.get_expectations_config()
+ expectations_config = self.get_expectations_config(
+ discard_failed_expectations=False,
+ discard_output_format_kwargs=False,
+ discard_include_configs_kwargs=False,
+ discard_catch_exceptions_kwargs=False,
+ )
for expectation in expectations_config['expectations']:
expectation_method = getattr(self, expectation['expectation_type'])
diff --git a/great_expectations/dataset/util.py b/great_expectations/dataset/util.py
index b8556219a..8bb9200b2 100644
--- a/great_expectations/dataset/util.py
+++ b/great_expectations/dataset/util.py
@@ -3,18 +3,26 @@
import numpy as np
from scipy import stats
import pandas as pd
+import copy
from functools import wraps
class DotDict(dict):
"""dot.notation access to dictionary attributes"""
+
def __getattr__(self, attr):
return self.get(attr)
+
__setattr__= dict.__setitem__
__delattr__= dict.__delitem__
+
def __dir__(self):
return self.keys()
+ #Cargo-cultishly copied from: https://github.com/spindlelabs/pyes/commit/d2076b385c38d6d00cebfe0df7b0d1ba8df934bc
+ def __deepcopy__(self, memo):
+ return DotDict([(copy.deepcopy(k, memo), copy.deepcopy(v, memo)) for k, v in self.items()])
+
class DocInherit(object):
"""
| Proposal: define a clear pattern for default output_format and catch_exceptions parameters
The default value of those parameters changes based on context according to the documentation, and that is currently implemented by ```None``` triggering the default value. Document that or agree it's clear. | great-expectations/great_expectations | diff --git a/tests/test_cli.py b/tests/test_cli.py
index 8d5f1057c..503c1d11a 100644
--- a/tests/test_cli.py
+++ b/tests/test_cli.py
@@ -49,6 +49,9 @@ class TestCLI(unittest.TestCase):
result = get_system_command_result(command_str)["output"]
json_result = json.loads(result)
except ValueError as ve:
+ print ("=== Result ==================================================")
+ print (result)
+ print ("=== Error ===================================================")
print(ve)
json_result = {}
diff --git a/tests/test_dataset.py b/tests/test_dataset.py
index 50e96e1ef..3e059955c 100644
--- a/tests/test_dataset.py
+++ b/tests/test_dataset.py
@@ -1,3 +1,7 @@
+import json
+import tempfile
+import shutil
+
import pandas as pd
import great_expectations as ge
@@ -33,6 +37,7 @@ class TestDataset(unittest.TestCase):
}
)
+ self.maxDiff = None
self.assertEqual(
D.get_expectations_config(),
{
@@ -81,6 +86,219 @@ class TestDataset(unittest.TestCase):
}
)
+ def test_get_and_save_expectation_config(self):
+ directory_name = tempfile.mkdtemp()
+
+ df = ge.dataset.PandasDataSet({
+ 'x' : [1,2,4],
+ 'y' : [1,2,5],
+ 'z' : ['hello', 'jello', 'mello'],
+ })
+ df.expect_column_values_to_be_in_set('x', [1,2,4])
+ df.expect_column_values_to_be_in_set('y', [1,2,4])
+ df.expect_column_values_to_match_regex('z', 'ello')
+
+ ### First test set ###
+
+ output_config = {
+ "expectations": [
+ {
+ "expectation_type": "expect_column_to_exist",
+ "kwargs": {
+ "column": "x"
+ }
+ },
+ {
+ "expectation_type": "expect_column_to_exist",
+ "kwargs": {
+ "column": "y"
+ }
+ },
+ {
+ "expectation_type": "expect_column_to_exist",
+ "kwargs": {
+ "column": "z"
+ }
+ },
+ {
+ "expectation_type": "expect_column_values_to_be_in_set",
+ "kwargs": {
+ "column": "x",
+ "values_set": [
+ 1,
+ 2,
+ 4
+ ]
+ }
+ },
+ {
+ "expectation_type": "expect_column_values_to_match_regex",
+ "kwargs": {
+ "column": "z",
+ "regex": "ello"
+ }
+ }
+ ],
+ "dataset_name": None
+ }
+
+ self.assertEqual(
+ df.get_expectations_config(),
+ output_config,
+ )
+
+ df.save_expectations_config(directory_name+'/temp1.json')
+ temp_file = open(directory_name+'/temp1.json')
+ self.assertEqual(
+ json.load(temp_file),
+ output_config,
+ )
+ temp_file.close()
+
+ ### Second test set ###
+
+ output_config = {
+ "expectations": [
+ {
+ "expectation_type": "expect_column_to_exist",
+ "kwargs": {
+ "column": "x"
+ }
+ },
+ {
+ "expectation_type": "expect_column_to_exist",
+ "kwargs": {
+ "column": "y"
+ }
+ },
+ {
+ "expectation_type": "expect_column_to_exist",
+ "kwargs": {
+ "column": "z"
+ }
+ },
+ {
+ "expectation_type": "expect_column_values_to_be_in_set",
+ "kwargs": {
+ "column": "x",
+ "values_set": [
+ 1,
+ 2,
+ 4
+ ]
+ }
+ },
+ {
+ "expectation_type": "expect_column_values_to_be_in_set",
+ "kwargs": {
+ "column": "y",
+ "values_set": [
+ 1,
+ 2,
+ 4
+ ]
+ }
+ },
+ {
+ "expectation_type": "expect_column_values_to_match_regex",
+ "kwargs": {
+ "column": "z",
+ "regex": "ello"
+ }
+ }
+ ],
+ "dataset_name": None
+ }
+
+ self.assertEqual(
+ df.get_expectations_config(
+ discard_failed_expectations=False
+ ),
+ output_config
+ )
+
+ df.save_expectations_config(
+ directory_name+'/temp2.json',
+ discard_failed_expectations=False
+ )
+ temp_file = open(directory_name+'/temp2.json')
+ self.assertEqual(
+ json.load(temp_file),
+ output_config,
+ )
+ temp_file.close()
+
+ ### Third test set ###
+
+ output_config = {
+ "expectations": [
+ {
+ "expectation_type": "expect_column_to_exist",
+ "kwargs": {
+ "column": "x"
+ }
+ },
+ {
+ "expectation_type": "expect_column_to_exist",
+ "kwargs": {
+ "column": "y"
+ }
+ },
+ {
+ "expectation_type": "expect_column_to_exist",
+ "kwargs": {
+ "column": "z"
+ }
+ },
+ {
+ "expectation_type": "expect_column_values_to_be_in_set",
+ "kwargs": {
+ "column": "x",
+ "values_set": [
+ 1,
+ 2,
+ 4
+ ],
+ "output_format": "BASIC"
+ }
+ },
+ {
+ "expectation_type": "expect_column_values_to_match_regex",
+ "kwargs": {
+ "column": "z",
+ "regex": "ello",
+ "output_format": "BASIC"
+ }
+ }
+ ],
+ "dataset_name": None
+ }
+
+ self.assertEqual(
+ df.get_expectations_config(
+ discard_output_format_kwargs=False,
+ discard_include_configs_kwargs=False,
+ discard_catch_exceptions_kwargs=False,
+ ),
+ output_config
+ )
+
+ df.save_expectations_config(
+ directory_name+'/temp3.json',
+ discard_output_format_kwargs=False,
+ discard_include_configs_kwargs=False,
+ discard_catch_exceptions_kwargs=False,
+ )
+ temp_file = open(directory_name+'/temp3.json')
+ self.assertEqual(
+ json.load(temp_file),
+ output_config,
+ )
+ temp_file.close()
+
+ # Clean up the output directory
+ shutil.rmtree(directory_name)
+
def test_format_column_map_output(self):
df = ge.dataset.PandasDataSet({
"x" : list("abcdefghijklmnopqrstuvwxyz")
@@ -228,6 +446,5 @@ class TestDataset(unittest.TestCase):
(False, 0.0)
)
-
if __name__ == "__main__":
unittest.main()
diff --git a/tests/test_great_expectations.py b/tests/test_great_expectations.py
index b5fbf1076..9295d469a 100644
--- a/tests/test_great_expectations.py
+++ b/tests/test_great_expectations.py
@@ -107,9 +107,12 @@ class TestValidation(unittest.TestCase):
expected_results
)
+
+ validation_results = my_df.validate(only_return_failures=True)
+ # print json.dumps(validation_results, indent=2)
assertDeepAlmostEqual(
self,
- my_df.validate(only_return_failures=True),
+ validation_results,
{"results": [{"exception_traceback": None, "expectation_type": "expect_column_values_to_be_in_set", "success": False, "exception_list": ["*"], "raised_exception": False, "kwargs": {"column": "PClass", "output_format": "COMPLETE", "values_set": ["1st", "2nd", "3rd"]}, "exception_index_list": [456]}]}
)
diff --git a/tests/test_pandas_dataset.py b/tests/test_pandas_dataset.py
index 2f92e50e2..2ce8a7673 100644
--- a/tests/test_pandas_dataset.py
+++ b/tests/test_pandas_dataset.py
@@ -1,7 +1,5 @@
import unittest
import json
-import hashlib
-import datetime
import numpy as np
import great_expectations as ge
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 2
} | 0.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest pytest-cov pytest-xdist pytest-mock pytest-asyncio"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | argh==0.27.2
attrs==22.2.0
certifi==2021.5.30
coverage==6.2
execnet==1.9.0
-e git+https://github.com/great-expectations/great_expectations.git@e4f42b80d95cff33339681464a72d833c692dd65#egg=great_expectations
importlib-metadata==4.8.3
iniconfig==1.1.1
jsonschema==3.2.0
numpy==1.19.5
packaging==21.3
pandas==1.1.5
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pyrsistent==0.18.0
pytest==7.0.1
pytest-asyncio==0.16.0
pytest-cov==4.0.0
pytest-mock==3.6.1
pytest-xdist==3.0.2
python-dateutil==2.9.0.post0
pytz==2025.2
scipy==1.5.4
six==1.17.0
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: great_expectations
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- argh==0.27.2
- attrs==22.2.0
- coverage==6.2
- execnet==1.9.0
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- jsonschema==3.2.0
- numpy==1.19.5
- packaging==21.3
- pandas==1.1.5
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pyrsistent==0.18.0
- pytest==7.0.1
- pytest-asyncio==0.16.0
- pytest-cov==4.0.0
- pytest-mock==3.6.1
- pytest-xdist==3.0.2
- python-dateutil==2.9.0.post0
- pytz==2025.2
- scipy==1.5.4
- six==1.17.0
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/great_expectations
| [
"tests/test_dataset.py::TestDataset::test_get_and_save_expectation_config"
] | [
"tests/test_pandas_dataset.py::TestPandasDataset::test_expect_column_mean_to_be_between"
] | [
"tests/test_cli.py::TestCLI::test_cli_arguments",
"tests/test_dataset.py::TestDataset::test_calc_map_expectation_success",
"tests/test_dataset.py::TestDataset::test_dataset",
"tests/test_dataset.py::TestDataset::test_format_column_map_output",
"tests/test_dataset.py::TestDataset::test_set_default_expectation_argument",
"tests/test_great_expectations.py::TestCustomClass::test_custom_class",
"tests/test_great_expectations.py::TestValidation::test_validate",
"tests/test_great_expectations.py::TestRepeatedAppendExpectation::test_validate",
"tests/test_pandas_dataset.py::TestPandasDataset::test_expect_column_most_common_value_to_be",
"tests/test_pandas_dataset.py::TestPandasDataset::test_expect_column_most_common_value_to_be_in_set",
"tests/test_pandas_dataset.py::TestPandasDataset::test_expect_column_proportion_of_unique_values_to_be_between",
"tests/test_pandas_dataset.py::TestPandasDataset::test_expect_column_stdev_to_be_between",
"tests/test_pandas_dataset.py::TestPandasDataset::test_expect_column_unique_value_count_to_be_between",
"tests/test_pandas_dataset.py::TestPandasDataset::test_expect_column_value_lengths_to_be_between",
"tests/test_pandas_dataset.py::TestPandasDataset::test_expect_column_values_to_be_between",
"tests/test_pandas_dataset.py::TestPandasDataset::test_expect_column_values_to_be_dateutil_parseable",
"tests/test_pandas_dataset.py::TestPandasDataset::test_expect_column_values_to_be_in_set",
"tests/test_pandas_dataset.py::TestPandasDataset::test_expect_column_values_to_be_in_type_list",
"tests/test_pandas_dataset.py::TestPandasDataset::test_expect_column_values_to_be_json_parseable",
"tests/test_pandas_dataset.py::TestPandasDataset::test_expect_column_values_to_be_null",
"tests/test_pandas_dataset.py::TestPandasDataset::test_expect_column_values_to_be_of_type",
"tests/test_pandas_dataset.py::TestPandasDataset::test_expect_column_values_to_be_unique",
"tests/test_pandas_dataset.py::TestPandasDataset::test_expect_column_values_to_match_json_schema",
"tests/test_pandas_dataset.py::TestPandasDataset::test_expect_column_values_to_match_regex",
"tests/test_pandas_dataset.py::TestPandasDataset::test_expect_column_values_to_match_regex_list",
"tests/test_pandas_dataset.py::TestPandasDataset::test_expect_column_values_to_match_strftime_format",
"tests/test_pandas_dataset.py::TestPandasDataset::test_expect_column_values_to_not_be_in_set",
"tests/test_pandas_dataset.py::TestPandasDataset::test_expect_column_values_to_not_be_null",
"tests/test_pandas_dataset.py::TestPandasDataset::test_expect_column_values_to_not_match_regex",
"tests/test_pandas_dataset.py::TestPandasDataset::test_expect_table_row_count_to_be_between",
"tests/test_pandas_dataset.py::TestPandasDataset::test_expect_table_row_count_to_equal",
"tests/test_pandas_dataset.py::TestPandasDataset::test_expectation_decorator_summary_mode",
"tests/test_pandas_dataset.py::TestPandasDataset::test_positional_arguments"
] | [] | Apache License 2.0 | 1,814 | 1,564 | [
"great_expectations/dataset/base.py",
"great_expectations/dataset/util.py"
] |
|
cwacek__python-jsonschema-objects-95 | ba178ce7680e14e4ac367a6fab5ea3655396668f | 2017-10-29 22:24:51 | ba178ce7680e14e4ac367a6fab5ea3655396668f | diff --git a/python_jsonschema_objects/classbuilder.py b/python_jsonschema_objects/classbuilder.py
index 4ba6006..ed55b99 100644
--- a/python_jsonschema_objects/classbuilder.py
+++ b/python_jsonschema_objects/classbuilder.py
@@ -39,6 +39,7 @@ class ProtocolBase(collections.MutableMapping):
"""
__propinfo__ = {}
__required__ = set()
+ __has_default__ = set()
__object_attr_list__ = set(["_properties", "_extended_properties"])
def as_dict(self):
@@ -158,6 +159,13 @@ class ProtocolBase(collections.MutableMapping):
[None for x in
six.moves.xrange(len(self.__prop_names__))]))
+ # To support defaults, we have to actually execute the constructors
+ # but only for the ones that have defaults set.
+ for name in self.__has_default__:
+ if name not in props:
+ logger.debug(util.lazy_format("Initializing '{0}' ", name))
+ setattr(self, name, None)
+
for prop in props:
try:
logger.debug(util.lazy_format("Setting value for '{0}' to {1}", prop, props[prop]))
@@ -166,10 +174,9 @@ class ProtocolBase(collections.MutableMapping):
import sys
raise six.reraise(type(e), type(e)(str(e) + " \nwhile setting '{0}' in {1}".format(
prop, self.__class__.__name__)), sys.exc_info()[2])
+
if getattr(self, '__strict__', None):
self.validate()
- #if len(props) > 0:
- # self.validate()
def __setattr__(self, name, val):
if name in self.__object_attr_list__:
@@ -277,7 +284,10 @@ class ProtocolBase(collections.MutableMapping):
def MakeLiteral(name, typ, value, **properties):
properties.update({'type': typ})
klass = type(str(name), tuple((LiteralValue,)), {
- '__propinfo__': {'__literal__': properties}
+ '__propinfo__': {
+ '__literal__': properties,
+ '__default__': properties.get('default')
+ }
})
return klass(value)
@@ -328,6 +338,9 @@ class LiteralValue(object):
else:
self._value = value
+ if self._value is None and self.default() is not None:
+ self._value = self.default()
+
self.validate()
def as_dict(self):
@@ -336,6 +349,10 @@ class LiteralValue(object):
def for_json(self):
return self._value
+ @classmethod
+ def default(cls):
+ return cls.__propinfo__.get('__default__')
+
@classmethod
def propinfo(cls, propname):
if propname not in cls.__propinfo__:
@@ -516,7 +533,9 @@ class ClassBuilder(object):
"""
cls = type(str(nm), tuple((LiteralValue,)), {
- '__propinfo__': { '__literal__': clsdata}
+ '__propinfo__': {
+ '__literal__': clsdata,
+ '__default__': clsdata.get('default')}
})
return cls
@@ -525,6 +544,7 @@ class ClassBuilder(object):
logger.debug(util.lazy_format("Building object {0}", nm))
props = {}
+ defaults = set()
properties = {}
for p in parents:
@@ -541,6 +561,9 @@ class ClassBuilder(object):
name_translation[prop] = prop.replace('@', '')
prop = name_translation[prop]
+ if detail.get('default', None) is not None:
+ defaults.add(prop)
+
if detail.get('type', None) == 'object':
uri = "{0}/{1}_{2}".format(nm,
prop, "<anonymous>")
@@ -673,6 +696,7 @@ class ClassBuilder(object):
.format(nm, invalid_requires))
props['__required__'] = required
+ props['__has_default__'] = defaults
if required and kw.get("strict"):
props['__strict__'] = True
cls = type(str(nm.split('/')[-1]), tuple(parents), props)
@@ -765,7 +789,10 @@ def make_property(prop, info, desc=""):
elif getattr(info['type'], 'isLiteralClass', False) is True:
if not isinstance(val, info['type']):
validator = info['type'](val)
- validator.validate()
+ validator.validate()
+ if validator._value is not None:
+ # This allows setting of default Literal values
+ val = validator
elif util.safe_issubclass(info['type'], ProtocolBase):
if not isinstance(val, info['type']):
| Feature request: implement default values
Would it be possible for the auto-generated class to implement any default values as specified in the JSON schema? Right now if I have a property defined as:
```
"sample-boolean": {
"description": "A sample boolean",
"type": "boolean",
"default": false
}
```
and I instantiate an object of the generated class using obj = MyClass.from_json('{}') (or any JSON that does not include "sample-boolean") then obj.sample-boolean will be None instead of false.
Implementing the defaults would be a useful feature, I think. | cwacek/python-jsonschema-objects | diff --git a/test/test_pytest.py b/test/test_pytest.py
index 4e52b6f..70dea0e 100644
--- a/test/test_pytest.py
+++ b/test/test_pytest.py
@@ -493,3 +493,27 @@ def test_boolean_in_child_object():
ns = builder.build_classes()
ns.Test(data={"my_bool": True})
+
+
+
[email protected]('default', [
+ '{"type": "boolean", "default": false}',
+ '{"type": "string", "default": "Hello"}',
+ '{"type": "integer", "default": 500}'
+])
+def test_default_values(default):
+ default = json.loads(default)
+ schema = {
+ "$schema": "http://json-schema.org/schema#",
+ "id": "test",
+ "type": "object",
+ "properties": {
+ "sample": default
+ }
+ }
+
+ builder = pjs.ObjectBuilder(schema)
+ ns = builder.build_classes()
+
+ x = ns.Test()
+ assert x.sample == default['default']
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | 0.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"coverage",
"pytest",
"nose",
"rednose",
"pyandoc",
"pandoc",
"sphinx",
"sphinx-autobuild",
"recommonmark"
],
"pre_install": null,
"python": "3.5",
"reqs_path": [
"requirements.txt",
"development.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
attrs==22.2.0
Babel==2.11.0
certifi==2021.5.30
charset-normalizer==2.0.12
colorama==0.4.5
commonmark==0.9.1
coverage==6.2
docutils==0.18.1
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
inflection==0.2.0
iniconfig==1.1.1
Jinja2==3.0.3
jsonschema==2.6.0
livereload==2.6.3
Markdown==2.4
MarkupSafe==2.0.1
nose==1.3.0
packaging==21.3
pandoc==2.4
pandocfilters==1.5.1
pluggy==1.0.0
plumbum==1.8.3
ply==3.11
py==1.11.0
pyandoc==0.2.0
Pygments==2.14.0
pyparsing==3.1.4
pyrsistent==0.18.0
pytest==7.0.1
-e git+https://github.com/cwacek/python-jsonschema-objects.git@ba178ce7680e14e4ac367a6fab5ea3655396668f#egg=python_jsonschema_objects
python-termstyle==0.1.10
pytz==2025.2
recommonmark==0.7.1
rednose==0.4.1
requests==2.27.1
six==1.17.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinx-autobuild==2021.3.14
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
tomli==1.2.3
tornado==6.1
typing_extensions==4.1.1
urllib3==1.26.20
zipp==3.6.0
| name: python-jsonschema-objects
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- attrs==22.2.0
- babel==2.11.0
- charset-normalizer==2.0.12
- colorama==0.4.5
- commonmark==0.9.1
- coverage==6.2
- docutils==0.18.1
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- inflection==0.2.0
- iniconfig==1.1.1
- jinja2==3.0.3
- jsonschema==2.6.0
- livereload==2.6.3
- markdown==2.4
- markupsafe==2.0.1
- nose==1.3.0
- packaging==21.3
- pandoc==2.4
- pandocfilters==1.5.1
- pluggy==1.0.0
- plumbum==1.8.3
- ply==3.11
- py==1.11.0
- pyandoc==0.2.0
- pygments==2.14.0
- pyparsing==3.1.4
- pyrsistent==0.18.0
- pytest==7.0.1
- python-termstyle==0.1.10
- pytz==2025.2
- recommonmark==0.7.1
- rednose==0.4.1
- requests==2.27.1
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinx-autobuild==2021.3.14
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- tomli==1.2.3
- tornado==6.1
- typing-extensions==4.1.1
- urllib3==1.26.20
- zipp==3.6.0
prefix: /opt/conda/envs/python-jsonschema-objects
| [
"test/test_pytest.py::test_default_values[{\"type\":"
] | [] | [
"test/test_pytest.py::test_schema_validation",
"test/test_pytest.py::test_regression_9",
"test/test_pytest.py::test_build_classes_is_idempotent",
"test/test_pytest.py::test_underscore_properties",
"test/test_pytest.py::test_array_regressions",
"test/test_pytest.py::test_arrays_can_have_reffed_items_of_mixed_type",
"test/test_pytest.py::test_regression_39",
"test/test_pytest.py::test_loads_markdown_schema_extraction",
"test/test_pytest.py::test_object_builder_loads_memory_references",
"test/test_pytest.py::test_object_builder_reads_all_definitions",
"test/test_pytest.py::test_oneOf_validates_against_any_valid[{\"MyData\":",
"test/test_pytest.py::test_oneOf_fails_against_non_matching",
"test/test_pytest.py::test_oneOfBare_validates_against_any_valid[{\"MyAddress\":",
"test/test_pytest.py::test_oneOfBare_validates_against_any_valid[{\"firstName\":",
"test/test_pytest.py::test_oneOfBare_fails_against_non_matching",
"test/test_pytest.py::test_additional_props_allowed_by_default",
"test/test_pytest.py::test_additional_props_permitted_explicitly",
"test/test_pytest.py::test_still_raises_when_accessing_undefined_attrs",
"test/test_pytest.py::test_permits_deletion_of_additional_properties",
"test/test_pytest.py::test_additional_props_disallowed_explicitly",
"test/test_pytest.py::test_objects_can_be_empty",
"test/test_pytest.py::test_object_equality_should_compare_data",
"test/test_pytest.py::test_object_allows_attributes_in_oncstructor",
"test/test_pytest.py::test_object_validates_on_json_decode",
"test/test_pytest.py::test_object_validates_enumerations",
"test/test_pytest.py::test_validation_of_mixed_type_enums",
"test/test_pytest.py::test_objects_allow_non_required_attrs_to_be_missing",
"test/test_pytest.py::test_objects_require_required_attrs_on_validate",
"test/test_pytest.py::test_attribute_access_via_dict",
"test/test_pytest.py::test_attribute_set_via_dict",
"test/test_pytest.py::test_numeric_attribute_validation",
"test/test_pytest.py::test_objects_validate_prior_to_serializing",
"test/test_pytest.py::test_serializing_removes_null_objects",
"test/test_pytest.py::test_lists_get_serialized_correctly",
"test/test_pytest.py::test_dictionary_transformation[pdict0]",
"test/test_pytest.py::test_dictionary_transformation[pdict1]",
"test/test_pytest.py::test_strict_mode",
"test/test_pytest.py::test_boolean_in_child_object"
] | [] | MIT License | 1,823 | 1,105 | [
"python_jsonschema_objects/classbuilder.py"
] |
|
Azure__msrestazure-for-python-55 | 005f5a4320385930ba82d4c0e13ce90506884b27 | 2017-10-30 22:28:54 | 0f372b60f9add4c245c323e24acca038936e472f | diff --git a/msrestazure/azure_exceptions.py b/msrestazure/azure_exceptions.py
index bb85333..5b4792c 100644
--- a/msrestazure/azure_exceptions.py
+++ b/msrestazure/azure_exceptions.py
@@ -30,6 +30,15 @@ from msrest.exceptions import ClientException
from msrest.serialization import Deserializer
from msrest.exceptions import DeserializationError
+class CloudErrorRoot(object):
+ """Just match the "error" key at the root of a OdataV4 JSON.
+ """
+ _validation = {}
+ _attribute_map = {
+ 'error': {'key': 'error', 'type': 'CloudErrorData'},
+ }
+ def __init__(self, error):
+ self.error = error
class CloudErrorData(object):
"""Cloud Error Data object, deserialized from error data returned
@@ -47,7 +56,7 @@ class CloudErrorData(object):
def __init__(self, *args, **kwargs):
self.error = kwargs.get('error')
- self._message = kwargs.get('message')
+ self.message = kwargs.get('message')
self.request_id = None
self.error_time = None
self.target = kwargs.get('target')
@@ -122,7 +131,10 @@ class CloudError(ClientException):
"""
def __init__(self, response, error=None, *args, **kwargs):
- self.deserializer = Deserializer({'CloudErrorData': CloudErrorData})
+ self.deserializer = Deserializer({
+ 'CloudErrorRoot': CloudErrorRoot,
+ 'CloudErrorData': CloudErrorData
+ })
self.error = None
self.message = None
self.response = response
@@ -149,13 +161,7 @@ class CloudError(ClientException):
def _build_error_data(self, response):
try:
- data = response.json()
- except ValueError:
- data = response
- else:
- data = data.get('error', data)
- try:
- self.error = self.deserializer(CloudErrorData(), data)
+ self.error = self.deserializer('CloudErrorRoot', response).error
except DeserializationError:
self.error = None
else:
@@ -178,7 +184,10 @@ class CloudError(ClientException):
except ValueError:
message = "none"
else:
- message = data.get("message", self._get_state(data))
+ try:
+ message = data.get("message", self._get_state(data))
+ except AttributeError: # data is not a dict, but is a requests.Response parsable as JSON
+ message = str(response.text)
try:
response.raise_for_status()
except RequestException as err:
| CloudError parsing should be resilient if input type is string
In so (messy) scenario, we don't receive a dict (from a JSON), but a string. We should be robust to that and print the while string as the error message:
```python
msrest.http_logger : b'"{\\"error\\":{\\"code\\":\\"ResourceGroupNotFound\\",\\"message\\":\\"Resource group \'res_grp\' could not be found.\\"}}"'
'str' object has no attribute 'get'
Traceback (most recent call last):
File "C:\Users\lmazuel\Git\AzureCli\lib\site-packages\azure\cli\main.py", line 36, in main
cmd_result = APPLICATION.execute(args)
File "C:\Users\lmazuel\Git\AzureCli\lib\site-packages\azure\cli\core\application.py", line 212, in execute
result = expanded_arg.func(params)
File "C:\Users\lmazuel\Git\AzureCli\lib\site-packages\azure\cli\core\commands\__init__.py", line 377, in __call__
return self.handler(*args, **kwargs)
File "C:\Users\lmazuel\Git\AzureCli\lib\site-packages\azure\cli\core\commands\__init__.py", line 620, in _execute_command
reraise(*sys.exc_info())
File "C:\Users\lmazuel\Git\AzureCli\lib\site-packages\six.py", line 693, in reraise
raise value
File "C:\Users\lmazuel\Git\AzureCli\lib\site-packages\azure\cli\core\commands\__init__.py", line 614, in _execute_command
return list(result)
File "C:\Users\lmazuel\Git\AzureCli\lib\site-packages\msrest\paging.py", line 109, in __next__
self.advance_page()
File "C:\Users\lmazuel\Git\AzureCli\lib\site-packages\msrest\paging.py", line 95, in advance_page
self._response = self._get_next(self.next_link)
File "C:\Users\lmazuel\Git\AzureCli\lib\site-packages\azure\mgmt\compute\v2017_03_30\operations\disks_operations.py", line 441, in internal_paging
exp = CloudError(response)
File "C:\Users\lmazuel\Git\AzureCli\lib\site-packages\msrestazure\azure_exceptions.py", line 136, in __init__
self._build_error_data(response)
File "C:\Users\lmazuel\Git\AzureCli\lib\site-packages\msrestazure\azure_exceptions.py", line 156, in _build_error_data
data = data.get('error', data)
AttributeError: 'str' object has no attribute 'get'
``` | Azure/msrestazure-for-python | diff --git a/tests/test_exceptions.py b/tests/test_exceptions.py
index 2506a9c..45a4770 100644
--- a/tests/test_exceptions.py
+++ b/tests/test_exceptions.py
@@ -125,20 +125,6 @@ class TestCloudException(unittest.TestCase):
response.headers = {"content-type": "application/json; charset=utf8"}
response.reason = 'BadRequest'
- message = {
- 'code': '500',
- 'message': {'value': 'Bad Request\nRequest:34875\nTime:1999-12-31T23:59:59-23:59'},
- 'values': {'invalid_attribute':'data'}
- }
-
- response.text = json.dumps(message)
- response.json = lambda: json.loads(response.text)
-
- error = CloudError(response)
- self.assertEqual(error.message, 'Bad Request')
- self.assertEqual(error.status_code, 400)
- self.assertIsInstance(error.error, CloudErrorData)
-
message = { 'error': {
'code': '500',
'message': {'value': 'Bad Request\nRequest:34875\nTime:1999-12-31T23:59:59-23:59'},
@@ -146,6 +132,7 @@ class TestCloudException(unittest.TestCase):
}}
response.text = json.dumps(message)
+ response.json = lambda: json.loads(response.text)
error = CloudError(response)
self.assertEqual(error.message, 'Bad Request')
self.assertEqual(error.status_code, 400)
@@ -175,9 +162,9 @@ class TestCloudException(unittest.TestCase):
response.text = '{\r\n "odata.metadata":"https://account.region.batch.azure.com/$metadata#Microsoft.Azure.Batch.Protocol.Entities.Container.errors/@Element","code":"InvalidHeaderValue","message":{\r\n "lang":"en-US","value":"The value for one of the HTTP headers is not in the correct format.\\nRequestId:5f4c1f05-603a-4495-8e80-01f776310bbd\\nTime:2016-01-04T22:12:33.9245931Z"\r\n },"values":[\r\n {\r\n "key":"HeaderName","value":"Content-Type"\r\n },{\r\n "key":"HeaderValue","value":"application/json; odata=minimalmetadata; charset=utf-8"\r\n }\r\n ]\r\n}'
error = CloudError(response)
- self.assertIsInstance(error.error, CloudErrorData)
+ self.assertIn("The value for one of the HTTP headers is not in the correct format", error.message)
- response.text = '{"code":"Conflict","message":"The maximum number of Free ServerFarms allowed in a Subscription is 10.","target":null,"details":[{"message":"The maximum number of Free ServerFarms allowed in a Subscription is 10."},{"code":"Conflict"},{"errorentity":{"code":"Conflict","message":"The maximum number of Free ServerFarms allowed in a Subscription is 10.","extendedCode":"59301","messageTemplate":"The maximum number of {0} ServerFarms allowed in a Subscription is {1}.","parameters":["Free","10"],"innerErrors":null}}],"innererror":null}'
+ response.text = '{"error":{"code":"Conflict","message":"The maximum number of Free ServerFarms allowed in a Subscription is 10.","target":null,"details":[{"message":"The maximum number of Free ServerFarms allowed in a Subscription is 10."},{"code":"Conflict"},{"errorentity":{"code":"Conflict","message":"The maximum number of Free ServerFarms allowed in a Subscription is 10.","extendedCode":"59301","messageTemplate":"The maximum number of {0} ServerFarms allowed in a Subscription is {1}.","parameters":["Free","10"],"innerErrors":null}}],"innererror":null}}'
error = CloudError(response)
self.assertIsInstance(error.error, CloudErrorData)
self.assertEqual(error.error.error, "Conflict")
@@ -199,6 +186,11 @@ class TestCloudException(unittest.TestCase):
self.assertIsInstance(error.error, CloudErrorData)
self.assertEqual(error.error.error, "BadArgument")
+ # See https://github.com/Azure/msrestazure-for-python/issues/54
+ response.text = '"{\\"error\\": {\\"code\\": \\"ResourceGroupNotFound\\", \\"message\\": \\"Resource group \'res_grp\' could not be found.\\"}}"'
+ error = CloudError(response)
+ self.assertIn(response.text, error.message)
+
if __name__ == '__main__':
unittest.main()
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 1
} | 0.4 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"httpretty",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | adal==0.4.7
backports.tarfile==1.2.0
certifi==2025.1.31
cffi==1.17.1
charset-normalizer==3.4.1
cryptography==44.0.2
exceptiongroup==1.2.2
httpretty==1.1.4
idna==3.10
importlib_metadata==8.6.1
iniconfig==2.1.0
isodate==0.7.2
jaraco.classes==3.4.0
jaraco.context==6.0.1
jaraco.functools==4.1.0
jeepney==0.9.0
keyring==25.6.0
more-itertools==10.6.0
msrest==0.4.29
-e git+https://github.com/Azure/msrestazure-for-python.git@005f5a4320385930ba82d4c0e13ce90506884b27#egg=msrestazure
oauthlib==3.2.2
packaging==24.2
pluggy==1.5.0
pycparser==2.22
PyJWT==2.10.1
pytest==8.3.5
python-dateutil==2.9.0.post0
requests==2.32.3
requests-oauthlib==2.0.0
SecretStorage==3.3.3
six==1.17.0
tomli==2.2.1
urllib3==2.3.0
zipp==3.21.0
| name: msrestazure-for-python
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- adal==0.4.7
- backports-tarfile==1.2.0
- certifi==2025.1.31
- cffi==1.17.1
- charset-normalizer==3.4.1
- cryptography==44.0.2
- exceptiongroup==1.2.2
- httpretty==1.1.4
- idna==3.10
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- isodate==0.7.2
- jaraco-classes==3.4.0
- jaraco-context==6.0.1
- jaraco-functools==4.1.0
- jeepney==0.9.0
- keyring==25.6.0
- more-itertools==10.6.0
- msrest==0.4.29
- oauthlib==3.2.2
- packaging==24.2
- pluggy==1.5.0
- pycparser==2.22
- pyjwt==2.10.1
- pytest==8.3.5
- python-dateutil==2.9.0.post0
- requests==2.32.3
- requests-oauthlib==2.0.0
- secretstorage==3.3.3
- six==1.17.0
- tomli==2.2.1
- urllib3==2.3.0
- zipp==3.21.0
prefix: /opt/conda/envs/msrestazure-for-python
| [
"tests/test_exceptions.py::TestCloudException::test_cloud_error"
] | [] | [
"tests/test_exceptions.py::TestCloudException::test_cloud_exception"
] | [] | MIT License | 1,825 | 633 | [
"msrestazure/azure_exceptions.py"
] |
|
alecthomas__voluptuous-307 | 48d1ea8f0f07478e50da38935d7e452f80d8501b | 2017-10-31 05:06:40 | b99459ffb6c9932abae3c6882c634a108a34f6ab | diff --git a/voluptuous/schema_builder.py b/voluptuous/schema_builder.py
index 429ab32..2c430a5 100644
--- a/voluptuous/schema_builder.py
+++ b/voluptuous/schema_builder.py
@@ -875,10 +875,11 @@ class VirtualPathComponent(str):
class Marker(object):
"""Mark nodes for special treatment."""
- def __init__(self, schema_, msg=None):
+ def __init__(self, schema_, msg=None, description=None):
self.schema = schema_
self._schema = Schema(schema_)
self.msg = msg
+ self.description = description
def __call__(self, v):
try:
@@ -930,8 +931,9 @@ class Optional(Marker):
{'key2': 'value'}
"""
- def __init__(self, schema, msg=None, default=UNDEFINED):
- super(Optional, self).__init__(schema, msg=msg)
+ def __init__(self, schema, msg=None, default=UNDEFINED, description=None):
+ super(Optional, self).__init__(schema, msg=msg,
+ description=description)
self.default = default_factory(default)
@@ -971,8 +973,9 @@ class Exclusive(Optional):
... 'social': {'social_network': 'barfoo', 'token': 'tEMp'}})
"""
- def __init__(self, schema, group_of_exclusion, msg=None):
- super(Exclusive, self).__init__(schema, msg=msg)
+ def __init__(self, schema, group_of_exclusion, msg=None, description=None):
+ super(Exclusive, self).__init__(schema, msg=msg,
+ description=description)
self.group_of_exclusion = group_of_exclusion
@@ -1038,8 +1041,9 @@ class Required(Marker):
{'key': []}
"""
- def __init__(self, schema, msg=None, default=UNDEFINED):
- super(Required, self).__init__(schema, msg=msg)
+ def __init__(self, schema, msg=None, default=UNDEFINED, description=None):
+ super(Required, self).__init__(schema, msg=msg,
+ description=description)
self.default = default_factory(default)
| RFC: Add description attribute to Required/Optional
So I'm not sure if this should be even a thing in voluptuous, but since more users of this lib might be interested I thought I'll bring it up.
We use voluptuous in [Home Assistant](https://github.com/home-assistant/home-assistant) and it's awesome. For some of the things that we have voluptuous validate, we would like to generate a schema of what is allowed.
To this end I started an experiment a while ago called [voluptuous form](https://github.com/balloob/voluptuous_form). I'm looking into reviving this effort but there is one missing piece of the puzzle: description of the fields.
Would description be something that would make sense to add to Voluptuous?
Potential syntax:
```python
import voluptuous as vol
SCHEMA = vol.Schema({
vol.Required('name', description='The name of the user.'): str,
vol.Required('age', description='The age of the user.'): int,
})
``` | alecthomas/voluptuous | diff --git a/voluptuous/tests/tests.py b/voluptuous/tests/tests.py
index 8b82f98..48cbfd0 100644
--- a/voluptuous/tests/tests.py
+++ b/voluptuous/tests/tests.py
@@ -6,7 +6,7 @@ import sys
from nose.tools import assert_equal, assert_raises, assert_true
from voluptuous import (
- Schema, Required, Optional, Extra, Invalid, In, Remove, Literal,
+ Schema, Required, Exclusive, Optional, Extra, Invalid, In, Remove, Literal,
Url, MultipleInvalid, LiteralInvalid, TypeInvalid, NotIn, Match, Email,
Replace, Range, Coerce, All, Any, Length, FqdnUrl, ALLOW_EXTRA, PREVENT_EXTRA,
validate, ExactSequence, Equal, Unordered, Number, Maybe, Datetime, Date,
@@ -915,3 +915,17 @@ def test_PathExists():
schema = Schema(PathExists())
assert_raises(MultipleInvalid, schema, 3)
schema(os.path.abspath(__file__))
+
+
+def test_description():
+ marker = Marker(Schema(str), description='Hello')
+ assert marker.description == 'Hello'
+
+ optional = Optional('key', description='Hello')
+ assert optional.description == 'Hello'
+
+ exclusive = Exclusive('alpha', 'angles', description='Hello')
+ assert exclusive.description == 'Hello'
+
+ required = Required('key', description='Hello')
+ assert required.description == 'Hello'
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | 0.10 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"nose",
"pytest"
],
"pre_install": null,
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
nose==1.3.7
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
-e git+https://github.com/alecthomas/voluptuous.git@48d1ea8f0f07478e50da38935d7e452f80d8501b#egg=voluptuous
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: voluptuous
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- nose==1.3.7
prefix: /opt/conda/envs/voluptuous
| [
"voluptuous/tests/tests.py::test_description"
] | [] | [
"voluptuous/tests/tests.py::test_exact_sequence",
"voluptuous/tests/tests.py::test_required",
"voluptuous/tests/tests.py::test_extra_with_required",
"voluptuous/tests/tests.py::test_iterate_candidates",
"voluptuous/tests/tests.py::test_in",
"voluptuous/tests/tests.py::test_not_in",
"voluptuous/tests/tests.py::test_contains",
"voluptuous/tests/tests.py::test_remove",
"voluptuous/tests/tests.py::test_extra_empty_errors",
"voluptuous/tests/tests.py::test_literal",
"voluptuous/tests/tests.py::test_class",
"voluptuous/tests/tests.py::test_email_validation",
"voluptuous/tests/tests.py::test_email_validation_with_none",
"voluptuous/tests/tests.py::test_email_validation_with_empty_string",
"voluptuous/tests/tests.py::test_email_validation_without_host",
"voluptuous/tests/tests.py::test_fqdn_url_validation",
"voluptuous/tests/tests.py::test_fqdn_url_without_domain_name",
"voluptuous/tests/tests.py::test_fqdnurl_validation_with_none",
"voluptuous/tests/tests.py::test_fqdnurl_validation_with_empty_string",
"voluptuous/tests/tests.py::test_fqdnurl_validation_without_host",
"voluptuous/tests/tests.py::test_url_validation",
"voluptuous/tests/tests.py::test_url_validation_with_none",
"voluptuous/tests/tests.py::test_url_validation_with_empty_string",
"voluptuous/tests/tests.py::test_url_validation_without_host",
"voluptuous/tests/tests.py::test_copy_dict_undefined",
"voluptuous/tests/tests.py::test_sorting",
"voluptuous/tests/tests.py::test_schema_extend",
"voluptuous/tests/tests.py::test_schema_extend_overrides",
"voluptuous/tests/tests.py::test_schema_extend_key_swap",
"voluptuous/tests/tests.py::test_subschema_extension",
"voluptuous/tests/tests.py::test_repr",
"voluptuous/tests/tests.py::test_list_validation_messages",
"voluptuous/tests/tests.py::test_nested_multiple_validation_errors",
"voluptuous/tests/tests.py::test_humanize_error",
"voluptuous/tests/tests.py::test_fix_157",
"voluptuous/tests/tests.py::test_range_exlcudes_nan",
"voluptuous/tests/tests.py::test_equal",
"voluptuous/tests/tests.py::test_unordered",
"voluptuous/tests/tests.py::test_maybe",
"voluptuous/tests/tests.py::test_empty_list_as_exact",
"voluptuous/tests/tests.py::test_empty_dict_as_exact",
"voluptuous/tests/tests.py::test_schema_decorator_match_with_args",
"voluptuous/tests/tests.py::test_schema_decorator_unmatch_with_args",
"voluptuous/tests/tests.py::test_schema_decorator_match_with_kwargs",
"voluptuous/tests/tests.py::test_schema_decorator_unmatch_with_kwargs",
"voluptuous/tests/tests.py::test_schema_decorator_match_return_with_args",
"voluptuous/tests/tests.py::test_schema_decorator_unmatch_return_with_args",
"voluptuous/tests/tests.py::test_schema_decorator_match_return_with_kwargs",
"voluptuous/tests/tests.py::test_schema_decorator_unmatch_return_with_kwargs",
"voluptuous/tests/tests.py::test_schema_decorator_return_only_match",
"voluptuous/tests/tests.py::test_schema_decorator_return_only_unmatch",
"voluptuous/tests/tests.py::test_schema_decorator_partial_match_called_with_args",
"voluptuous/tests/tests.py::test_schema_decorator_partial_unmatch_called_with_args",
"voluptuous/tests/tests.py::test_schema_decorator_partial_match_called_with_kwargs",
"voluptuous/tests/tests.py::test_schema_decorator_partial_unmatch_called_with_kwargs",
"voluptuous/tests/tests.py::test_unicode_as_key",
"voluptuous/tests/tests.py::test_number_validation_with_string",
"voluptuous/tests/tests.py::test_number_validation_with_invalid_precision_invalid_scale",
"voluptuous/tests/tests.py::test_number_validation_with_valid_precision_scale_yield_decimal_true",
"voluptuous/tests/tests.py::test_number_when_precision_scale_none_yield_decimal_true",
"voluptuous/tests/tests.py::test_number_when_precision_none_n_valid_scale_case1_yield_decimal_true",
"voluptuous/tests/tests.py::test_number_when_precision_none_n_valid_scale_case2_yield_decimal_true",
"voluptuous/tests/tests.py::test_number_when_precision_none_n_invalid_scale_yield_decimal_true",
"voluptuous/tests/tests.py::test_number_when_valid_precision_n_scale_none_yield_decimal_true",
"voluptuous/tests/tests.py::test_number_when_invalid_precision_n_scale_none_yield_decimal_true",
"voluptuous/tests/tests.py::test_number_validation_with_valid_precision_scale_yield_decimal_false",
"voluptuous/tests/tests.py::test_named_tuples_validate_as_tuples",
"voluptuous/tests/tests.py::test_datetime",
"voluptuous/tests/tests.py::test_date",
"voluptuous/tests/tests.py::test_ordered_dict",
"voluptuous/tests/tests.py::test_marker_hashable",
"voluptuous/tests/tests.py::test_validation_performance",
"voluptuous/tests/tests.py::test_IsDir",
"voluptuous/tests/tests.py::test_IsFile",
"voluptuous/tests/tests.py::test_PathExists"
] | [] | BSD 3-Clause "New" or "Revised" License | 1,827 | 524 | [
"voluptuous/schema_builder.py"
] |
|
oasis-open__cti-python-stix2-97 | 07a5d3a98ea49f4c343d26f04810bf305c052c8a | 2017-11-01 16:53:30 | ef6dade6f6773edd14aa16a2e4566e50bf74cbb4 | diff --git a/stix2/sources/memory.py b/stix2/sources/memory.py
index 2d1705d..4d3943b 100644
--- a/stix2/sources/memory.py
+++ b/stix2/sources/memory.py
@@ -164,7 +164,7 @@ class MemorySink(DataSink):
if not os.path.exists(os.path.dirname(file_path)):
os.makedirs(os.path.dirname(file_path))
with open(file_path, "w") as f:
- f.write(str(Bundle(self._data.values(), allow_custom=allow_custom)))
+ f.write(str(Bundle(list(self._data.values()), allow_custom=allow_custom)))
save_to_file.__doc__ = MemoryStore.save_to_file.__doc__
| Add tests for Memory Data Stores, Sources, Sinks
From #58 | oasis-open/cti-python-stix2 | diff --git a/stix2/test/test_memory.py b/stix2/test/test_memory.py
index 0603bf7..7a00029 100644
--- a/stix2/test/test_memory.py
+++ b/stix2/test/test_memory.py
@@ -1,3 +1,6 @@
+import os
+import shutil
+
import pytest
from stix2 import (Bundle, Campaign, CustomObject, Filter, MemorySource,
@@ -166,6 +169,22 @@ def test_memory_store_query_multiple_filters(mem_store):
assert len(resp) == 1
+def test_memory_store_save_load_file(mem_store):
+ filename = 'memory_test/mem_store.json'
+ mem_store.save_to_file(filename)
+ contents = open(os.path.abspath(filename)).read()
+
+ assert '"id": "indicator--d81f86b9-975b-bc0b-775e-810c5ad45a4f",' in contents
+ assert '"id": "indicator--d81f86b8-975b-bc0b-775e-810c5ad45a4f",' in contents
+
+ mem_store2 = MemoryStore()
+ mem_store2.load_from_file(filename)
+ assert mem_store2.get("indicator--d81f86b8-975b-bc0b-775e-810c5ad45a4f")
+ assert mem_store2.get("indicator--d81f86b9-975b-bc0b-775e-810c5ad45a4f")
+
+ shutil.rmtree(os.path.dirname(filename))
+
+
def test_memory_store_add_stix_object_str(mem_store):
# add stix object string
camp_id = "campaign--111111b6-1112-4fb0-111b-b111107ca70a"
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_issue_reference"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 1
} | 0.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"coverage"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
antlr4-python3-runtime==4.9.3
async-generator==1.10
attrs==22.2.0
Babel==2.11.0
bleach==4.1.0
bump2version==1.0.1
bumpversion==0.6.0
certifi==2021.5.30
cfgv==3.3.1
charset-normalizer==2.0.12
coverage==6.2
decorator==5.1.1
defusedxml==0.7.1
distlib==0.3.9
docutils==0.18.1
entrypoints==0.4
filelock==3.4.1
identify==2.4.4
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
importlib-resources==5.2.3
iniconfig==1.1.1
ipython-genutils==0.2.0
Jinja2==3.0.3
jsonschema==3.2.0
jupyter-client==7.1.2
jupyter-core==4.9.2
jupyterlab-pygments==0.1.2
MarkupSafe==2.0.1
mistune==0.8.4
nbclient==0.5.9
nbconvert==6.0.7
nbformat==5.1.3
nbsphinx==0.8.8
nest-asyncio==1.6.0
nodeenv==1.6.0
packaging==21.3
pandocfilters==1.5.1
platformdirs==2.4.0
pluggy==1.0.0
pre-commit==2.17.0
py==1.11.0
Pygments==2.14.0
pyparsing==3.1.4
pyrsistent==0.18.0
pytest==7.0.1
pytest-cov==4.0.0
python-dateutil==2.9.0.post0
pytz==2025.2
PyYAML==6.0.1
pyzmq==25.1.2
requests==2.27.1
simplejson==3.20.1
six==1.17.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinx-prompt==1.5.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
-e git+https://github.com/oasis-open/cti-python-stix2.git@07a5d3a98ea49f4c343d26f04810bf305c052c8a#egg=stix2
stix2-patterns==2.0.0
taxii2-client==2.3.0
testpath==0.6.0
toml==0.10.2
tomli==1.2.3
tornado==6.1
tox==3.28.0
traitlets==4.3.3
typing_extensions==4.1.1
urllib3==1.26.20
virtualenv==20.16.2
webencodings==0.5.1
zipp==3.6.0
| name: cti-python-stix2
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- antlr4-python3-runtime==4.9.3
- async-generator==1.10
- attrs==22.2.0
- babel==2.11.0
- bleach==4.1.0
- bump2version==1.0.1
- bumpversion==0.6.0
- cfgv==3.3.1
- charset-normalizer==2.0.12
- coverage==6.2
- decorator==5.1.1
- defusedxml==0.7.1
- distlib==0.3.9
- docutils==0.18.1
- entrypoints==0.4
- filelock==3.4.1
- identify==2.4.4
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- importlib-resources==5.2.3
- iniconfig==1.1.1
- ipython-genutils==0.2.0
- jinja2==3.0.3
- jsonschema==3.2.0
- jupyter-client==7.1.2
- jupyter-core==4.9.2
- jupyterlab-pygments==0.1.2
- markupsafe==2.0.1
- mistune==0.8.4
- nbclient==0.5.9
- nbconvert==6.0.7
- nbformat==5.1.3
- nbsphinx==0.8.8
- nest-asyncio==1.6.0
- nodeenv==1.6.0
- packaging==21.3
- pandocfilters==1.5.1
- platformdirs==2.4.0
- pluggy==1.0.0
- pre-commit==2.17.0
- py==1.11.0
- pygments==2.14.0
- pyparsing==3.1.4
- pyrsistent==0.18.0
- pytest==7.0.1
- pytest-cov==4.0.0
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyyaml==6.0.1
- pyzmq==25.1.2
- requests==2.27.1
- simplejson==3.20.1
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinx-prompt==1.5.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- stix2-patterns==2.0.0
- taxii2-client==2.3.0
- testpath==0.6.0
- toml==0.10.2
- tomli==1.2.3
- tornado==6.1
- tox==3.28.0
- traitlets==4.3.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- virtualenv==20.16.2
- webencodings==0.5.1
- zipp==3.6.0
prefix: /opt/conda/envs/cti-python-stix2
| [
"stix2/test/test_memory.py::test_memory_store_save_load_file"
] | [] | [
"stix2/test/test_memory.py::test_memory_source_get",
"stix2/test/test_memory.py::test_memory_source_get_nonexistant_object",
"stix2/test/test_memory.py::test_memory_store_all_versions",
"stix2/test/test_memory.py::test_memory_store_query",
"stix2/test/test_memory.py::test_memory_store_query_single_filter",
"stix2/test/test_memory.py::test_memory_store_query_empty_query",
"stix2/test/test_memory.py::test_memory_store_query_multiple_filters",
"stix2/test/test_memory.py::test_memory_store_add_stix_object_str",
"stix2/test/test_memory.py::test_memory_store_add_stix_bundle_str",
"stix2/test/test_memory.py::test_memory_store_add_invalid_object",
"stix2/test/test_memory.py::test_memory_store_object_with_custom_property",
"stix2/test/test_memory.py::test_memory_store_object_with_custom_property_in_bundle",
"stix2/test/test_memory.py::test_memory_store_custom_object"
] | [] | BSD 3-Clause "New" or "Revised" License | 1,835 | 166 | [
"stix2/sources/memory.py"
] |
|
openmrslab__suspect-93 | 758a6d0d41dd61617d6957ae400a7b0c366b477b | 2017-11-02 10:32:05 | 820e897294d90e08c4b91be7289e4ee9ebc6d009 | coveralls:
[](https://coveralls.io/builds/14003998)
Coverage increased (+0.05%) to 81.017% when pulling **b2195c51696b5c0198e491351886561d1d687103 on 90_image_resample** into **758a6d0d41dd61617d6957ae400a7b0c366b477b on master**.
| diff --git a/suspect/base.py b/suspect/base.py
index 6952aa2..8be873b 100644
--- a/suspect/base.py
+++ b/suspect/base.py
@@ -122,12 +122,12 @@ class ImageBase(np.ndarray):
@property
@requires_transform
def row_vector(self):
- return self.transform[:3, 1] / np.linalg.norm(self.transform[:3, 1])
+ return self.transform[:3, 0] / np.linalg.norm(self.transform[:3, 0])
@property
@requires_transform
def col_vector(self):
- return self.transform[:3, 0] / np.linalg.norm(self.transform[:3, 0])
+ return self.transform[:3, 1] / np.linalg.norm(self.transform[:3, 1])
@requires_transform
def _closest_axis(self, target_axis):
@@ -264,12 +264,13 @@ class ImageBase(np.ndarray):
+ JJ[..., np.newaxis] * col_vector \
+ KK[..., np.newaxis] * slice_vector + centre
+ image_coords = self.from_scanner(space_coords).reshape(*space_coords.shape)[..., ::-1].astype(np.int)
resampled = scipy.interpolate.interpn([np.arange(dim) for dim in self.shape],
self,
- self.from_scanner(space_coords)[..., ::-1],
+ image_coords,
method=method,
bounds_error=False,
- fill_value=0)
+ fill_value=0).squeeze()
transform = _transforms.transformation_matrix(row_vector,
col_vector,
| BUG: resampling of image volumes doesn't work properly for single slices
When transforming coordinates the arrays are squeezed which can remove singlet spatial dimensions, leading to issues in interpreting the coordinate arrays. | openmrslab/suspect | diff --git a/tests/test_mrs/test_base.py b/tests/test_mrs/test_base.py
index 72a8d6d..d5e437a 100644
--- a/tests/test_mrs/test_base.py
+++ b/tests/test_mrs/test_base.py
@@ -55,8 +55,8 @@ def test_find_axes():
[1, 1, 1])
base = suspect.base.ImageBase(np.zeros(1), transform=transform)
np.testing.assert_equal(base.axial_vector, base.slice_vector)
- np.testing.assert_equal(base.coronal_vector, base.row_vector)
- np.testing.assert_equal(base.sagittal_vector, base.col_vector)
+ np.testing.assert_equal(base.coronal_vector, base.col_vector)
+ np.testing.assert_equal(base.sagittal_vector, base.row_vector)
def test_find_axes_reversed():
@@ -66,8 +66,8 @@ def test_find_axes_reversed():
[1, 1, 1])
base = suspect.base.ImageBase(np.zeros(1), transform=transform)
np.testing.assert_equal(base.axial_vector, base.slice_vector)
- np.testing.assert_equal(base.coronal_vector, -base.row_vector)
- np.testing.assert_equal(base.sagittal_vector, -base.col_vector)
+ np.testing.assert_equal(base.coronal_vector, -base.col_vector)
+ np.testing.assert_equal(base.sagittal_vector, -base.row_vector)
def test_centre():
diff --git a/tests/test_mrs/test_image.py b/tests/test_mrs/test_image.py
index d1a760c..128bd97 100644
--- a/tests/test_mrs/test_image.py
+++ b/tests/test_mrs/test_image.py
@@ -32,3 +32,13 @@ def test_nifti_io():
nifti_volume = suspect.image.load_nifti("tests/test_data/tmp/nifti.nii")
np.testing.assert_equal(dicom_volume, nifti_volume)
np.testing.assert_allclose(dicom_volume.transform, nifti_volume.transform)
+
+
+def test_resample_single_slice():
+ source_volume = suspect.base.ImageBase(np.random.random((20, 20, 20)), transform=np.eye(4))
+ slc = source_volume.resample(source_volume.row_vector,
+ source_volume.col_vector,
+ [1, 20, 10],
+ centre=(5, 10, 0))
+ assert slc.shape == (20, 10)
+ np.testing.assert_equal(source_volume[0, :, :10], slc)
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | 0.3 | {
"env_vars": null,
"env_yml_path": [],
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": [],
"python": "3.5",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
asteval==0.9.26
async-generator==1.10
attrs==22.2.0
Babel==2.11.0
backcall==0.2.0
bleach==4.1.0
certifi==2021.5.30
charset-normalizer==2.0.12
coverage==6.2
decorator==5.1.1
defusedxml==0.7.1
docutils==0.18.1
entrypoints==0.4
future==1.0.0
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
iniconfig==1.1.1
ipykernel==5.5.6
ipython==7.16.3
ipython-genutils==0.2.0
jedi==0.17.2
Jinja2==3.0.3
jsonschema==3.2.0
jupyter-client==7.1.2
jupyter-core==4.9.2
jupyterlab-pygments==0.1.2
lmfit==1.0.3
MarkupSafe==2.0.1
mistune==0.8.4
nbclient==0.5.9
nbconvert==6.0.7
nbformat==5.1.3
nbsphinx==0.8.8
nest-asyncio==1.6.0
nibabel==3.2.2
numpy==1.19.5
packaging==21.3
pandocfilters==1.5.1
parse==1.20.2
Parsley==1.3
parso==0.7.1
pexpect==4.9.0
pickleshare==0.7.5
pluggy==1.0.0
prompt-toolkit==3.0.36
ptyprocess==0.7.0
py==1.11.0
pydicom==2.3.1
Pygments==2.14.0
pyparsing==3.1.4
pyrsistent==0.18.0
pytest==7.0.1
pytest-cov==4.0.0
python-dateutil==2.9.0.post0
pytz==2025.2
PyWavelets==1.1.1
pyzmq==25.1.2
requests==2.27.1
scipy==1.5.4
six==1.17.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
-e git+https://github.com/openmrslab/suspect.git@758a6d0d41dd61617d6957ae400a7b0c366b477b#egg=suspect
testpath==0.6.0
tomli==1.2.3
tornado==6.1
traitlets==4.3.3
typing_extensions==4.1.1
uncertainties==3.1.7
urllib3==1.26.20
wcwidth==0.2.13
webencodings==0.5.1
zipp==3.6.0
| name: suspect
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- asteval==0.9.26
- async-generator==1.10
- attrs==22.2.0
- babel==2.11.0
- backcall==0.2.0
- bleach==4.1.0
- charset-normalizer==2.0.12
- coverage==6.2
- decorator==5.1.1
- defusedxml==0.7.1
- docutils==0.18.1
- entrypoints==0.4
- future==1.0.0
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- ipykernel==5.5.6
- ipython==7.16.3
- ipython-genutils==0.2.0
- jedi==0.17.2
- jinja2==3.0.3
- jsonschema==3.2.0
- jupyter-client==7.1.2
- jupyter-core==4.9.2
- jupyterlab-pygments==0.1.2
- lmfit==1.0.3
- markupsafe==2.0.1
- mistune==0.8.4
- nbclient==0.5.9
- nbconvert==6.0.7
- nbformat==5.1.3
- nbsphinx==0.8.8
- nest-asyncio==1.6.0
- nibabel==3.2.2
- numpy==1.19.5
- packaging==21.3
- pandocfilters==1.5.1
- parse==1.20.2
- parsley==1.3
- parso==0.7.1
- pexpect==4.9.0
- pickleshare==0.7.5
- pluggy==1.0.0
- prompt-toolkit==3.0.36
- ptyprocess==0.7.0
- py==1.11.0
- pydicom==2.3.1
- pygments==2.14.0
- pyparsing==3.1.4
- pyrsistent==0.18.0
- pytest==7.0.1
- pytest-cov==4.0.0
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pywavelets==1.1.1
- pyzmq==25.1.2
- requests==2.27.1
- scipy==1.5.4
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- testpath==0.6.0
- tomli==1.2.3
- tornado==6.1
- traitlets==4.3.3
- typing-extensions==4.1.1
- uncertainties==3.1.7
- urllib3==1.26.20
- wcwidth==0.2.13
- webencodings==0.5.1
- zipp==3.6.0
prefix: /opt/conda/envs/suspect
| [
"tests/test_mrs/test_base.py::test_find_axes",
"tests/test_mrs/test_base.py::test_find_axes_reversed",
"tests/test_mrs/test_image.py::test_resample_single_slice"
] | [] | [
"tests/test_mrs/test_base.py::test_create_base",
"tests/test_mrs/test_base.py::test_base_transform",
"tests/test_mrs/test_base.py::test_transforms_fail",
"tests/test_mrs/test_base.py::test_centre",
"tests/test_mrs/test_base.py::test_resample",
"tests/test_mrs/test_image.py::test_simple_mask",
"tests/test_mrs/test_image.py::test_nifti_io"
] | [] | MIT License | 1,838 | 372 | [
"suspect/base.py"
] |
OpenMined__PySyft-392 | 34af4f778c2b2f1a16a5a8aa505c37b7d9b18009 | 2017-11-02 19:06:29 | 06ce023225dd613d8fb14ab2046135b93ab22376 | codecov[bot]: # [Codecov](https://codecov.io/gh/OpenMined/PySyft/pull/392?src=pr&el=h1) Report
> Merging [#392](https://codecov.io/gh/OpenMined/PySyft/pull/392?src=pr&el=desc) into [master](https://codecov.io/gh/OpenMined/PySyft/commit/34af4f778c2b2f1a16a5a8aa505c37b7d9b18009?src=pr&el=desc) will **increase** coverage by `<.01%`.
> The diff coverage is `n/a`.
[](https://codecov.io/gh/OpenMined/PySyft/pull/392?src=pr&el=tree)
```diff
@@ Coverage Diff @@
## master #392 +/- ##
==========================================
+ Coverage 99.52% 99.53% +<.01%
==========================================
Files 4 4
Lines 1701 1725 +24
==========================================
+ Hits 1693 1717 +24
Misses 8 8
```
| [Impacted Files](https://codecov.io/gh/OpenMined/PySyft/pull/392?src=pr&el=tree) | Coverage Δ | |
|---|---|---|
| [PySyft/tests/test\_math.py](https://codecov.io/gh/OpenMined/PySyft/pull/392?src=pr&el=tree#diff-UHlTeWZ0L3Rlc3RzL3Rlc3RfbWF0aC5weQ==) | `100% <0%> (ø)` | :arrow_up: |
| [PySyft/tests/test\_tensor.py](https://codecov.io/gh/OpenMined/PySyft/pull/392?src=pr&el=tree#diff-UHlTeWZ0L3Rlc3RzL3Rlc3RfdGVuc29yLnB5) | `99.35% <0%> (ø)` | :arrow_up: |
------
[Continue to review full report at Codecov](https://codecov.io/gh/OpenMined/PySyft/pull/392?src=pr&el=continue).
> **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta)
> `Δ = absolute <relative> (impact)`, `ø = not affected`, `? = missing data`
> Powered by [Codecov](https://codecov.io/gh/OpenMined/PySyft/pull/392?src=pr&el=footer). Last update [34af4f7...8e876e2](https://codecov.io/gh/OpenMined/PySyft/pull/392?src=pr&el=lastupdated). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments).
bharathgs: Also, update the *__init__.py* as well. | diff --git a/syft/__init__.py b/syft/__init__.py
index 43b3fa7b10..cada8b5181 100644
--- a/syft/__init__.py
+++ b/syft/__init__.py
@@ -7,7 +7,8 @@ from syft import nonlin
from syft.tensor import equal, TensorBase
from syft.math import cumprod, cumsum, ceil, dot, matmul, addmm, addcmul
from syft.math import addcdiv, addmv, bmm, addbmm, baddbmm, transpose
-from syft.math import unsqueeze, zeros, ones, rand, randn, mm, fmod, diag, lerp, renorm, numel
+from syft.math import unsqueeze, zeros, ones, rand, randn, mm, fmod, diag
+from syft.math import lerp, renorm, numel, cross
s = str(he)
s += str(nn)
@@ -26,3 +27,4 @@ s += str(fmod)
s += str(lerp)
s += str(numel)
s += str(renorm)
+s += str(cross)
diff --git a/syft/math.py b/syft/math.py
index 4d395a6a0d..207942c75b 100644
--- a/syft/math.py
+++ b/syft/math.py
@@ -20,7 +20,7 @@ __all__ = [
'cumprod', 'cumsum', 'ceil', 'dot', 'floor', 'matmul', 'addmm', 'addcmul',
'addcdiv', 'addmv', 'bmm', 'addbmm', 'baddbmm', 'sigmoid', 'unsqueeze',
'sin', 'sinh', 'cos', 'cosh', 'tan', 'tanh', 'zeros', 'ones', 'rand',
- 'randn', 'mm', 'fmod', 'diag', 'lerp', 'renorm', 'numel'
+ 'randn', 'mm', 'fmod', 'diag', 'lerp', 'renorm', 'numel', 'cross'
]
@@ -1008,3 +1008,49 @@ def split(tensor, split_size, axis=0):
list_ = np.array_split(tensor.data, split_according, axis)
return list(map(lambda x: TensorBase(x), list_))
+
+
+def cross(input, other, dim=-1):
+ """
+ Computes cross products between two tensors in the given dimension
+ The two vectors must have the same size, and the size of the dim
+ dimension should be 3.
+
+ Parameters
+ ----------
+
+ input: TensorBase
+ the first input tensor
+
+ other: TensorBase
+ the second input tensor
+
+ dim: int, optional
+ the dimension to take the cross-product in. Default is -1
+
+ Returns
+ -------
+ TensorBase: The result Tensor
+ """
+ input = _ensure_tensorbase(input)
+ other = _ensure_tensorbase(other)
+
+ if input.encrypted or other.encrypted:
+ return NotImplemented
+
+ # Verify that the shapes of both vectors are same
+ if input.shape() != other.shape():
+ raise ValueError('inconsistent dimensions {} and {}'.format(
+ input.shape(), other.shape()))
+
+ # verify that the given dim is valid
+ if dim < -len(input.shape()) or dim >= len(input.shape()):
+ raise ValueError('invalid dim. Should be between {} and {}'.format(
+ -len(input.shape()), len(input.shape()) - 1))
+
+ # verify that the size of dimension dim is 3
+ if input.shape()[dim] != 3:
+ raise ValueError('size of dimension {}(dim) is {}. Should be 3'.format(
+ dim, input.shape()[-1]))
+
+ return TensorBase(np.cross(input, other, axis=dim))
diff --git a/syft/tensor.py b/syft/tensor.py
index 89f23c3b05..f5dadf7f7d 100644
--- a/syft/tensor.py
+++ b/syft/tensor.py
@@ -3605,3 +3605,24 @@ class TensorBase(object):
return NotImplemented
return syft.math.split(self, split_size, axis)
+
+ def cross(self, tensor, dim=-1):
+ """
+ Computes cross products between two tensors in the given dimension
+ The two vectors must have the same size, and the size of the dim
+ dimension should be 3.
+
+ Parameters
+ ----------
+
+ tensor: TensorBase
+ the second input tensor
+
+ dim: int, optional
+ the dimension to take the cross-product in. Default is -1
+
+ Returns
+ -------
+ TensorBase: The result Tensor
+ """
+ return syft.math.cross(self, tensor, dim)
| Implement Default cross Functionality for Base Tensor Type.
<!-- Please make sure that you review this: https://github.com/OpenMined/Docs/blob/master/contributing/guidelines.md -->
<!-- If you are looking to file a bug make sure to look at the .github/BUG_ISSUE_TEMPLATE.md -->
#### User story:
As a Data Scientist using Syft's Base Tensor type, I want to leverage a default method for computing operations on a Tensor of arbitrary type. For this ticket to be complete `cross` should return a new tensor. For a reference on the operation these perform check out [PyTorch's](http://pytorch.org/docs/master/torch.html#torch.cross) documentation.
<!-- Provide a detailed explaination about the proposed feature, you can draw inspiration from something like this:
https://github.com/OpenMined/PySyft/issues/227 or https://github.com/OpenMined/PySyft/issues/12 -->
#### Acceptance Criteria:
<!-- Provide an outline af all the things that needs to be addressed in order to close this Issue,
be as descriptive as possible -->
- [ ] If the Base Tensor type's attribute "encrypted" is set to True, it should return a NotImplemented error.
- [ ] corresponding unit tests demonstrating the correct operation on the Base Tensor type implemented over int and float Tensors.
- [ ] inline documentation as described over [here](https://github.com/OpenMined/PySyft/blob/85bc68e81a2f4bfc0f0bf6c4252b88d6d7b54004/syft/math.py#L5).
<!-- Thanks for your contributions! -->
| OpenMined/PySyft | diff --git a/tests/test_math.py b/tests/test_math.py
index 09aad0a9f7..c90a0c3cb9 100644
--- a/tests/test_math.py
+++ b/tests/test_math.py
@@ -441,3 +441,31 @@ class SplitTests(unittest.TestCase):
self.assertTrue(syft.equal(split.shape(), target_shape))
self.assertTrue(syft.equal(t.narrow(axis, start, target_shape[axis]), split))
start += target_shape[axis]
+
+
+class CrossTests(unittest.TestCase):
+ def setUp(self):
+ self.a = np.eye(2, 3)
+ self.b = np.ones((2, 3))
+
+ def test_cross(self):
+ a = TensorBase(self.a)
+ b = TensorBase(self.b)
+
+ # Verify that the expected result is retuned
+ expected_result = np.array([0, -1, 1, 1, 0, -1]).reshape(2, 3)
+ self.assertTrue(np.array_equal(a.cross(b), expected_result))
+
+ # Verify that ValueError is thrown when dimension is out of bounds
+ self.assertRaises(ValueError, a.cross, b, 5)
+
+ # Verify that ValueError is thrown when size dimension dim != 3
+ self.assertRaises(ValueError, a.cross, b, 0)
+
+ # Verify that ValueError is thrown when dimensions don't match
+ a = TensorBase(self.a.reshape(3, 2))
+ self.assertRaises(ValueError, a.cross, b)
+
+ # Verify that NotImplemented is returned if Tensor is encrypted
+ a = TensorBase(self.a, True)
+ self.assertEqual(a.cross(b), NotImplemented)
diff --git a/tests/test_tensor.py b/tests/test_tensor.py
index 3c9052c3bf..9d7143a875 100644
--- a/tests/test_tensor.py
+++ b/tests/test_tensor.py
@@ -1635,6 +1635,20 @@ class SplitTests(unittest.TestCase):
start += target_shape[axis]
+class CrossTests(unittest.TestCase):
+ def setUp(self):
+ self.a = np.eye(2, 3)
+ self.b = np.ones((2, 3))
+
+ def test_cross(self):
+ a = TensorBase(self.a)
+ b = TensorBase(self.b)
+ expected_result = np.array([0, -1, 1, 1, 0, -1]).reshape(2, 3)
+
+ # Verify that the expected result is retuned
+ self.assertTrue(np.array_equal(a.cross(b), expected_result))
+
+
if __name__ == "__main__":
unittest.main()
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 3
} | PySyft/hydrogen | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"line_profiler",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y musl-dev g++ libgmp3-dev libmpfr-dev ca-certificates"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup==1.2.2
iniconfig==2.1.0
line_profiler==4.2.0
numpy==1.26.4
packaging==24.2
phe==1.5.0
pluggy==1.5.0
pyRserve==1.0.4
pytest==8.3.5
scipy==1.13.1
-e git+https://github.com/OpenMined/PySyft.git@34af4f778c2b2f1a16a5a8aa505c37b7d9b18009#egg=syft
tomli==2.2.1
| name: PySyft
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- line-profiler==4.2.0
- numpy==1.26.4
- packaging==24.2
- phe==1.5.0
- pluggy==1.5.0
- pyrserve==1.0.4
- pytest==8.3.5
- scipy==1.13.1
- tomli==2.2.1
prefix: /opt/conda/envs/PySyft
| [
"tests/test_math.py::CrossTests::test_cross",
"tests/test_tensor.py::CrossTests::test_cross"
] | [
"tests/test_tensor.py::ScatterTests::test_scatter_numerical_0",
"tests/test_tensor.py::ScatterTests::test_scatter_numerical_1",
"tests/test_tensor.py::ScatterTests::test_scatter_numerical_2",
"tests/test_tensor.py::ScatterTests::test_scatter_numerical_3",
"tests/test_tensor.py::ScatterTests::test_scatter_numerical_4",
"tests/test_tensor.py::ScatterTests::test_scatter_numerical_5",
"tests/test_tensor.py::ScatterTests::test_scatter_numerical_6",
"tests/test_tensor.py::UnfoldTest::test_unfold_big",
"tests/test_tensor.py::UnfoldTest::test_unfold_small"
] | [
"tests/test_math.py::ConvenienceTests::test_ones",
"tests/test_math.py::ConvenienceTests::test_rand",
"tests/test_math.py::ConvenienceTests::test_zeros",
"tests/test_math.py::DotTests::test_dot_float",
"tests/test_math.py::DotTests::test_dot_int",
"tests/test_math.py::DiagTests::test_one_dim_tensor_below_diag",
"tests/test_math.py::DiagTests::test_one_dim_tensor_main_diag",
"tests/test_math.py::DiagTests::test_one_dim_tensor_upper_diag",
"tests/test_math.py::DiagTests::test_two_dim_tensor_below_diag",
"tests/test_math.py::DiagTests::test_two_dim_tensor_main_diag",
"tests/test_math.py::DiagTests::test_two_dim_tensor_upper_diag",
"tests/test_math.py::CeilTests::test_ceil",
"tests/test_math.py::FloorTests::test_floor",
"tests/test_math.py::SinTests::test_sin",
"tests/test_math.py::SinhTests::test_sinh",
"tests/test_math.py::CosTests::test_cos",
"tests/test_math.py::CoshTests::test_cosh",
"tests/test_math.py::TanTests::test_tan",
"tests/test_math.py::TanhTests::test_tanh",
"tests/test_math.py::CumsumTests::test_cumsum",
"tests/test_math.py::CumprodTests::test_cumprod",
"tests/test_math.py::SigmoidTests::test_sigmoid",
"tests/test_math.py::MatmulTests::test_matmul_1d_float",
"tests/test_math.py::MatmulTests::test_matmul_1d_int",
"tests/test_math.py::MatmulTests::test_matmul_2d_float",
"tests/test_math.py::MatmulTests::test_matmul_2d_identity",
"tests/test_math.py::MatmulTests::test_matmul_2d_int",
"tests/test_math.py::AddmmTests::test_addmm_1d",
"tests/test_math.py::AddmmTests::test_addmm_2d",
"tests/test_math.py::AddcmulTests::test_addcmul_1d",
"tests/test_math.py::AddcmulTests::test_addcmul_2d",
"tests/test_math.py::AddcdivTests::test_addcdiv_1d",
"tests/test_math.py::AddcdivTests::test_addcdiv_2d",
"tests/test_math.py::AddmvTests::test_addmv",
"tests/test_math.py::BmmTests::test_bmm",
"tests/test_math.py::BmmTests::test_bmm_for_correct_size_output",
"tests/test_math.py::AddbmmTests::test_addbmm",
"tests/test_math.py::BaddbmmTests::test_baddbmm",
"tests/test_math.py::TransposeTests::test_transpose",
"tests/test_math.py::UnsqueezeTests::test_unsqueeze",
"tests/test_math.py::MmTests::test_mm_1d",
"tests/test_math.py::MmTests::test_mm_2d",
"tests/test_math.py::MmTests::test_mm_3d",
"tests/test_math.py::FmodTests::test_fmod_number",
"tests/test_math.py::FmodTests::test_fmod_tensor",
"tests/test_math.py::LerpTests::test_lerp",
"tests/test_math.py::NumelTests::test_numel_float",
"tests/test_math.py::NumelTests::test_numel_int",
"tests/test_math.py::RenormTests::test_3d_tensor_renorm",
"tests/test_math.py::RenormTests::test_float_renorm",
"tests/test_math.py::RenormTests::test_int_renorm",
"tests/test_math.py::MultinomialTests::test_multinomial",
"tests/test_math.py::SplitTests::test_split",
"tests/test_tensor.py::DimTests::test_as_view",
"tests/test_tensor.py::DimTests::test_dim_one",
"tests/test_tensor.py::DimTests::test_resize",
"tests/test_tensor.py::DimTests::test_resize_as",
"tests/test_tensor.py::DimTests::test_view",
"tests/test_tensor.py::DiagTests::test_one_dim_tensor_below_diag",
"tests/test_tensor.py::DiagTests::test_one_dim_tensor_main_diag",
"tests/test_tensor.py::DiagTests::test_one_dim_tensor_upper_diag",
"tests/test_tensor.py::DiagTests::test_two_dim_tensor_below_diag",
"tests/test_tensor.py::DiagTests::test_two_dim_tensor_main_diag",
"tests/test_tensor.py::DiagTests::test_two_dim_tensor_upper_diag",
"tests/test_tensor.py::AddTests::test_inplace",
"tests/test_tensor.py::AddTests::test_scalar",
"tests/test_tensor.py::AddTests::test_simple",
"tests/test_tensor.py::CeilTests::test_ceil",
"tests/test_tensor.py::CeilTests::test_ceil_",
"tests/test_tensor.py::ZeroTests::test_zero",
"tests/test_tensor.py::FloorTests::test_floor",
"tests/test_tensor.py::SubTests::test_inplace",
"tests/test_tensor.py::SubTests::test_scalar",
"tests/test_tensor.py::SubTests::test_simple",
"tests/test_tensor.py::MaxTests::test_axis",
"tests/test_tensor.py::MaxTests::test_no_dim",
"tests/test_tensor.py::MultTests::test_inplace",
"tests/test_tensor.py::MultTests::test_scalar",
"tests/test_tensor.py::MultTests::test_simple",
"tests/test_tensor.py::DivTests::test_inplace",
"tests/test_tensor.py::DivTests::test_scalar",
"tests/test_tensor.py::DivTests::test_simple",
"tests/test_tensor.py::AbsTests::test_abs",
"tests/test_tensor.py::AbsTests::test_abs_",
"tests/test_tensor.py::ShapeTests::test_shape",
"tests/test_tensor.py::SqrtTests::test_sqrt",
"tests/test_tensor.py::SqrtTests::test_sqrt_",
"tests/test_tensor.py::SumTests::test_dim_is_not_none_int",
"tests/test_tensor.py::SumTests::test_dim_none_int",
"tests/test_tensor.py::EqualTests::test_equal",
"tests/test_tensor.py::EqualTests::test_equal_operation",
"tests/test_tensor.py::EqualTests::test_inequality_operation",
"tests/test_tensor.py::EqualTests::test_not_equal",
"tests/test_tensor.py::EqualTests::test_shape_inequality_operation",
"tests/test_tensor.py::EqualTests::test_shape_not_equal",
"tests/test_tensor.py::SigmoidTests::test_sigmoid",
"tests/test_tensor.py::AddmmTests::test_addmm_1d",
"tests/test_tensor.py::AddmmTests::test_addmm_1d_",
"tests/test_tensor.py::AddmmTests::test_addmm_2d",
"tests/test_tensor.py::AddmmTests::test_addmm_2d_",
"tests/test_tensor.py::AddcmulTests::test_addcmul_1d",
"tests/test_tensor.py::AddcmulTests::test_addcmul_1d_",
"tests/test_tensor.py::AddcmulTests::test_addcmul_2d",
"tests/test_tensor.py::AddcmulTests::test_addcmul_2d_",
"tests/test_tensor.py::AddcdivTests::test_addcdiv_1d",
"tests/test_tensor.py::AddcdivTests::test_addcdiv_1d_",
"tests/test_tensor.py::AddcdivTests::test_addcdiv_2d",
"tests/test_tensor.py::AddcdivTests::test_addcdiv_2d_",
"tests/test_tensor.py::AddmvTests::test_addmv",
"tests/test_tensor.py::AddmvTests::test_addmv_",
"tests/test_tensor.py::BmmTests::test_bmm",
"tests/test_tensor.py::BmmTests::test_bmm_size",
"tests/test_tensor.py::AddbmmTests::test_addbmm",
"tests/test_tensor.py::AddbmmTests::test_addbmm_",
"tests/test_tensor.py::BaddbmmTests::test_baddbmm",
"tests/test_tensor.py::BaddbmmTests::test_baddbmm_",
"tests/test_tensor.py::TransposeTests::test_t",
"tests/test_tensor.py::TransposeTests::test_transpose",
"tests/test_tensor.py::TransposeTests::test_transpose_",
"tests/test_tensor.py::UnsqueezeTests::test_unsqueeze",
"tests/test_tensor.py::UnsqueezeTests::test_unsqueeze_",
"tests/test_tensor.py::ExpTests::test_exp",
"tests/test_tensor.py::ExpTests::test_exp_",
"tests/test_tensor.py::FracTests::test_frac",
"tests/test_tensor.py::FracTests::test_frac_",
"tests/test_tensor.py::RsqrtTests::test_rsqrt",
"tests/test_tensor.py::RsqrtTests::test_rsqrt_",
"tests/test_tensor.py::SignTests::test_sign",
"tests/test_tensor.py::SignTests::test_sign_",
"tests/test_tensor.py::NumpyTests::test_numpy",
"tests/test_tensor.py::ReciprocalTests::test_reciprocal",
"tests/test_tensor.py::ReciprocalTests::test_reciprocal_",
"tests/test_tensor.py::LogTests::test_log",
"tests/test_tensor.py::LogTests::test_log_",
"tests/test_tensor.py::LogTests::test_log_1p",
"tests/test_tensor.py::LogTests::test_log_1p_",
"tests/test_tensor.py::ClampTests::test_clamp_float",
"tests/test_tensor.py::ClampTests::test_clamp_float_in_place",
"tests/test_tensor.py::ClampTests::test_clamp_int",
"tests/test_tensor.py::ClampTests::test_clamp_int_in_place",
"tests/test_tensor.py::CloneTests::test_clone",
"tests/test_tensor.py::ChunkTests::test_chunk",
"tests/test_tensor.py::ChunkTests::test_chunk_same_size",
"tests/test_tensor.py::GtTests::test_gt__in_place_with_number",
"tests/test_tensor.py::GtTests::test_gt__in_place_with_tensor",
"tests/test_tensor.py::GtTests::test_gt_with_encrypted",
"tests/test_tensor.py::GtTests::test_gt_with_number",
"tests/test_tensor.py::GtTests::test_gt_with_tensor",
"tests/test_tensor.py::GeTests::test_ge__in_place_with_number",
"tests/test_tensor.py::GeTests::test_ge__in_place_with_tensor",
"tests/test_tensor.py::GeTests::test_ge_with_encrypted",
"tests/test_tensor.py::GeTests::test_ge_with_number",
"tests/test_tensor.py::GeTests::test_ge_with_tensor",
"tests/test_tensor.py::LtTests::test_lt__in_place_with_number",
"tests/test_tensor.py::LtTests::test_lt__in_place_with_tensor",
"tests/test_tensor.py::LtTests::test_lt_with_encrypted",
"tests/test_tensor.py::LtTests::test_lt_with_number",
"tests/test_tensor.py::LtTests::test_lt_with_tensor",
"tests/test_tensor.py::LeTests::test_le__in_place_with_number",
"tests/test_tensor.py::LeTests::test_le__in_place_with_tensor",
"tests/test_tensor.py::LeTests::test_le_with_encrypted",
"tests/test_tensor.py::LeTests::test_le_with_number",
"tests/test_tensor.py::LeTests::test_le_with_tensor",
"tests/test_tensor.py::BernoulliTests::test_bernoulli",
"tests/test_tensor.py::BernoulliTests::test_bernoulli_",
"tests/test_tensor.py::MultinomialTests::test_multinomial",
"tests/test_tensor.py::CauchyTests::test_cauchy_",
"tests/test_tensor.py::UniformTests::test_uniform",
"tests/test_tensor.py::UniformTests::test_uniform_",
"tests/test_tensor.py::GeometricTests::test_geometric_",
"tests/test_tensor.py::NormalTests::test_normal",
"tests/test_tensor.py::NormalTests::test_normal_",
"tests/test_tensor.py::FillTests::test_fill_",
"tests/test_tensor.py::TopkTests::test_topK",
"tests/test_tensor.py::TolistTests::test_to_list",
"tests/test_tensor.py::TraceTests::test_trace",
"tests/test_tensor.py::RoundTests::test_round",
"tests/test_tensor.py::RoundTests::test_round_",
"tests/test_tensor.py::RepeatTests::test_repeat",
"tests/test_tensor.py::PowTests::test_pow",
"tests/test_tensor.py::PowTests::test_pow_",
"tests/test_tensor.py::NegTests::test_neg",
"tests/test_tensor.py::NegTests::test_neg_",
"tests/test_tensor.py::SinTests::test_sin",
"tests/test_tensor.py::SinhTests::test_sinh",
"tests/test_tensor.py::CosTests::test_cos",
"tests/test_tensor.py::CoshTests::test_cosh",
"tests/test_tensor.py::TanTests::test_tan",
"tests/test_tensor.py::TanhTests::test_tanh_",
"tests/test_tensor.py::ProdTests::test_prod",
"tests/test_tensor.py::RandomTests::test_random_",
"tests/test_tensor.py::NonzeroTests::test_non_zero",
"tests/test_tensor.py::CumprodTest::test_cumprod",
"tests/test_tensor.py::CumprodTest::test_cumprod_",
"tests/test_tensor.py::SqueezeTests::test_squeeze",
"tests/test_tensor.py::ExpandAsTests::test_expand_as",
"tests/test_tensor.py::MeanTests::test_mean",
"tests/test_tensor.py::NotEqualTests::test_ne",
"tests/test_tensor.py::NotEqualTests::test_ne_",
"tests/test_tensor.py::IndexTests::test_index",
"tests/test_tensor.py::IndexTests::test_index_add_",
"tests/test_tensor.py::IndexTests::test_index_copy_",
"tests/test_tensor.py::IndexTests::test_index_fill_",
"tests/test_tensor.py::IndexTests::test_index_select",
"tests/test_tensor.py::IndexTests::test_index_slice_notation",
"tests/test_tensor.py::IndexTests::test_indexing",
"tests/test_tensor.py::GatherTests::test_gather_numerical_1",
"tests/test_tensor.py::GatherTests::test_gather_numerical_2",
"tests/test_tensor.py::ScatterTests::test_scatter_dim_out_Of_range",
"tests/test_tensor.py::ScatterTests::test_scatter_index_out_of_range",
"tests/test_tensor.py::ScatterTests::test_scatter_index_src_dimension_mismatch",
"tests/test_tensor.py::ScatterTests::test_scatter_index_type",
"tests/test_tensor.py::RemainderTests::test_remainder_",
"tests/test_tensor.py::RemainderTests::test_remainder_broadcasting",
"tests/test_tensor.py::MvTests::test_mv",
"tests/test_tensor.py::MvTests::test_mv_tensor",
"tests/test_tensor.py::NarrowTests::test_narrow_float",
"tests/test_tensor.py::NarrowTests::test_narrow_int",
"tests/test_tensor.py::MaskedScatterTests::test_masked_scatter_1",
"tests/test_tensor.py::MaskedScatterTests::test_masked_scatter_braodcasting_1",
"tests/test_tensor.py::MaskedScatterTests::test_masked_scatter_braodcasting_2",
"tests/test_tensor.py::MaskedFillTests::test_masked_fill_",
"tests/test_tensor.py::MaskedFillTests::test_masked_fill_broadcasting",
"tests/test_tensor.py::MaskedSelectTests::test_masked_select",
"tests/test_tensor.py::MaskedSelectTests::test_masked_select_broadcasting_1",
"tests/test_tensor.py::MaskedSelectTests::test_masked_select_broadcasting_2",
"tests/test_tensor.py::MaskedSelectTests::test_tensor_base_masked_select",
"tests/test_tensor.py::EqTests::test_eq_in_place_with_number",
"tests/test_tensor.py::EqTests::test_eq_in_place_with_tensor",
"tests/test_tensor.py::EqTests::test_eq_with_number",
"tests/test_tensor.py::EqTests::test_eq_with_tensor",
"tests/test_tensor.py::MmTests::test_mm_1d",
"tests/test_tensor.py::MmTests::test_mm_2d",
"tests/test_tensor.py::MmTests::test_mm_3d",
"tests/test_tensor.py::NewTensorTests::test_encrypted_error",
"tests/test_tensor.py::NewTensorTests::test_return_new_float_tensor",
"tests/test_tensor.py::NewTensorTests::test_return_new_int_tensor",
"tests/test_tensor.py::HalfTests::test_half_1",
"tests/test_tensor.py::HalfTests::test_half_2",
"tests/test_tensor.py::FmodTests::test_fmod_number",
"tests/test_tensor.py::FmodTests::test_fmod_tensor",
"tests/test_tensor.py::FmodTestsBis::test_fmod_number",
"tests/test_tensor.py::FmodTestsBis::test_fmod_tensor",
"tests/test_tensor.py::NumelTests::test_numel_2d",
"tests/test_tensor.py::NumelTests::test_numel_3d",
"tests/test_tensor.py::NumelTests::test_numel_encrypted",
"tests/test_tensor.py::NumelTests::test_numel_float",
"tests/test_tensor.py::NumelTests::test_numel_int",
"tests/test_tensor.py::NumelTests::test_numel_str",
"tests/test_tensor.py::NelementTests::test_nelement_2d",
"tests/test_tensor.py::NelementTests::test_nelement_3d",
"tests/test_tensor.py::NelementTests::test_nelement_encrypted",
"tests/test_tensor.py::NelementTests::test_nelement_float",
"tests/test_tensor.py::NelementTests::test_nelement_int",
"tests/test_tensor.py::NelementTests::test_nelement_str",
"tests/test_tensor.py::SizeTests::test_size_2d",
"tests/test_tensor.py::SizeTests::test_size_3d",
"tests/test_tensor.py::SizeTests::test_size_float",
"tests/test_tensor.py::SizeTests::test_size_int",
"tests/test_tensor.py::SizeTests::test_size_str",
"tests/test_tensor.py::LerpTests::test_lerp",
"tests/test_tensor.py::LerpTests::test_lerp_",
"tests/test_tensor.py::ModeTests::testMode_axis_None",
"tests/test_tensor.py::ModeTests::testMode_axis_col",
"tests/test_tensor.py::ModeTests::testMode_axis_row",
"tests/test_tensor.py::RenormTests::testRenorm",
"tests/test_tensor.py::RenormTests::testRenorm_",
"tests/test_tensor.py::StrideTests::test_stride",
"tests/test_tensor.py::SplitTests::test_split"
] | [] | Apache License 2.0 | 1,840 | 1,161 | [
"syft/__init__.py",
"syft/math.py",
"syft/tensor.py"
] |
minio__minio-py-591 | 41240393e679226942d2300f281602468b20c7b2 | 2017-11-02 22:09:58 | 41240393e679226942d2300f281602468b20c7b2 | poornas: @vadmeste, appveyor and travis are both failing.
vadmeste: Fixed @poornas | diff --git a/minio/api.py b/minio/api.py
index cb7e207..3c29b3a 100644
--- a/minio/api.py
+++ b/minio/api.py
@@ -1605,7 +1605,12 @@ class Minio(object):
is_non_empty_string(object_name)
is_non_empty_string(upload_id)
- data = xml_marshal_complete_multipart_upload(uploaded_parts)
+ # Order uploaded parts as required by S3 specification
+ ordered_parts = []
+ for part in sorted(uploaded_parts.keys()):
+ ordered_parts.append(uploaded_parts[part])
+
+ data = xml_marshal_complete_multipart_upload(ordered_parts)
sha256_hex = get_sha256_hexdigest(data)
md5_base64 = get_md5_base64digest(data)
diff --git a/minio/xml_marshal.py b/minio/xml_marshal.py
index e450d6b..9c62aa0 100644
--- a/minio/xml_marshal.py
+++ b/minio/xml_marshal.py
@@ -51,17 +51,17 @@ def xml_marshal_complete_multipart_upload(uploaded_parts):
"""
Marshal's complete multipart upload request based on *uploaded_parts*.
- :param uploaded_parts: List of all uploaded parts ordered in the
- way they were uploaded.
+ :param uploaded_parts: List of all uploaded parts, ordered by part number.
:return: Marshalled XML data.
"""
root = s3_xml.Element('CompleteMultipartUpload', {'xmlns': _S3_NAMESPACE})
- for part_number in uploaded_parts.keys():
+ for uploaded_part in uploaded_parts:
+ part_number = uploaded_part.part_number
part = s3_xml.SubElement(root, 'Part')
part_num = s3_xml.SubElement(part, 'PartNumber')
part_num.text = str(part_number)
etag = s3_xml.SubElement(part, 'ETag')
- etag.text = '"' + uploaded_parts[part_number].etag + '"'
+ etag.text = '"' + uploaded_part.etag + '"'
data = io.BytesIO()
s3_xml.ElementTree(root).write(data, encoding=None,
xml_declaration=False)
| Part order error with fput_object
When trying to upload a 200MB file to minio server with minio-py I'm getting an error.
minio-py version 2.2.5
minio server RELEASE.2017-09-29T19-16-56Z
```
File "/usr/local/lib/python3.6/dist-packages/minio/api.py", line 561, in fput_object
content_type, metadata)
File "/usr/local/lib/python3.6/dist-packages/minio/api.py", line 767, in put_object
data, length, metadata=metadata)
File "/usr/local/lib/python3.6/dist-packages/minio/api.py", line 1561, in _stream_put_object
metadata=metadata)
File "/usr/local/lib/python3.6/dist-packages/minio/api.py", line 1632, in _complete_multipart_upload
content_sha256=sha256_hex)
File "/usr/local/lib/python3.6/dist-packages/minio/api.py", line 1781, in _url_open
object_name).get_exception()
minio.error.InvalidPartOrder: InvalidPartOrder: message: The list of parts was not in ascending order.Parts list must specified in order by part number.
```
Maybe related to this commit?
https://github.com/minio/minio-py/pull/563 | minio/minio-py | diff --git a/tests/unit/generate_xml_test.py b/tests/unit/generate_xml_test.py
index 18706b9..bdd1dfd 100644
--- a/tests/unit/generate_xml_test.py
+++ b/tests/unit/generate_xml_test.py
@@ -33,17 +33,16 @@ class GenerateRequestTest(TestCase):
b'<Part><PartNumber>2</PartNumber><ETag>"0c78aef83f66abc1fa1e8477f296d394"</ETag>' \
b'</Part><Part><PartNumber>3</PartNumber><ETag>"acbd18db4cc2f85cedef654fccc4a4d8"' \
b'</ETag></Part></CompleteMultipartUpload>'
- etags = {}
- etags = {
- 1: UploadPart('bucket', 'object', 'upload_id', 1,
+ etags = [
+ UploadPart('bucket', 'object', 'upload_id', 1,
'a54357aff0632cce46d942af68356b38',
None, 0),
- 2: UploadPart('bucket', 'object', 'upload_id', 2,
+ UploadPart('bucket', 'object', 'upload_id', 2,
'0c78aef83f66abc1fa1e8477f296d394',
None, 0),
- 3: UploadPart('bucket', 'object', 'upload_id', 3,
+ UploadPart('bucket', 'object', 'upload_id', 3,
'acbd18db4cc2f85cedef654fccc4a4d8',
None, 0),
- }
+ ]
actual_string = xml_marshal_complete_multipart_upload(etags)
eq_(expected_string, actual_string)
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 2
} | 2.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"nose",
"mock",
"pytest"
],
"pre_install": null,
"python": "3.5",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
-e git+https://github.com/minio/minio-py.git@41240393e679226942d2300f281602468b20c7b2#egg=minio
mock==5.2.0
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
nose==1.3.7
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
pytz==2025.2
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
urllib3==1.26.20
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: minio-py
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- mock==5.2.0
- nose==1.3.7
- pytz==2025.2
- urllib3==1.26.20
prefix: /opt/conda/envs/minio-py
| [
"tests/unit/generate_xml_test.py::GenerateRequestTest::test_generate_complete_multipart_upload"
] | [] | [
"tests/unit/generate_xml_test.py::GenerateRequestTest::test_generate_bucket_constraint"
] | [] | Apache License 2.0 | 1,841 | 507 | [
"minio/api.py",
"minio/xml_marshal.py"
] |
natasha__yargy-46 | 486a17df07ad1615b7a8d6ef2a4bfe3a39d30340 | 2017-11-06 14:56:21 | 486a17df07ad1615b7a8d6ef2a4bfe3a39d30340 | diff --git a/yargy/interpretation/attribute.py b/yargy/interpretation/attribute.py
index 6f6c264..ca320c6 100644
--- a/yargy/interpretation/attribute.py
+++ b/yargy/interpretation/attribute.py
@@ -55,16 +55,6 @@ class FactAttributeBase(Record):
)
-def prepare_normalized(attribute, item):
- if item is not None:
- if callable(item):
- return FunctionFactAttribute(attribute, item)
- else:
- return ConstFactAttribute(attribute, item)
- else:
- return NormalizedFactAttribute(attribute)
-
-
class FactAttribute(FactAttributeBase):
__attributes__ = ['fact', 'name', 'default']
@@ -76,8 +66,14 @@ class FactAttribute(FactAttributeBase):
def inflected(self, grammemes=None):
return InflectedFactAttribute(self, grammemes)
- def normalized(self, item=None):
- return prepare_normalized(self, item)
+ def normalized(self):
+ return NormalizedFactAttribute(self)
+
+ def const(self, value):
+ return ConstFactAttribute(self, value)
+
+ def custom(self, function):
+ return FunctionFactAttribute(self, function)
class RepeatableFactAttribute(FactAttributeBase):
| Allow define custom value in interpretation rules
Like this
```python
{
'interpretation': {
'attribute': Object.attribute,
'value': SomeEnum.value,
}
}
``` | natasha/yargy | diff --git a/yargy/tests/test_attribute.py b/yargy/tests/test_attribute.py
new file mode 100644
index 0000000..ea52d61
--- /dev/null
+++ b/yargy/tests/test_attribute.py
@@ -0,0 +1,29 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+
+from yargy import Parser, rule, fact
+from yargy.predicates import gram, dictionary
+
+Money = fact(
+ 'Money',
+ ['count', 'base', 'currency']
+)
+
+
+def test_constant_attribute():
+ MONEY_RULE = rule(
+ gram('INT').interpretation(
+ Money.count
+ ),
+ dictionary({'тысяча'}).interpretation(
+ Money.base.const(10**3)
+ ),
+ dictionary({'рубль', 'доллар'}).interpretation(
+ Money.currency
+ ),
+ ).interpretation(Money)
+
+ parser = Parser(MONEY_RULE)
+ matches = list(parser.match('1 тысяча рублей'))
+ assert matches[0].fact == Money(count=1, base=1000, currency='рублей')
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 3,
"test_score": 3
},
"num_modified_files": 1
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-flake8"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | backports.functools-lru-cache==1.3
DAWG-Python==0.7.2
docopt==0.6.2
exceptiongroup==1.2.2
flake8==7.2.0
iniconfig==2.1.0
intervaltree==2.1.0
jellyfish==0.5.6
mccabe==0.7.0
packaging==24.2
pluggy==1.5.0
pycodestyle==2.13.0
pyflakes==3.3.1
pymorphy2==0.8
pymorphy2-dicts==2.4.393442.3710985
pytest==8.3.5
pytest-flake8==1.3.0
sortedcontainers==2.4.0
tomli==2.2.1
-e git+https://github.com/natasha/yargy.git@486a17df07ad1615b7a8d6ef2a4bfe3a39d30340#egg=yargy
| name: yargy
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- backports-functools-lru-cache==1.3
- dawg-python==0.7.2
- docopt==0.6.2
- exceptiongroup==1.2.2
- flake8==7.2.0
- iniconfig==2.1.0
- intervaltree==2.1.0
- jellyfish==0.5.6
- mccabe==0.7.0
- packaging==24.2
- pluggy==1.5.0
- pycodestyle==2.13.0
- pyflakes==3.3.1
- pymorphy2==0.8
- pymorphy2-dicts==2.4.393442.3710985
- pytest==8.3.5
- pytest-flake8==1.3.0
- sortedcontainers==2.4.0
- tomli==2.2.1
prefix: /opt/conda/envs/yargy
| [
"yargy/tests/test_attribute.py::test_constant_attribute"
] | [] | [] | [] | MIT License | 1,853 | 303 | [
"yargy/interpretation/attribute.py"
] |
|
planetlabs__planet-client-python-129 | 49da66f16774682e377dfd79eebe0f33ea47f8e1 | 2017-11-09 21:30:58 | 0d5160b40fbcdc6bd97059270d78be03f33ea03c | ischneider: Once #130 is merged, the test should pass again.
ischneider: Updated with the ability to extract the filter from a provided saved search in the JSON from the `--filter-json` flag. CI still expected to fail until #131 is merged | diff --git a/planet/scripts/util.py b/planet/scripts/util.py
index 421631d..21dddf5 100644
--- a/planet/scripts/util.py
+++ b/planet/scripts/util.py
@@ -79,20 +79,23 @@ def check_writable(dirpath):
def filter_from_opts(**kw):
- '''Build a AND filter from the provided filter_in OR kwargs defaulting to an
- empty 'and' filter (@todo: API workaround).
+ '''Build a AND filter from the provided kwargs defaulting to an
+ empty 'and' filter (@todo: API workaround) if nothing is provided.
+
+ If the 'filter_json' argument is provided, this will be assumed to contain
+ a filter specification and will be anded with other filters. If the
+ 'filter_json' is a search, the search filter value will be used.
+
All kw values should be tuple or list
'''
filter_in = kw.pop('filter_json', None)
active = and_filter_from_opts(kw)
- no_filters = len(active['config']) == 0
- if no_filters and not filter_in:
- return filters.and_filter()
- if not no_filters and filter_in:
- raise click.ClickException(
- 'Specify filter options or provide using --filter-json, not both')
if filter_in:
- active = filter_in
+ filter_in = filter_in.get('filter', filter_in)
+ if len(active['config']) > 0:
+ active = filters.and_filter(active, filter_in)
+ else:
+ active = filter_in
return active
| cli download command improperly rejects --filter-json argument
To make the download experience nicer, the download command implicitly creates a permissions filter but does not combine it with the provided filter argument and exits with the message:
`Error: Specify filter options or provide using --filter-json, not both`
It seems like the proper behavior would be:
1. if the provided filter is an AND filter, add the permissions filter as a predicate
2. otherwise create a new top-level AND filter with the permissions and provided filter as predicates
| planetlabs/planet-client-python | diff --git a/tests/test_v1_cli.py b/tests/test_v1_cli.py
index 8b56083..838ff07 100644
--- a/tests/test_v1_cli.py
+++ b/tests/test_v1_cli.py
@@ -55,16 +55,47 @@ def test_filter(runner):
def filt(*opts):
return runner.invoke(main, ['data', 'filter'] + list(opts))
assert_success(filt(), {
- "type": "AndFilter",
- "config": []
+ 'type': 'AndFilter',
+ 'config': []
})
assert_success(filt('--string-in', 'eff', 'a b c'), {
- "type": "AndFilter",
- "config": [
+ 'type': 'AndFilter',
+ 'config': [
{'config': ['a', 'b', 'c'], 'field_name': 'eff',
'type': 'StringInFilter'}
]
})
+ filter_spec = {
+ 'type': 'StringInFilter',
+ 'config': ['a'],
+ 'field_name': 'eff'
+ }
+ # if no other options, filter-json is used as is
+ assert_success(filt('--filter-json', json.dumps(filter_spec)), filter_spec)
+ # verify we extract the filter property of a search
+ assert_success(filt('--filter-json', json.dumps({
+ "filter": filter_spec
+ })), filter_spec)
+ # filters are combined - the --string-in option results in a single AND
+ # filter and this is combined with the provided filter_spec in a top-level
+ # AND filter
+ assert_success(filt('--filter-json', json.dumps(filter_spec),
+ '--string-in', 'eff', 'b'), {
+ 'config': [
+ {
+ 'type': 'AndFilter',
+ 'config': [
+ {
+ 'type': 'StringInFilter',
+ 'config': ['b'],
+ 'field_name': 'eff'
+ }
+ ]
+ },
+ filter_spec
+ ],
+ 'type': 'AndFilter'
+ })
# @todo more cases that are easier to write/maintain
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 1
},
"num_modified_files": 1
} | 1.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-mock",
"pytest-xdist"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
attrs==22.2.0
Babel==2.11.0
certifi==2021.5.30
charset-normalizer==2.0.12
click==8.0.4
coverage==6.2
docutils==0.17.1
execnet==1.9.0
flake8==5.0.4
idna==3.10
imagesize==1.4.1
importlib-metadata==4.2.0
iniconfig==1.1.1
Jinja2==3.0.3
MarkupSafe==2.0.1
mccabe==0.7.0
mock==5.2.0
packaging==21.3
pex==2.33.7
-e git+https://github.com/planetlabs/planet-client-python.git@49da66f16774682e377dfd79eebe0f33ea47f8e1#egg=planet
pluggy==1.0.0
py==1.11.0
pycodestyle==2.9.1
pyflakes==2.5.0
Pygments==2.14.0
pyparsing==3.1.4
pytest==7.0.1
pytest-cov==4.0.0
pytest-mock==3.6.1
pytest-xdist==3.0.2
pytz==2025.2
requests==2.27.1
requests-futures==1.0.2
requests-mock==1.12.1
snowballstemmer==2.2.0
Sphinx==4.3.2
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
zipp==3.6.0
| name: planet-client-python
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- attrs==22.2.0
- babel==2.11.0
- charset-normalizer==2.0.12
- click==8.0.4
- coverage==6.2
- docutils==0.17.1
- execnet==1.9.0
- flake8==5.0.4
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.2.0
- iniconfig==1.1.1
- jinja2==3.0.3
- markupsafe==2.0.1
- mccabe==0.7.0
- mock==5.2.0
- packaging==21.3
- pex==2.33.7
- pluggy==1.0.0
- py==1.11.0
- pycodestyle==2.9.1
- pyflakes==2.5.0
- pygments==2.14.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-cov==4.0.0
- pytest-mock==3.6.1
- pytest-xdist==3.0.2
- pytz==2025.2
- requests==2.27.1
- requests-futures==1.0.2
- requests-mock==1.12.1
- setuptools==20.10.1
- snowballstemmer==2.2.0
- sphinx==4.3.2
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- zipp==3.6.0
prefix: /opt/conda/envs/planet-client-python
| [
"tests/test_v1_cli.py::test_filter"
] | [
"tests/test_v1_cli.py::test_filter_options_invalid"
] | [
"tests/test_v1_cli.py::test_quick_search",
"tests/test_v1_cli.py::test_download_errors",
"tests/test_v1_cli.py::test_download_dry_run",
"tests/test_v1_cli.py::test_download_quick",
"tests/test_v1_cli.py::test_download_search_id",
"tests/test_v1_cli.py::test_create_search",
"tests/test_v1_cli.py::test_geom_filter"
] | [] | Apache License 2.0 | 1,863 | 370 | [
"planet/scripts/util.py"
] |
pynamodb__PynamoDB-410 | 1828bda52376a4b0313146b64ffb447e5392f467 | 2017-11-10 00:25:08 | 1828bda52376a4b0313146b64ffb447e5392f467 | diff --git a/pynamodb/connection/base.py b/pynamodb/connection/base.py
index ac92ae0..bf425ef 100644
--- a/pynamodb/connection/base.py
+++ b/pynamodb/connection/base.py
@@ -95,6 +95,22 @@ class MetaTable(object):
break
return self._hash_keyname
+ def get_key_names(self, index_name=None):
+ """
+ Returns the names of the primary key attributes and index key attributes (if index_name is specified)
+ """
+ key_names = [self.hash_keyname]
+ if self.range_keyname:
+ key_names.append(self.range_keyname)
+ if index_name is not None:
+ index_hash_keyname = self.get_index_hash_keyname(index_name)
+ if index_hash_keyname not in key_names:
+ key_names.append(index_hash_keyname)
+ index_range_keyname = self.get_index_range_keyname(index_name)
+ if index_range_keyname is not None and index_range_keyname not in key_names:
+ key_names.append(index_range_keyname)
+ return key_names
+
def get_index_hash_keyname(self, index_name):
"""
Returns the name of the hash key for a given index
diff --git a/pynamodb/connection/table.py b/pynamodb/connection/table.py
index 1e7a439..35f46bd 100644
--- a/pynamodb/connection/table.py
+++ b/pynamodb/connection/table.py
@@ -28,6 +28,12 @@ class TableConnection(object):
max_retry_attempts=max_retry_attempts,
base_backoff_ms=base_backoff_ms)
+ def get_meta_table(self, refresh=False):
+ """
+ Returns a MetaTable
+ """
+ return self.connection.get_meta_table(self.table_name, refresh=refresh)
+
def delete_item(self, hash_key,
range_key=None,
condition=None,
diff --git a/pynamodb/pagination.py b/pynamodb/pagination.py
index 8f008e2..6f948b6 100644
--- a/pynamodb/pagination.py
+++ b/pynamodb/pagination.py
@@ -35,6 +35,16 @@ class PageIterator(object):
def next(self):
return self.__next__()
+ @property
+ def key_names(self):
+ # If the current page has a last_evaluated_key, use it to determine key attributes
+ if self._last_evaluated_key:
+ return self._last_evaluated_key.keys()
+
+ # Use the table meta data to determine the key attributes
+ table_meta = self._operation.im_self.get_meta_table()
+ return table_meta.get_key_names(self._kwargs.get('index_name'))
+
@property
def page_size(self):
return self._kwargs.get('limit')
@@ -100,7 +110,20 @@ class ResultIterator(object):
@property
def last_evaluated_key(self):
- return self.page_iter.last_evaluated_key
+ if self._first_iteration:
+ # Not started iterating yet: there cannot be a last_evaluated_key
+ return None
+
+ if self._index == self._count:
+ # Entire page has been consumed: last_evaluated_key is whatever DynamoDB returned
+ # It may correspond to the current item, or it may correspond to an item evaluated but not returned.
+ return self.page_iter.last_evaluated_key
+
+ # In the middle of a page of results: reconstruct a last_evaluated_key from the current item
+ # The operation should be resumed starting at the last item returned, not the last item evaluated.
+ # This can occur if the 'limit' is reached in the middle of a page.
+ item = self._items[self._index - 1]
+ return dict((key, item[key]) for key in self.page_iter.key_names)
@property
def total_count(self):
| Last Evaluated Key and Limited Queries/Scans
The `last_evaluated_key` value returns whatever was last evaluated for the current result page. When limiting the query but not setting the page size to the limit, the value does not represent the last item returned by the iterator. | pynamodb/PynamoDB | diff --git a/pynamodb/tests/test_base_connection.py b/pynamodb/tests/test_base_connection.py
index 4095d6c..b9b33fa 100644
--- a/pynamodb/tests/test_base_connection.py
+++ b/pynamodb/tests/test_base_connection.py
@@ -6,10 +6,12 @@ import json
import six
from pynamodb.compat import CompatTestCase as TestCase
from pynamodb.connection import Connection
+from pynamodb.connection.base import MetaTable
from botocore.vendored import requests
from pynamodb.exceptions import (VerboseClientError,
TableError, DeleteError, UpdateError, PutError, GetError, ScanError, QueryError, TableDoesNotExist)
-from pynamodb.constants import DEFAULT_REGION, UNPROCESSED_ITEMS, STRING_SHORT, BINARY_SHORT, DEFAULT_ENCODING
+from pynamodb.constants import (
+ DEFAULT_REGION, UNPROCESSED_ITEMS, STRING_SHORT, BINARY_SHORT, DEFAULT_ENCODING, TABLE_KEY)
from pynamodb.expressions.operand import Path
from pynamodb.tests.data import DESCRIBE_TABLE_DATA, GET_ITEM_DATA, LIST_TABLE_DATA
from pynamodb.tests.deep_eq import deep_eq
@@ -25,6 +27,23 @@ else:
PATCH_METHOD = 'pynamodb.connection.Connection._make_api_call'
+class MetaTableTestCase(TestCase):
+ """
+ Tests for the meta table class
+ """
+
+ def setUp(self):
+ self.meta_table = MetaTable(DESCRIBE_TABLE_DATA.get(TABLE_KEY))
+
+ def test_get_key_names(self):
+ key_names = self.meta_table.get_key_names()
+ self.assertEqual(key_names, ["ForumName", "Subject"])
+
+ def test_get_key_names_index(self):
+ key_names = self.meta_table.get_key_names("LastPostIndex")
+ self.assertEqual(key_names, ["ForumName", "Subject", "LastPostDateTime"])
+
+
class ConnectionTestCase(TestCase):
"""
Tests for the base connection class
diff --git a/pynamodb/tests/test_model.py b/pynamodb/tests/test_model.py
index f0ac212..ab93c07 100644
--- a/pynamodb/tests/test_model.py
+++ b/pynamodb/tests/test_model.py
@@ -2125,9 +2125,9 @@ class ModelTestCase(TestCase):
items.append(item)
req.side_effect = [
- {'Count': 10, 'ScannedCount': 20, 'Items': items[:10], 'LastEvaluatedKey': 'x'},
- {'Count': 10, 'ScannedCount': 20, 'Items': items[10:20], 'LastEvaluatedKey': 'y'},
- {'Count': 10, 'ScannedCount': 20, 'Items': items[20:30], 'LastEvaluatedKey': 'z'},
+ {'Count': 10, 'ScannedCount': 20, 'Items': items[:10], 'LastEvaluatedKey': {'user_id': 'x'}},
+ {'Count': 10, 'ScannedCount': 20, 'Items': items[10:20], 'LastEvaluatedKey': {'user_id': 'y'}},
+ {'Count': 10, 'ScannedCount': 20, 'Items': items[20:30], 'LastEvaluatedKey': {'user_id': 'z'}},
]
results_iter = UserModel.query('foo', limit=25)
results = list(results_iter)
@@ -2136,7 +2136,7 @@ class ModelTestCase(TestCase):
self.assertEquals(req.mock_calls[0][1][1]['Limit'], 25)
self.assertEquals(req.mock_calls[1][1][1]['Limit'], 25)
self.assertEquals(req.mock_calls[2][1][1]['Limit'], 25)
- self.assertEquals(results_iter.last_evaluated_key, 'z')
+ self.assertEquals(results_iter.last_evaluated_key, {'user_id': items[24]['user_id']})
self.assertEquals(results_iter.total_count, 30)
self.assertEquals(results_iter.page_iter.total_scanned_count, 60)
@@ -2153,9 +2153,9 @@ class ModelTestCase(TestCase):
items.append(item)
req.side_effect = [
- {'Count': 10, 'ScannedCount': 20, 'Items': items[:10], 'LastEvaluatedKey': 'x'},
- {'Count': 10, 'ScannedCount': 20, 'Items': items[10:20], 'LastEvaluatedKey': 'y'},
- {'Count': 10, 'ScannedCount': 20, 'Items': items[20:30], 'LastEvaluatedKey': 'z'},
+ {'Count': 10, 'ScannedCount': 20, 'Items': items[:10], 'LastEvaluatedKey': {'user_id': 'x'}},
+ {'Count': 10, 'ScannedCount': 20, 'Items': items[10:20], 'LastEvaluatedKey': {'user_id': 'y'}},
+ {'Count': 10, 'ScannedCount': 20, 'Items': items[20:30], 'LastEvaluatedKey': {'user_id': 'x'}},
]
results_iter = UserModel.query('foo', limit=25, page_size=10)
results = list(results_iter)
@@ -2164,7 +2164,7 @@ class ModelTestCase(TestCase):
self.assertEquals(req.mock_calls[0][1][1]['Limit'], 10)
self.assertEquals(req.mock_calls[1][1][1]['Limit'], 10)
self.assertEquals(req.mock_calls[2][1][1]['Limit'], 10)
- self.assertEquals(results_iter.last_evaluated_key, 'z')
+ self.assertEquals(results_iter.last_evaluated_key, {'user_id': items[24]['user_id']})
self.assertEquals(results_iter.total_count, 30)
self.assertEquals(results_iter.page_iter.total_scanned_count, 60)
@@ -2181,8 +2181,8 @@ class ModelTestCase(TestCase):
items.append(item)
req.side_effect = [
- {'Count': 10, 'ScannedCount': 20, 'Items': items[:10], 'LastEvaluatedKey': 'x'},
- {'Count': 10, 'ScannedCount': 20, 'Items': items[10:20], 'LastEvaluatedKey': 'y'},
+ {'Count': 10, 'ScannedCount': 20, 'Items': items[:10], 'LastEvaluatedKey': {'user_id': 'x'}},
+ {'Count': 10, 'ScannedCount': 20, 'Items': items[10:20], 'LastEvaluatedKey': {'user_id': 'y'}},
{'Count': 10, 'ScannedCount': 20, 'Items': items[20:30]},
]
results_iter = UserModel.query('foo', limit=50)
@@ -2209,8 +2209,8 @@ class ModelTestCase(TestCase):
items.append(item)
req.side_effect = [
- {'Count': 10, 'ScannedCount': 20, 'Items': items[:10], 'LastEvaluatedKey': 'x'},
- {'Count': 10, 'ScannedCount': 20, 'Items': items[10:20], 'LastEvaluatedKey': 'y'},
+ {'Count': 10, 'ScannedCount': 20, 'Items': items[:10], 'LastEvaluatedKey': {'user_id': 'x'}},
+ {'Count': 10, 'ScannedCount': 20, 'Items': items[10:20], 'LastEvaluatedKey': {'user_id': 'y'}},
{'Count': 10, 'ScannedCount': 20, 'Items': items[20:30]},
]
results_iter = UserModel.query('foo', limit=50, page_size=10)
@@ -2545,9 +2545,9 @@ class ModelTestCase(TestCase):
items.append(item)
req.side_effect = [
- {'Count': 10, 'ScannedCount': 20, 'Items': items[:10], 'LastEvaluatedKey': 'x'},
- {'Count': 10, 'ScannedCount': 20, 'Items': items[10:20], 'LastEvaluatedKey': 'y'},
- {'Count': 10, 'ScannedCount': 20, 'Items': items[20:30], 'LastEvaluatedKey': 'z'},
+ {'Count': 10, 'ScannedCount': 20, 'Items': items[:10], 'LastEvaluatedKey': {'user_id': 'x'}},
+ {'Count': 10, 'ScannedCount': 20, 'Items': items[10:20], 'LastEvaluatedKey': {'user_id': 'y'}},
+ {'Count': 10, 'ScannedCount': 20, 'Items': items[20:30], 'LastEvaluatedKey': {'user_id': 'z'}},
]
results_iter = UserModel.scan(limit=25, page_size=10)
results = list(results_iter)
@@ -2556,7 +2556,7 @@ class ModelTestCase(TestCase):
self.assertEquals(req.mock_calls[0][1][1]['Limit'], 10)
self.assertEquals(req.mock_calls[1][1][1]['Limit'], 10)
self.assertEquals(req.mock_calls[2][1][1]['Limit'], 10)
- self.assertEquals(results_iter.last_evaluated_key, 'z')
+ self.assertEquals(results_iter.last_evaluated_key, {'user_id': items[24]['user_id']})
self.assertEquals(results_iter.total_count, 30)
self.assertEquals(results_iter.page_iter.total_scanned_count, 60)
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 3
} | 3.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-mock"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
botocore==1.2.0
certifi==2021.5.30
coverage==6.2
docutils==0.18.1
importlib-metadata==4.8.3
iniconfig==1.1.1
jmespath==0.7.1
packaging==21.3
pluggy==1.0.0
py==1.11.0
-e git+https://github.com/pynamodb/PynamoDB.git@1828bda52376a4b0313146b64ffb447e5392f467#egg=pynamodb
pyparsing==3.1.4
pytest==7.0.1
pytest-cov==4.0.0
pytest-mock==3.6.1
python-dateutil==2.9.0.post0
six==1.9.0
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: PynamoDB
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- botocore==1.2.0
- coverage==6.2
- docutils==0.18.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- jmespath==0.7.1
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-cov==4.0.0
- pytest-mock==3.6.1
- python-dateutil==2.9.0.post0
- six==1.9.0
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/PynamoDB
| [
"pynamodb/tests/test_base_connection.py::MetaTableTestCase::test_get_key_names",
"pynamodb/tests/test_base_connection.py::MetaTableTestCase::test_get_key_names_index",
"pynamodb/tests/test_model.py::ModelTestCase::test_query_limit_less_than_available_and_page_size",
"pynamodb/tests/test_model.py::ModelTestCase::test_query_limit_less_than_available_items_multiple_page",
"pynamodb/tests/test_model.py::ModelTestCase::test_scan_limit_with_page_size"
] | [
"pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_create_connection"
] | [
"pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_batch_get_item",
"pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_batch_write_item",
"pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_create_prepared_request",
"pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_create_table",
"pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_delete_item",
"pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_delete_table",
"pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_describe_table",
"pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_get_expected_map",
"pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_get_item",
"pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_get_query_filter_map",
"pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_handle_binary_attributes_for_unprocessed_items",
"pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_handle_binary_attributes_for_unprocessed_keys",
"pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_list_tables",
"pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_make_api_call_retries_properly",
"pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_make_api_call_throws_retry_disabled",
"pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_make_api_call_throws_verbose_error_after_backoff",
"pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_make_api_call_throws_verbose_error_after_backoff_later_succeeds",
"pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_make_api_call_throws_when_retries_exhausted",
"pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_put_item",
"pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_query",
"pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_rate_limited_scan",
"pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_rate_limited_scan_retries_max_sleep",
"pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_rate_limited_scan_retries_min_sleep",
"pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_rate_limited_scan_retries_on_rate_unavailable",
"pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_rate_limited_scan_retries_on_rate_unavailable_within_s",
"pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_rate_limited_scan_retries_timeout",
"pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_ratelimited_scan_exception_on_max_threshold",
"pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_ratelimited_scan_raises_non_client_error",
"pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_ratelimited_scan_raises_other_client_errors",
"pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_ratelimited_scan_retries_on_throttling",
"pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_ratelimited_scan_with_pagination_ends",
"pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_scan",
"pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_subsequent_client_is_cached_when_credentials_truthy",
"pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_subsequent_client_is_not_cached_when_credentials_none",
"pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_update_item",
"pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_update_table",
"pynamodb/tests/test_model.py::ModelTestCase::test_batch_get",
"pynamodb/tests/test_model.py::ModelTestCase::test_batch_write",
"pynamodb/tests/test_model.py::ModelTestCase::test_batch_write_with_unprocessed",
"pynamodb/tests/test_model.py::ModelTestCase::test_car_model_retrieve_from_db",
"pynamodb/tests/test_model.py::ModelTestCase::test_car_model_with_null_retrieve_from_db",
"pynamodb/tests/test_model.py::ModelTestCase::test_complex_key",
"pynamodb/tests/test_model.py::ModelTestCase::test_complex_model_is_complex",
"pynamodb/tests/test_model.py::ModelTestCase::test_complex_model_retrieve_from_db",
"pynamodb/tests/test_model.py::ModelTestCase::test_conditional_operator_map_attribute",
"pynamodb/tests/test_model.py::ModelTestCase::test_count",
"pynamodb/tests/test_model.py::ModelTestCase::test_count_no_hash_key",
"pynamodb/tests/test_model.py::ModelTestCase::test_create_model",
"pynamodb/tests/test_model.py::ModelTestCase::test_delete",
"pynamodb/tests/test_model.py::ModelTestCase::test_delete_doesnt_do_validation_on_null_attributes",
"pynamodb/tests/test_model.py::ModelTestCase::test_deserializing_map_four_layers_deep_works",
"pynamodb/tests/test_model.py::ModelTestCase::test_deserializing_new_style_bool_false_works",
"pynamodb/tests/test_model.py::ModelTestCase::test_deserializing_new_style_bool_true_works",
"pynamodb/tests/test_model.py::ModelTestCase::test_deserializing_old_style_bool_false_works",
"pynamodb/tests/test_model.py::ModelTestCase::test_deserializing_old_style_bool_true_works",
"pynamodb/tests/test_model.py::ModelTestCase::test_dumps",
"pynamodb/tests/test_model.py::ModelTestCase::test_explicit_raw_map_serialize_pass",
"pynamodb/tests/test_model.py::ModelTestCase::test_filter_count",
"pynamodb/tests/test_model.py::ModelTestCase::test_get",
"pynamodb/tests/test_model.py::ModelTestCase::test_global_index",
"pynamodb/tests/test_model.py::ModelTestCase::test_index_count",
"pynamodb/tests/test_model.py::ModelTestCase::test_index_multipage_count",
"pynamodb/tests/test_model.py::ModelTestCase::test_index_queries",
"pynamodb/tests/test_model.py::ModelTestCase::test_invalid_car_model_with_null_retrieve_from_db",
"pynamodb/tests/test_model.py::ModelTestCase::test_invalid_map_model_raises",
"pynamodb/tests/test_model.py::ModelTestCase::test_list_of_map_works_like_list_of_map",
"pynamodb/tests/test_model.py::ModelTestCase::test_list_works_like_list",
"pynamodb/tests/test_model.py::ModelTestCase::test_loads",
"pynamodb/tests/test_model.py::ModelTestCase::test_loads_complex_model",
"pynamodb/tests/test_model.py::ModelTestCase::test_local_index",
"pynamodb/tests/test_model.py::ModelTestCase::test_model_attrs",
"pynamodb/tests/test_model.py::ModelTestCase::test_model_subclass_attributes_inherited_on_create",
"pynamodb/tests/test_model.py::ModelTestCase::test_model_with_invalid_data_does_not_validate",
"pynamodb/tests/test_model.py::ModelTestCase::test_model_with_list",
"pynamodb/tests/test_model.py::ModelTestCase::test_model_with_list_of_map",
"pynamodb/tests/test_model.py::ModelTestCase::test_model_with_list_of_map_retrieve_from_db",
"pynamodb/tests/test_model.py::ModelTestCase::test_model_with_list_retrieve_from_db",
"pynamodb/tests/test_model.py::ModelTestCase::test_model_with_maps",
"pynamodb/tests/test_model.py::ModelTestCase::test_model_with_maps_retrieve_from_db",
"pynamodb/tests/test_model.py::ModelTestCase::test_model_with_maps_with_nulls_retrieve_from_db",
"pynamodb/tests/test_model.py::ModelTestCase::test_model_with_maps_with_pythonic_attributes",
"pynamodb/tests/test_model.py::ModelTestCase::test_model_with_nulls_validates",
"pynamodb/tests/test_model.py::ModelTestCase::test_model_works_like_model",
"pynamodb/tests/test_model.py::ModelTestCase::test_multiple_indices_share_non_key_attribute",
"pynamodb/tests/test_model.py::ModelTestCase::test_new_style_boolean_serializes_as_bool",
"pynamodb/tests/test_model.py::ModelTestCase::test_old_style_boolean_serializes_as_bool",
"pynamodb/tests/test_model.py::ModelTestCase::test_old_style_model_exception",
"pynamodb/tests/test_model.py::ModelTestCase::test_overidden_defaults",
"pynamodb/tests/test_model.py::ModelTestCase::test_overidden_session",
"pynamodb/tests/test_model.py::ModelTestCase::test_overridden_attr_name",
"pynamodb/tests/test_model.py::ModelTestCase::test_projections",
"pynamodb/tests/test_model.py::ModelTestCase::test_query",
"pynamodb/tests/test_model.py::ModelTestCase::test_query_limit_greater_than_available_items_and_page_size",
"pynamodb/tests/test_model.py::ModelTestCase::test_query_limit_greater_than_available_items_multiple_page",
"pynamodb/tests/test_model.py::ModelTestCase::test_query_limit_greater_than_available_items_single_page",
"pynamodb/tests/test_model.py::ModelTestCase::test_query_limit_identical_to_available_items_single_page",
"pynamodb/tests/test_model.py::ModelTestCase::test_rate_limited_scan",
"pynamodb/tests/test_model.py::ModelTestCase::test_raw_map_as_sub_map",
"pynamodb/tests/test_model.py::ModelTestCase::test_raw_map_as_sub_map_deserialize",
"pynamodb/tests/test_model.py::ModelTestCase::test_raw_map_as_sub_map_from_raw_data_works",
"pynamodb/tests/test_model.py::ModelTestCase::test_raw_map_as_sub_map_serialize_pass",
"pynamodb/tests/test_model.py::ModelTestCase::test_raw_map_deserializes",
"pynamodb/tests/test_model.py::ModelTestCase::test_raw_map_from_raw_data_works",
"pynamodb/tests/test_model.py::ModelTestCase::test_raw_map_serialize_fun_one",
"pynamodb/tests/test_model.py::ModelTestCase::test_refresh",
"pynamodb/tests/test_model.py::ModelTestCase::test_result_set_init",
"pynamodb/tests/test_model.py::ModelTestCase::test_result_set_iter",
"pynamodb/tests/test_model.py::ModelTestCase::test_save",
"pynamodb/tests/test_model.py::ModelTestCase::test_scan",
"pynamodb/tests/test_model.py::ModelTestCase::test_scan_limit",
"pynamodb/tests/test_model.py::ModelTestCase::test_update",
"pynamodb/tests/test_model.py::ModelTestCase::test_update_item",
"pynamodb/tests/test_model.py::ModelInitTestCase::test_raw_map_attribute_with_dict_init",
"pynamodb/tests/test_model.py::ModelInitTestCase::test_raw_map_attribute_with_initialized_instance_init",
"pynamodb/tests/test_model.py::ModelInitTestCase::test_subclassed_map_attribute_with_dict_init",
"pynamodb/tests/test_model.py::ModelInitTestCase::test_subclassed_map_attribute_with_initialized_instance_init",
"pynamodb/tests/test_model.py::ModelInitTestCase::test_subclassed_map_attribute_with_map_attribute_member_with_initialized_instance_init",
"pynamodb/tests/test_model.py::ModelInitTestCase::test_subclassed_map_attribute_with_map_attributes_member_with_dict_init"
] | [] | MIT License | 1,864 | 883 | [
"pynamodb/connection/base.py",
"pynamodb/connection/table.py",
"pynamodb/pagination.py"
] |
|
alvinwan__TexSoup-12 | b95820ac9f507916ce0a777cfcefe003ceb10c20 | 2017-11-10 18:03:57 | fa0be81ffe1ceb6189d2fdcec2114f706cdefe76 | diff --git a/TexSoup/reader.py b/TexSoup/reader.py
index 6125322..c6012fa 100644
--- a/TexSoup/reader.py
+++ b/TexSoup/reader.py
@@ -136,11 +136,19 @@ def tokenize_math(text):
>>> tokenize_math(b)
'$$\\min_x$$'
"""
+
+ def escaped_dollar():
+ return text.peek() == '$' and result[-1] == '\\'
+
+ def end_detected():
+ return (text.peek((0, len(starter))) == starter
+ and not escaped_dollar())
+
result = TokenWithPosition('', text.position)
if text.startswith('$'):
starter = '$$' if text.startswith('$$') else '$'
result += text.forward(len(starter))
- while text.hasNext() and text.peek((0, len(starter))) != starter:
+ while text.hasNext() and not end_detected():
result += next(text)
if not text.startswith(starter):
raise EOFError('Expecting %s. Instead got %s' % (
@@ -267,4 +275,4 @@ def read_arg(src, c):
content.append(read_tex(src))
else:
content.append(next(src))
- return Arg.parse(content)
\ No newline at end of file
+ return Arg.parse(content)
| $ and $$ math is not parsed
To reproduce:
```python
In [4]: list(TexSoup.read('$\lambda$').children)
Out[4]: [TexCmd('lambda$')]
```
Expected:
`$` and `$$` should be treated as paired delimiters and result in a correct environment.
TexSoup version:
0.0.3 | alvinwan/TexSoup | diff --git a/tests/test_parser.py b/tests/test_parser.py
index de08b65..232daf0 100644
--- a/tests/test_parser.py
+++ b/tests/test_parser.py
@@ -216,6 +216,13 @@ def test_math_environment_whitespace():
assert '\$' in contents[1], 'Dollar sign not escaped!'
+def test_math_environment_escape():
+ """Tests $ escapes in math environment."""
+ soup = TexSoup("$ \$ $")
+ contents = list(soup.contents)
+ assert '\$' in contents[0][0], 'Dollar sign not escaped!'
+
+
def test_punctuation_command_structure():
"""Tests that commands for punctuation work."""
soup = TexSoup(r"""\right. \right[ \right( \right|""")
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 1
},
"num_modified_files": 1
} | 0.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"coverage",
"coveralls"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | certifi==2025.1.31
charset-normalizer==3.4.1
coverage==3.7.1
coveralls==1.1
docopt==0.6.2
exceptiongroup==1.2.2
idna==3.10
iniconfig==2.1.0
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
requests==2.32.3
-e git+https://github.com/alvinwan/TexSoup.git@b95820ac9f507916ce0a777cfcefe003ceb10c20#egg=TexSoup
tomli==2.2.1
urllib3==2.3.0
| name: TexSoup
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- certifi==2025.1.31
- charset-normalizer==3.4.1
- coverage==3.7.1
- coveralls==1.1
- docopt==0.6.2
- exceptiongroup==1.2.2
- idna==3.10
- iniconfig==2.1.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- requests==2.32.3
- tomli==2.2.1
- urllib3==2.3.0
prefix: /opt/conda/envs/TexSoup
| [
"tests/test_parser.py::test_math_environment_escape"
] | [] | [
"TexSoup/__init__.py::TexSoup.TexSoup",
"TexSoup/data.py::TexSoup.data.TexArgs",
"TexSoup/data.py::TexSoup.data.TexArgs.__repr__",
"TexSoup/data.py::TexSoup.data.TexArgs.__str__",
"TexSoup/data.py::TexSoup.data.TexCmd",
"TexSoup/data.py::TexSoup.data.TexEnv",
"TexSoup/data.py::TexSoup.data.TexNode.__match__",
"TexSoup/reader.py::TexSoup.reader.next_token",
"TexSoup/reader.py::TexSoup.reader.tokenize",
"TexSoup/reader.py::TexSoup.reader.tokenize_math",
"TexSoup/reader.py::TexSoup.reader.tokenize_string",
"TexSoup/utils.py::TexSoup.utils.Buffer",
"TexSoup/utils.py::TexSoup.utils.Buffer.__getitem__",
"TexSoup/utils.py::TexSoup.utils.Buffer.backward",
"TexSoup/utils.py::TexSoup.utils.Buffer.forward",
"TexSoup/utils.py::TexSoup.utils.to_buffer",
"tests/test_parser.py::test_commands_only",
"tests/test_parser.py::test_commands_envs_only",
"tests/test_parser.py::test_commands_envs_text",
"tests/test_parser.py::test_text_preserved",
"tests/test_parser.py::test_command_name_parse",
"tests/test_parser.py::test_commands_without_arguments",
"tests/test_parser.py::test_unlabeled_environment",
"tests/test_parser.py::test_ignore_environment",
"tests/test_parser.py::test_inline_math",
"tests/test_parser.py::test_escaped_characters",
"tests/test_parser.py::test_basic_whitespace",
"tests/test_parser.py::test_whitespace_in_command",
"tests/test_parser.py::test_math_environment_whitespace",
"tests/test_parser.py::test_punctuation_command_structure",
"tests/test_parser.py::test_unclosed_environments",
"tests/test_parser.py::test_unclosed_math_environments",
"tests/test_parser.py::test_arg_parse"
] | [] | BSD 2-Clause "Simplified" License | 1,871 | 314 | [
"TexSoup/reader.py"
] |
|
nose-devs__nose2-369 | 862b130652b9118eb8d5681923c04edb000245d3 | 2017-11-11 23:49:52 | 90945cad86df2d11cc9332219ee85aca97311f6e | coveralls:
[](https://coveralls.io/builds/14156452)
Coverage increased (+0.2%) to 87.69% when pulling **d436976ece996497dac644fc59a40b2ead323ae7 on ptthiem:issue/167** into **862b130652b9118eb8d5681923c04edb000245d3 on nose-devs:master**.
ptthiem: There is a small warning being thrown by the test, but it seems to be working. I'll check it tomorrow afternoon
coveralls:
[](https://coveralls.io/builds/14157959)
Coverage increased (+0.07%) to 87.527% when pulling **d436976ece996497dac644fc59a40b2ead323ae7 on ptthiem:issue/167** into **862b130652b9118eb8d5681923c04edb000245d3 on nose-devs:master**.
coveralls:
[](https://coveralls.io/builds/14158069)
Coverage increased (+0.2%) to 87.636% when pulling **6a73c8057173469da7eae5a0d4c9b0a07e9867bf on ptthiem:issue/167** into **862b130652b9118eb8d5681923c04edb000245d3 on nose-devs:master**.
| diff --git a/nose2/plugins/layers.py b/nose2/plugins/layers.py
index 43025ca..eda14dc 100644
--- a/nose2/plugins/layers.py
+++ b/nose2/plugins/layers.py
@@ -235,6 +235,9 @@ class LayerReporter(events.Plugin):
if event.errorList and hasattr(event.test, 'layer'):
# walk back layers to build full description
self.describeLayers(event)
+ # we need to remove "\n" from description to keep a well indented report when tests have docstrings
+ # see https://github.com/nose-devs/nose2/issues/327 for more information
+ event.description = event.description.replace('\n', ' ')
def describeLayers(self, event):
desc = [event.description]
diff --git a/nose2/plugins/mp.py b/nose2/plugins/mp.py
index a8346d0..b712bb3 100644
--- a/nose2/plugins/mp.py
+++ b/nose2/plugins/mp.py
@@ -80,8 +80,8 @@ class MultiProcess(events.Plugin):
if testid.startswith(failed_import_id):
self.cases[testid].run(result_)
- # XXX The length of the filtered list needs to be known
- # for _startProcs, until this can be cleaned up. This
+ # XXX Process-Handling: The length of the filtered list needs to be
+ # known for _startProcs, until this can be cleaned up. This
# wasn't the best way to deal with too few tests
flat = [x for x in flat if not x.startswith(failed_import_id)]
procs = self._startProcs(len(flat))
@@ -91,22 +91,26 @@ class MultiProcess(events.Plugin):
if not flat:
break
caseid = flat.pop(0)
+ # NOTE: it throws errors on broken pipes and bad serialization
conn.send(caseid)
rdrs = [conn for proc, conn in procs if proc.is_alive()]
while flat or rdrs:
ready, _, _ = select.select(rdrs, [], [], self.testRunTimeout)
for conn in ready:
- # XXX proc could be dead
+ # XXX Process-Handling: If we get an EOFError on receive the
+ # process finished= or we lost the process and the test it was
+ # working on. Also do we rebuild the process?
try:
remote_events = conn.recv()
except EOFError:
# probably dead/12
log.warning("Subprocess connection closed unexpectedly")
- continue # XXX or die?
+ continue
+ # If remote_events is None, the process exited normally,
+ # which should mean that we didn't any more tests for it.
if remote_events is None:
- # XXX proc is done, how to mark it dead?
log.debug("Conn closed %s", conn)
rdrs.remove(conn)
continue
@@ -119,9 +123,10 @@ class MultiProcess(events.Plugin):
self._localize(event)
getattr(self.session.hooks, hook)(event)
- # send a new test to the worker if there is one left
+ # Send the next test_id
+ # NOTE: send throws errors on broken pipes and bad serialization
if not flat:
- # if there isn't send None - it's the 'done' flag
+ # If there are no more, send None - it's the 'done' flag
conn.send(None)
continue
caseid = flat.pop(0)
@@ -129,6 +134,7 @@ class MultiProcess(events.Plugin):
for _, conn in procs:
conn.close()
+
# ensure we wait until all processes are done before
# exiting, to allow plugins running there to finalize
for proc, _ in procs:
@@ -174,7 +180,7 @@ class MultiProcess(events.Plugin):
return parent_conn
def _startProcs(self, test_count):
- # XXX create session export
+ # Create session export
session_export = self._exportSession()
procs = []
count = min(test_count, self.procs)
@@ -190,11 +196,15 @@ class MultiProcess(events.Plugin):
return procs
def _flatten(self, suite):
- # XXX
- # examine suite tests to find out if they have class
- # or module fixtures and group them that way into names
- # of test classes or modules
- # ALSO record all test cases in self.cases
+ """
+ Flatten test-suite into list of IDs, AND record all test case
+ into self.cases
+
+ CAVEAT: Due to current limitation of the MP plugin, examine the suite
+ tests to find out if they have class or module fixtures and
+ group them that way into name of test classes or module.
+ This is aid in their dispatch.
+ """
log.debug("Flattening test into list of IDs")
mods = {}
classes = {}
@@ -244,19 +254,27 @@ class MultiProcess(events.Plugin):
"main process" % event.test))._tests[0]
def _exportSession(self):
- # argparse isn't pickleable
- # no plugin instances
- # no hooks
+ """
+ Generate the session information passed to work process.
+
+ CAVEAT: The entire contents of which *MUST* be pickeable
+ and safe to use in the subprocess.
+
+ This probably includes:
+ * No argparse namespaces/named-tuples
+ * No plugin instances
+ * No hokes
+ :return:
+ """
export = {'config': self.session.config,
'verbosity': self.session.verbosity,
'startDir': self.session.startDir,
'topLevelDir': self.session.topLevelDir,
'logLevel': self.session.logLevel,
- # XXX classes or modules?
'pluginClasses': []}
- # XXX fire registerInSubprocess -- add those plugin classes
- # (classes must be pickleable!)
- event = RegisterInSubprocessEvent() # FIXME should be own event type
+ event = RegisterInSubprocessEvent()
+ # fire registerInSubprocess on plugins -- add those plugin classes
+ # CAVEAT: classes must be pickleable!
self.session.hooks.registerInSubprocess(event)
export['pluginClasses'].extend(event.pluginClasses)
return export
@@ -268,31 +286,16 @@ def procserver(session_export, conn):
rlog.setLevel(session_export['logLevel'])
# make a real session from the "session" we got
- ssn = session.Session()
- ssn.config = session_export['config']
- ssn.hooks = RecordingPluginInterface()
- ssn.verbosity = session_export['verbosity']
- ssn.startDir = session_export['startDir']
- ssn.topLevelDir = session_export['topLevelDir']
- ssn.prepareSysPath()
- loader_ = loader.PluggableTestLoader(ssn)
- ssn.testLoader = loader_
- result_ = result.PluggableTestResult(ssn)
- ssn.testResult = result_
- runner_ = runner.PluggableTestRunner(ssn) # needed??
- ssn.testRunner = runner_
- # load and register plugins
- ssn.plugins = [
- plugin(session=ssn) for plugin in session_export['pluginClasses']]
- rlog.debug("Plugins loaded: %s", ssn.plugins)
- for plugin in ssn.plugins:
- plugin.register()
- rlog.debug("Registered %s in subprocess", plugin)
+ ssn = import_session(rlog, session_export)
if isinstance(conn, collections.Sequence):
conn = connection.Client(conn[:2], authkey=conn[2])
- event = SubprocessEvent(loader_, result_, runner_, ssn.plugins, conn)
+ event = SubprocessEvent(ssn.testLoader,
+ ssn.testResult,
+ ssn.testRunner,
+ ssn.plugins,
+ conn)
res = ssn.hooks.startSubprocess(event)
if event.handled and not res:
conn.send(None)
@@ -308,7 +311,7 @@ def procserver(session_export, conn):
# deal with the case that testid is something other
# than a simple string.
test = event.loader.loadTestsFromName(testid)
- # xxx try/except?
+ # XXX If there a need to protect the loop? try/except?
rlog.debug("Execute test %s (%s)", testid, test)
executor(test, event.result)
events = [e for e in ssn.hooks.flush()]
@@ -319,6 +322,38 @@ def procserver(session_export, conn):
ssn.hooks.stopSubprocess(event)
+def import_session(rlog, session_export):
+ ssn = session.Session()
+ ssn.config = session_export['config']
+ ssn.hooks = RecordingPluginInterface()
+ ssn.verbosity = session_export['verbosity']
+ ssn.startDir = session_export['startDir']
+ ssn.topLevelDir = session_export['topLevelDir']
+ ssn.prepareSysPath()
+ loader_ = loader.PluggableTestLoader(ssn)
+ ssn.testLoader = loader_
+ result_ = result.PluggableTestResult(ssn)
+ ssn.testResult = result_
+ runner_ = runner.PluggableTestRunner(ssn) # needed??
+ ssn.testRunner = runner_
+ # load and register plugins, forcing multiprocess to the end
+ ssn.plugins = [
+ plugin(session=ssn) for plugin in session_export['pluginClasses']
+ if plugin is not MultiProcess
+ ]
+ rlog.debug("Plugins loaded: %s", ssn.plugins)
+
+ for plugin in ssn.plugins:
+ plugin.register()
+ rlog.debug("Registered %s in subprocess", plugin)
+
+ # instantiating the plugin will register it.
+ ssn.plugins.append(MultiProcess(session=ssn))
+ rlog.debug("Registered %s in subprocess", MultiProcess)
+ ssn.plugins[-1].pluginsLoaded(events.PluginsLoadedEvent(ssn.plugins))
+ return ssn
+
+
# test generator
def gentests(conn):
while True:
| MP plugin does't call startSubprocess in subprocess
My very quick diagnosis is:
In [mp.py](https://github.com/nose-devs/nose2/blob/master/nose2/plugins/mp.py):
- The new hook methods are registered on [line 27](https://github.com/nose-devs/nose2/blob/master/nose2/plugins/mp.py#L27)
- However that is only called in the master process
- Hence, when in the subprocess on [line 197](https://github.com/nose-devs/nose2/blob/master/nose2/plugins/mp.py#L197) we register the plugins, the session doesn't know to look for `startSubprocess` (or `stopSubprocess` for that matter)
- Hence when it tries to dispatch it on [line 202](https://github.com/nose-devs/nose2/blob/master/nose2/plugins/mp.py#L202) it won't call the plugins
| nose-devs/nose2 | diff --git a/nose2/tests/functional/support/scenario/layers/test_layers.py b/nose2/tests/functional/support/scenario/layers/test_layers.py
index e800780..a35bb0f 100644
--- a/nose2/tests/functional/support/scenario/layers/test_layers.py
+++ b/nose2/tests/functional/support/scenario/layers/test_layers.py
@@ -180,6 +180,8 @@ class InnerD(unittest.TestCase):
layer = LayerD
def test(self):
+ """test with docstring
+ """
self.assertEqual(
{'base': 'setup',
'layerD': 'setup'},
diff --git a/nose2/tests/functional/test_layers_plugin.py b/nose2/tests/functional/test_layers_plugin.py
index 9666dba..c658bbe 100644
--- a/nose2/tests/functional/test_layers_plugin.py
+++ b/nose2/tests/functional/test_layers_plugin.py
@@ -56,7 +56,7 @@ class TestLayers(FunctionalTestCase):
Base
test \(test_layers.Outer\) ... ok
LayerD
- test \(test_layers.InnerD\) ... ok
+ test \(test_layers.InnerD\) test with docstring ... ok
LayerA
test \(test_layers.InnerA\) ... ok
LayerB
diff --git a/nose2/tests/unit/test_mp_plugin.py b/nose2/tests/unit/test_mp_plugin.py
index 02a9949..e808bdd 100644
--- a/nose2/tests/unit/test_mp_plugin.py
+++ b/nose2/tests/unit/test_mp_plugin.py
@@ -1,6 +1,7 @@
from nose2 import session
from nose2.tests._common import TestCase, Conn
from nose2.plugins import mp
+from six.moves import configparser
import sys
@@ -58,3 +59,19 @@ class TestMPPlugin(TestCase):
finally:
sys.platform = platform
+ def test_session_import(self):
+ config = configparser.ConfigParser()
+ config.add_section(mp.MultiProcess.configSection)
+ export_session = {
+ "config": config,
+ "verbosity": None,
+ "startDir": '',
+ "topLevelDir": '',
+ "pluginClasses": [mp.MultiProcess]
+ }
+ import logging
+ session = mp.import_session(logging.root, export_session)
+ self.assertIn('registerInSubprocess', session.hooks.methods)
+ self.assertIn('startSubprocess', session.hooks.methods)
+ self.assertIn('stopSubprocess', session.hooks.methods)
+ pass
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 2
} | 0.7 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[coverage_plugin]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose2",
"pytest"
],
"pre_install": null,
"python": "3.5",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
coverage==6.2
importlib-metadata==4.8.3
iniconfig==1.1.1
-e git+https://github.com/nose-devs/nose2.git@862b130652b9118eb8d5681923c04edb000245d3#egg=nose2
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
six==1.17.0
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: nose2
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- coverage==6.2
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- six==1.17.0
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/nose2
| [
"nose2/tests/functional/test_layers_plugin.py::TestLayers::test_layer_reporter_output",
"nose2/tests/unit/test_mp_plugin.py::TestMPPlugin::test_session_import"
] | [
"nose2/tests/functional/support/scenario/layers/test_layers.py::Outer::test",
"nose2/tests/functional/support/scenario/layers/test_layers.py::InnerA::test",
"nose2/tests/functional/support/scenario/layers/test_layers.py::InnerA_1::test",
"nose2/tests/functional/support/scenario/layers/test_layers.py::InnerB_1::test",
"nose2/tests/functional/support/scenario/layers/test_layers.py::InnerC::test",
"nose2/tests/functional/support/scenario/layers/test_layers.py::InnerC::test2",
"nose2/tests/functional/support/scenario/layers/test_layers.py::InnerD::test"
] | [
"nose2/tests/functional/support/scenario/layers/test_layers.py::NoLayer::test",
"nose2/tests/functional/test_layers_plugin.py::TestLayers::test_layer_reporter_error_output",
"nose2/tests/functional/test_layers_plugin.py::TestLayers::test_layers_and_attributes",
"nose2/tests/functional/test_layers_plugin.py::TestLayers::test_layers_and_non_layers",
"nose2/tests/functional/test_layers_plugin.py::TestLayers::test_methods_run_once_per_class",
"nose2/tests/functional/test_layers_plugin.py::TestLayers::test_runs_layer_fixtures",
"nose2/tests/functional/test_layers_plugin.py::TestLayers::test_scenario_fails_without_plugin",
"nose2/tests/functional/test_layers_plugin.py::TestLayers::test_setup_fail",
"nose2/tests/functional/test_layers_plugin.py::TestLayers::test_teardown_fail",
"nose2/tests/unit/test_mp_plugin.py::TestMPPlugin::test_address",
"nose2/tests/unit/test_mp_plugin.py::TestMPPlugin::test_gentests",
"nose2/tests/unit/test_mp_plugin.py::TestMPPlugin::test_recording_plugin_interface"
] | [] | BSD | 1,878 | 2,381 | [
"nose2/plugins/layers.py",
"nose2/plugins/mp.py"
] |
ucfopen__canvasapi-107 | d05071ddc2e4a37ebd49d162f4f080757befeb4c | 2017-11-13 21:24:00 | db3c377b68f2953e1618f4e4588cc2db8603841e | diff --git a/canvasapi/requester.py b/canvasapi/requester.py
index eff4b3a..c017a42 100644
--- a/canvasapi/requester.py
+++ b/canvasapi/requester.py
@@ -24,6 +24,7 @@ class Requester(object):
self.base_url = base_url
self.access_token = access_token
self._session = requests.Session()
+ self._cache = []
def request(
self, method, endpoint=None, headers=None, use_auth=True,
@@ -84,6 +85,12 @@ class Requester(object):
# Call the request method
response = req_method(full_url, headers, _kwargs)
+ # Add response to internal cache
+ if len(self._cache) > 4:
+ self._cache.pop()
+
+ self._cache.insert(0, response)
+
# Raise for status codes
if response.status_code == 400:
raise BadRequest(response.json())
| Add a Requester cache for debugging
We should add a cache of requests and responses onto `Requester` to make debugging easier.
This could be as simple as a tuple recording the last request that was sent and the last response received, or we could save several requests and responses. | ucfopen/canvasapi | diff --git a/tests/test_requester.py b/tests/test_requester.py
index d65f2d4..fb582a7 100644
--- a/tests/test_requester.py
+++ b/tests/test_requester.py
@@ -77,6 +77,23 @@ class TestRequester(unittest.TestCase):
response = self.requester.request('PUT', 'fake_put_request')
self.assertEqual(response.status_code, 200)
+ def test_request_cache(self, m):
+ register_uris({'requests': ['get']}, m)
+
+ response = self.requester.request('GET', 'fake_get_request')
+ self.assertEqual(response, self.requester._cache[0])
+
+ def test_request_cache_clear_after_5(self, m):
+ register_uris({'requests': ['get', 'post']}, m)
+
+ for i in range(5):
+ self.requester.request('GET', 'fake_get_request')
+
+ response = self.requester.request('POST', 'fake_post_request')
+
+ self.assertLessEqual(len(self.requester._cache), 5)
+ self.assertEqual(response, self.requester._cache[0])
+
def test_request_400(self, m):
register_uris({'requests': ['400']}, m)
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 1
},
"num_modified_files": 1
} | 0.7 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"flake8",
"pycodestyle",
"requests_mock"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/ucfopen/canvasapi.git@d05071ddc2e4a37ebd49d162f4f080757befeb4c#egg=canvasapi
certifi==2025.1.31
charset-normalizer==3.4.1
exceptiongroup==1.2.2
flake8==7.2.0
idna==3.10
iniconfig==2.1.0
mccabe==0.7.0
packaging==24.2
pluggy==1.5.0
pycodestyle==2.13.0
pyflakes==3.3.1
pytest==8.3.5
pytz==2025.2
requests==2.32.3
requests-mock==1.12.1
six==1.17.0
tomli==2.2.1
urllib3==2.3.0
| name: canvasapi
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- certifi==2025.1.31
- charset-normalizer==3.4.1
- exceptiongroup==1.2.2
- flake8==7.2.0
- idna==3.10
- iniconfig==2.1.0
- mccabe==0.7.0
- packaging==24.2
- pluggy==1.5.0
- pycodestyle==2.13.0
- pyflakes==3.3.1
- pytest==8.3.5
- pytz==2025.2
- requests==2.32.3
- requests-mock==1.12.1
- six==1.17.0
- tomli==2.2.1
- urllib3==2.3.0
prefix: /opt/conda/envs/canvasapi
| [
"tests/test_requester.py::TestRequester::test_request_cache",
"tests/test_requester.py::TestRequester::test_request_cache_clear_after_5"
] | [] | [
"tests/test_requester.py::TestRequester::test_request_400",
"tests/test_requester.py::TestRequester::test_request_401_InvalidAccessToken",
"tests/test_requester.py::TestRequester::test_request_401_Unauthorized",
"tests/test_requester.py::TestRequester::test_request_404",
"tests/test_requester.py::TestRequester::test_request_500",
"tests/test_requester.py::TestRequester::test_request_delete",
"tests/test_requester.py::TestRequester::test_request_get",
"tests/test_requester.py::TestRequester::test_request_get_datetime",
"tests/test_requester.py::TestRequester::test_request_post",
"tests/test_requester.py::TestRequester::test_request_post_datetime",
"tests/test_requester.py::TestRequester::test_request_put"
] | [] | MIT License | 1,888 | 230 | [
"canvasapi/requester.py"
] |
|
witchard__grole-15 | d47d0ec83f7d76912d5b6b00fd130d79f892939c | 2017-11-17 02:19:46 | a766ad29789b27e75f388ef0f7ce8d999d52c4e4 | coveralls:
[](https://coveralls.io/builds/14246503)
Coverage decreased (-0.8%) to 89.64% when pulling **b79589e9daec23c08537795327876a336be505df on errors** into **d47d0ec83f7d76912d5b6b00fd130d79f892939c on master**.
coveralls:
[](https://coveralls.io/builds/14246507)
Coverage increased (+0.1%) to 90.583% when pulling **b79589e9daec23c08537795327876a336be505df on errors** into **d47d0ec83f7d76912d5b6b00fd130d79f892939c on master**.
coveralls:
[](https://coveralls.io/builds/14246523)
Coverage increased (+5.6%) to 96.035% when pulling **ed28971b3184dcc7a9a4275edd8fc66ac7d1bd61 on errors** into **d47d0ec83f7d76912d5b6b00fd130d79f892939c on master**.
| diff --git a/grole.py b/grole.py
index 72eba4a..bd9ed83 100755
--- a/grole.py
+++ b/grole.py
@@ -376,6 +376,9 @@ class Grole:
self._logger.info('{}: {} -> {}'.format(peer, req.path, res.code))
except EOFError:
self._logger.debug('Connection closed from {}'.format(peer))
+ except Exception as e:
+ self._logger.error('Connection error ({}) from {}'.format(e, peer))
+ writer.close()
def run(self, host='localhost', port=1234):
"""
@@ -389,7 +392,11 @@ class Grole:
# Setup loop
loop = asyncio.get_event_loop()
coro = asyncio.start_server(self._handle, host, port, loop=loop)
- server = loop.run_until_complete(coro)
+ try:
+ server = loop.run_until_complete(coro)
+ except Exception as e:
+ self._logger.error('Could not launch server: {}'.format(e))
+ return
# Run the server
self._logger.info('Serving on {}'.format(server.sockets[0].getsockname()))
| HTTPS connections result in exceptions
To reproduce simply run grole.py and connect to https://127.0.0.1:1234/
ERROR:asyncio:Task exception was never retrieved
future: <Task finished coro=<Grole._handle() done, defined at ./grole.py:336> exception=UnicodeDecodeError('utf-8', b'[Long hex string that I redacted]\n', 4, 5, 'invalid start byte')>
Traceback (most recent call last):
File "/usr/lib/python3.5/asyncio/tasks.py", line 239, in _step
result = coro.send(None)
File "./grole.py", line 349, in _handle
await req._read(reader)
File "./grole.py", line 45, in _read
self.method, self.location, self.version = start_line.decode().split()
UnicodeDecodeError: 'utf-8' codec can't decode byte 0xae in position 4: invalid start byte | witchard/grole | diff --git a/test/helpers.py b/test/helpers.py
index 51c1394..85f307b 100644
--- a/test/helpers.py
+++ b/test/helpers.py
@@ -32,3 +32,19 @@ class FakeWriter():
def get_extra_info(self, arg):
return 'fake'
+
+class ErrorWriter():
+ def __init__(self):
+ self.closed = False
+
+ async def drain(self):
+ return
+
+ def write(self, data):
+ raise Exception('Broken')
+
+ def get_extra_info(self, arg):
+ return 'fake'
+
+ def close(self):
+ self.closed = True
diff --git a/test/test_grole.py b/test/test_grole.py
index 26eb5e7..02a7a6d 100644
--- a/test/test_grole.py
+++ b/test/test_grole.py
@@ -1,6 +1,6 @@
import unittest
import pathlib
-from helpers import FakeReader, FakeWriter, a_wait
+from helpers import *
import grole
@@ -43,6 +43,18 @@ class TestGrole(unittest.TestCase):
data = wr.data.split(b'\r\n')[0]
self.assertEqual(b'HTTP/1.1 500 Internal Server Error', data)
+ def test_close_big_error(self):
+ @self.app.route('/')
+ def error(env, req):
+ a = []
+ return a[1]
+
+ rd = FakeReader(data=b'GET / HTTP/1.1\r\n\r\n')
+ wr = ErrorWriter()
+ a_wait(self.app._handle(rd, wr))
+ self.assertTrue(wr.closed)
+
+
def test_404(self):
rd = FakeReader(data=b'GET / HTTP/1.1\r\n\r\n')
wr = FakeWriter()
diff --git a/test/test_main.py b/test/test_main.py
new file mode 100644
index 0000000..4df7d18
--- /dev/null
+++ b/test/test_main.py
@@ -0,0 +1,17 @@
+import unittest
+
+import grole
+
+class TestMain(unittest.TestCase):
+
+ def test_launch(self):
+ # Success is that it doesn't do anything
+ grole.main(['-p', '80'])
+
+ def test_launch2(self):
+ # Success is that it doesn't do anything
+ grole.main(['-p', '80', '-q'])
+
+ def test_launch3(self):
+ # Success is that it doesn't do anything
+ grole.main(['-p', '80', '-v'])
diff --git a/test/test_serve.py b/test/test_serve.py
index 1e6955c..45a725e 100644
--- a/test/test_serve.py
+++ b/test/test_serve.py
@@ -36,4 +36,11 @@ class TestServe(unittest.TestCase):
self.assertEqual(html, b'foo\n')
p.terminate()
+ def test_https(self):
+ p = multiprocessing.Process(target=simple_server)
+ p.start()
+ time.sleep(0.1)
+ self.assertRaises(urllib.error.URLError)
+ p.terminate()
+
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 1
} | 0.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"coverage",
"coveralls",
"pytest"
],
"pre_install": [],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
charset-normalizer==2.0.12
coverage==6.2
coveralls==3.3.1
docopt==0.6.2
-e git+https://github.com/witchard/grole.git@d47d0ec83f7d76912d5b6b00fd130d79f892939c#egg=grole
idna==3.10
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
requests==2.27.1
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
urllib3==1.26.20
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: grole
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- charset-normalizer==2.0.12
- coverage==6.2
- coveralls==3.3.1
- docopt==0.6.2
- idna==3.10
- requests==2.27.1
- urllib3==1.26.20
prefix: /opt/conda/envs/grole
| [
"test/test_grole.py::TestGrole::test_close_big_error",
"test/test_main.py::TestMain::test_launch",
"test/test_main.py::TestMain::test_launch2",
"test/test_main.py::TestMain::test_launch3"
] | [] | [
"test/test_grole.py::TestGrole::test_404",
"test/test_grole.py::TestGrole::test_async",
"test/test_grole.py::TestGrole::test_error",
"test/test_grole.py::TestGrole::test_hello",
"test/test_grole.py::TestStatic::test_file",
"test/test_grole.py::TestStatic::test_index",
"test/test_grole.py::TestStatic::test_index2",
"test/test_grole.py::TestStatic::test_notfound",
"test/test_grole.py::TestDoc::test_doc",
"test/test_serve.py::TestServe::test_fileserver",
"test/test_serve.py::TestServe::test_https",
"test/test_serve.py::TestServe::test_simple"
] | [] | MIT License | 1,899 | 281 | [
"grole.py"
] |
jwplayer__jwplatform-py-20 | be9d31a94f85b8846c8517b5fe2b065c5d7bfad9 | 2017-11-17 21:41:54 | be9d31a94f85b8846c8517b5fe2b065c5d7bfad9 | diff --git a/jwplatform/client.py b/jwplatform/client.py
index 18edad5..f00b7e1 100644
--- a/jwplatform/client.py
+++ b/jwplatform/client.py
@@ -61,11 +61,11 @@ class Client(object):
self.__key = key
self.__secret = secret
- self._scheme = kwargs.pop('scheme', 'https')
- self._host = kwargs.pop('host', 'api.jwplatform.com')
- self._port = int(kwargs.pop('port', 80))
- self._api_version = kwargs.pop('version', 'v1')
- self._agent = kwargs.pop('agent', None)
+ self._scheme = kwargs.get('scheme') or 'https'
+ self._host = kwargs.get('host') or 'api.jwplatform.com'
+ self._port = int(kwargs['port']) if kwargs.get('port') else 80
+ self._api_version = kwargs.get('version') or 'v1'
+ self._agent = kwargs.get('agent')
self._connection = requests.Session()
self._connection.mount(self._scheme, RetryAdapter())
| Client with null kwargs does not use default parameters
Currently jwplatform.Client instantiation only uses default parameters if a kwarg doesn't exist. If the kwarg is `None` this value is still used. It would be expected that `None` values for a kwarg use the default value.
**Current**
```python
>>> import jwplatform
>>> client = jwplatform.Client('key', 'secret', host=None)
>>> client._host == 'api.jwplatform.com'
False
```
**Expected**
```python
>>> import jwplatform
>>> client = jwplatform.Client('key', 'secret', host=None)
>>> client._host == 'api.jwplatform.com'
True
```
| jwplayer/jwplatform-py | diff --git a/tests/test_init.py b/tests/test_init.py
index dc3ab77..3502914 100644
--- a/tests/test_init.py
+++ b/tests/test_init.py
@@ -50,3 +50,33 @@ def test_custom_initialization():
assert 'User-Agent' in jwp_client._connection.headers
assert jwp_client._connection.headers['User-Agent'] == \
'python-jwplatform/{}-{}'.format(jwplatform.__version__, AGENT)
+
+
+def test_custom_initialization_empty_kwargs():
+
+ KEY = 'api_key'
+ SECRET = 'api_secret'
+ SCHEME = None
+ HOST = None
+ PORT = None
+ API_VERSION = None
+ AGENT = None
+
+ jwp_client = jwplatform.Client(
+ KEY, SECRET,
+ scheme=SCHEME,
+ host=HOST,
+ port=PORT,
+ version=API_VERSION,
+ agent=AGENT)
+
+ assert jwp_client._Client__key == KEY
+ assert jwp_client._Client__secret == SECRET
+ assert jwp_client._scheme == 'https'
+ assert jwp_client._host == 'api.jwplatform.com'
+ assert jwp_client._port == 80
+ assert jwp_client._api_version == 'v1'
+ assert jwp_client._agent is None
+ assert 'User-Agent' in jwp_client._connection.headers
+ assert jwp_client._connection.headers['User-Agent'] == \
+ 'python-jwplatform/{}'.format(jwplatform.__version__)
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | 1.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "",
"pip_packages": [
"pytest",
"responses"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | certifi==2025.1.31
charset-normalizer==3.4.1
exceptiongroup==1.2.2
idna==3.10
iniconfig==2.1.0
-e git+https://github.com/jwplayer/jwplatform-py.git@be9d31a94f85b8846c8517b5fe2b065c5d7bfad9#egg=jwplatform
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
PyYAML==6.0.2
requests==2.32.3
responses==0.25.7
tomli==2.2.1
urllib3==2.3.0
| name: jwplatform-py
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- certifi==2025.1.31
- charset-normalizer==3.4.1
- exceptiongroup==1.2.2
- idna==3.10
- iniconfig==2.1.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- pyyaml==6.0.2
- requests==2.32.3
- responses==0.25.7
- tomli==2.2.1
- urllib3==2.3.0
prefix: /opt/conda/envs/jwplatform-py
| [
"tests/test_init.py::test_custom_initialization_empty_kwargs"
] | [] | [
"tests/test_init.py::test_default_initialization",
"tests/test_init.py::test_custom_initialization"
] | [] | MIT License | 1,902 | 268 | [
"jwplatform/client.py"
] |
|
Azure__msrest-for-python-67 | 24deba7a7a9e335314058ec2d0b39a710f61be60 | 2017-11-20 21:05:32 | 24deba7a7a9e335314058ec2d0b39a710f61be60 | diff --git a/msrest/service_client.py b/msrest/service_client.py
index eed50c5..d86fcbb 100644
--- a/msrest/service_client.py
+++ b/msrest/service_client.py
@@ -164,10 +164,15 @@ class ServiceClient(object):
"""
if content is None:
content = {}
- file_data = {f: self._format_data(d) for f, d in content.items()}
- if headers:
- headers.pop('Content-Type', None)
- return self.send(request, headers, None, files=file_data, **config)
+ content_type = headers.pop('Content-Type', None) if headers else None
+
+ if content_type and content_type.lower() == 'application/x-www-form-urlencoded':
+ # Do NOT use "add_content" that assumes input is JSON
+ request.data = {f: d for f, d in content.items() if d is not None}
+ return self.send(request, headers, None, **config)
+ else: # Assume "multipart/form-data"
+ file_data = {f: self._format_data(d) for f, d in content.items() if d is not None}
+ return self.send(request, headers, None, files=file_data, **config)
def send(self, request, headers=None, content=None, **config):
"""Prepare and send request object according to configuration.
| Optional formData parameters crash msrest
If a parameter that is supposed to be formData is optional, we give `None` to requests:
```python
files = [('Text', (None, 'cognituve services')), ('Mode', (None, None)), ('PreContextText', (None, None)), ('PostContextText', (None, None))]
data = {}
@staticmethod
def _encode_files(files, data):
"""Build the body for a multipart/form-data request.
Will successfully encode files when passed as a dict or a list of
tuples. Order is retained if data is a list of tuples but arbitrary
if parameters are supplied as a dict.
The tuples may be 2-tuples (filename, fileobj), 3-tuples (filename, fileobj, contentype)
or 4-tuples (filename, fileobj, contentype, custom_headers).
"""
if (not files):
raise ValueError("Files must be provided.")
elif isinstance(data, basestring):
raise ValueError("Data must not be a string.")
new_fields = []
fields = to_key_val_list(data or {})
files = to_key_val_list(files or {})
for field, val in fields:
if isinstance(val, basestring) or not hasattr(val, '__iter__'):
val = [val]
for v in val:
if v is not None:
# Don't call str() on bytestrings: in Py3 it all goes wrong.
if not isinstance(v, bytes):
v = str(v)
new_fields.append(
(field.decode('utf-8') if isinstance(field, bytes) else field,
v.encode('utf-8') if isinstance(v, str) else v))
for (k, v) in files:
# support for explicit filename
ft = None
fh = None
if isinstance(v, (tuple, list)):
if len(v) == 2:
fn, fp = v
elif len(v) == 3:
fn, fp, ft = v
else:
fn, fp, ft, fh = v
else:
fn = guess_filename(v) or k
fp = v
if isinstance(fp, (str, bytes, bytearray)):
fdata = fp
else:
> fdata = fp.read()
E AttributeError: 'NoneType' object has no attribute 'read'
``` | Azure/msrest-for-python | diff --git a/tests/test_client.py b/tests/test_client.py
index ee10d48..650eac5 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -204,6 +204,17 @@ class TestServiceClient(unittest.TestCase):
ServiceClient.send_formdata(mock_client, request, {'Content-Type':'1234'}, {'1':'1', '2':'2'})
mock_client.send.assert_called_with(request, {}, None, files={'1':'formatted', '2':'formatted'})
+ ServiceClient.send_formdata(mock_client, request, {'Content-Type':'1234'}, {'1':'1', '2':None})
+ mock_client.send.assert_called_with(request, {}, None, files={'1':'formatted'})
+
+ ServiceClient.send_formdata(mock_client, request, {'Content-Type':'application/x-www-form-urlencoded'}, {'1':'1', '2':'2'})
+ mock_client.send.assert_called_with(request, {}, None)
+ self.assertEqual(request.data, {'1':'1', '2':'2'})
+
+ ServiceClient.send_formdata(mock_client, request, {'Content-Type':'application/x-www-form-urlencoded'}, {'1':'1', '2':None})
+ mock_client.send.assert_called_with(request, {}, None)
+ self.assertEqual(request.data, {'1':'1'})
+
def test_format_data(self):
mock_client = mock.create_autospec(ServiceClient)
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 0.4 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"numpy>=1.16.0",
"pandas>=1.0.0"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | certifi==2025.1.31
charset-normalizer==3.4.1
exceptiongroup==1.2.2
idna==3.10
iniconfig==2.1.0
isodate==0.7.2
-e git+https://github.com/Azure/msrest-for-python.git@24deba7a7a9e335314058ec2d0b39a710f61be60#egg=msrest
numpy==2.0.2
oauthlib==3.2.2
packaging==24.2
pandas==2.2.3
pluggy==1.5.0
pytest==8.3.5
python-dateutil==2.9.0.post0
pytz==2025.2
requests==2.32.3
requests-oauthlib==2.0.0
six==1.17.0
tomli==2.2.1
tzdata==2025.2
urllib3==2.3.0
| name: msrest-for-python
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- certifi==2025.1.31
- charset-normalizer==3.4.1
- exceptiongroup==1.2.2
- idna==3.10
- iniconfig==2.1.0
- isodate==0.7.2
- numpy==2.0.2
- oauthlib==3.2.2
- packaging==24.2
- pandas==2.2.3
- pluggy==1.5.0
- pytest==8.3.5
- python-dateutil==2.9.0.post0
- pytz==2025.2
- requests==2.32.3
- requests-oauthlib==2.0.0
- six==1.17.0
- tomli==2.2.1
- tzdata==2025.2
- urllib3==2.3.0
prefix: /opt/conda/envs/msrest-for-python
| [
"tests/test_client.py::TestServiceClient::test_client_formdata_send"
] | [] | [
"tests/test_client.py::TestServiceClient::test_client_header",
"tests/test_client.py::TestServiceClient::test_client_request",
"tests/test_client.py::TestServiceClient::test_client_send",
"tests/test_client.py::TestServiceClient::test_format_data",
"tests/test_client.py::TestServiceClient::test_format_url",
"tests/test_client.py::TestServiceClient::test_session_callback"
] | [] | MIT License | 1,905 | 316 | [
"msrest/service_client.py"
] |
|
PyCQA__pyflakes-313 | 8d0f995dafda7105a6966057fd11383f1ea6fb71 | 2017-11-21 03:44:16 | 8a1feac08dae2478e3f67ab4018af86ff4ec56f0 | taion: Hold on, this breaks on http://mypy.readthedocs.io/en/latest/kinds_of_types.html#class-name-forward-references.
taion: This is ready to go now.
taion: Sorry for the repeated updates. This is now fixed.
myint: Thanks!
I was testing this against the CPython standard library and it seemed to crash when run against one of the test files ([`test_functools.py`](https://github.com/python/cpython/blob/bdb8315c21825487b54852ff0511fb4881ea2181/Lib/test/test_functools.py)). You can reproduce it by doing the above or more specifically:
```
$ wget https://raw.githubusercontent.com/python/cpython/bdb8315c21825487b54852ff0511fb4881ea2181/Lib/test/test_functools.py
$ ./py/bin/pyflakes test_functools.py
Traceback (most recent call last):
File "./py/bin/pyflakes", line 11, in <module>
load_entry_point('pyflakes==1.6.0', 'console_scripts', 'pyflakes')()
File "/Users/myint/tmp/pyflakes/py/lib/python3.6/site-packages/pyflakes/api.py", line 208, in main
warnings = checkRecursive(args, reporter)
File "/Users/myint/tmp/pyflakes/py/lib/python3.6/site-packages/pyflakes/api.py", line 165, in checkRecursive
warnings += checkPath(sourcePath, reporter)
File "/Users/myint/tmp/pyflakes/py/lib/python3.6/site-packages/pyflakes/api.py", line 112, in checkPath
return check(codestr, filename, reporter)
File "/Users/myint/tmp/pyflakes/py/lib/python3.6/site-packages/pyflakes/api.py", line 73, in check
w = checker.Checker(tree, filename)
File "/Users/myint/tmp/pyflakes/py/lib/python3.6/site-packages/pyflakes/checker.py", line 498, in __init__
self.runDeferred(self._deferredFunctions)
File "/Users/myint/tmp/pyflakes/py/lib/python3.6/site-packages/pyflakes/checker.py", line 535, in runDeferred
handler()
File "/Users/myint/tmp/pyflakes/py/lib/python3.6/site-packages/pyflakes/checker.py", line 929, in handleForwardAnnotation
parsed = ast.parse(annotation.s).body[0].value
File "/opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/ast.py", line 35, in parse
return compile(source, filename, mode, PyCF_ONLY_AST)
File "<unknown>", line 1
This is a new annotation
^
SyntaxError: invalid syntax
```
taion: Ah, my bad. Didn't handle annotations that weren't actually annotations. I've addressed it now and added test cases.
I've also added a new reported error here since there weren't any existing errors that covered this, and I don't think there are any reasons to want illegal annotations in normal code.
If you think this new error is reasonable, could you please advise on whether the naming is appropriate, given that this will be user-facing? The alternative would be to refer to this as a "string literal annotation" rather than a "forward annotation".
taion: Rebased and squashed.
Can you confirm that you're okay with the naming and the semantics around the new error? Something like `a: 'A B'` or `a: 'A; B'` is legal Python, but I think it's vanishing unlikely to actually be intentionally used. | diff --git a/pyflakes/checker.py b/pyflakes/checker.py
index d8f093a..a070822 100644
--- a/pyflakes/checker.py
+++ b/pyflakes/checker.py
@@ -922,6 +922,39 @@ class Checker(object):
self.popScope()
self.scopeStack = saved_stack
+ def handleAnnotation(self, annotation, node):
+ if isinstance(annotation, ast.Str):
+ # Defer handling forward annotation.
+ def handleForwardAnnotation():
+ try:
+ tree = ast.parse(annotation.s)
+ except SyntaxError:
+ self.report(
+ messages.ForwardAnnotationSyntaxError,
+ node,
+ annotation.s,
+ )
+ return
+
+ body = tree.body
+ if len(body) != 1 or not isinstance(body[0], ast.Expr):
+ self.report(
+ messages.ForwardAnnotationSyntaxError,
+ node,
+ annotation.s,
+ )
+ return
+
+ parsed_annotation = tree.body[0].value
+ for descendant in ast.walk(parsed_annotation):
+ ast.copy_location(descendant, annotation)
+
+ self.handleNode(parsed_annotation, node)
+
+ self.deferFunction(handleForwardAnnotation)
+ else:
+ self.handleNode(annotation, node)
+
def ignore(self, node):
pass
@@ -1160,9 +1193,11 @@ class Checker(object):
if arg in args[:idx]:
self.report(messages.DuplicateArgument, node, arg)
- for child in annotations + defaults:
- if child:
- self.handleNode(child, node)
+ for annotation in annotations:
+ self.handleAnnotation(annotation, node)
+
+ for default in defaults:
+ self.handleNode(default, node)
def runFunction():
@@ -1375,7 +1410,7 @@ class Checker(object):
# Otherwise it's not really ast.Store and shouldn't silence
# UndefinedLocal warnings.
self.handleNode(node.target, node)
- self.handleNode(node.annotation, node)
+ self.handleAnnotation(node.annotation, node)
if node.value:
# If the assignment has value, handle the *value* now.
self.handleNode(node.value, node)
diff --git a/pyflakes/messages.py b/pyflakes/messages.py
index 9e9406c..670f95f 100644
--- a/pyflakes/messages.py
+++ b/pyflakes/messages.py
@@ -231,3 +231,11 @@ class AssertTuple(Message):
Assertion test is a tuple, which are always True.
"""
message = 'assertion is always true, perhaps remove parentheses?'
+
+
+class ForwardAnnotationSyntaxError(Message):
+ message = 'syntax error in forward annotation %r'
+
+ def __init__(self, filename, loc, annotation):
+ Message.__init__(self, filename, loc)
+ self.message_args = (annotation,)
| Typing.TYPE_CHECKING imports raise F401
```
$ pip install flake8
```
```
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from my_module.test_class import MyClass
object: 'MyClass' = None
```
Flake 8 gives the following error in this scenario:
`my_module\test_class.py:4:5: F401 'my_module.test_class.MyClass' imported but unused`
My current work around is this:
```
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from my_module.test_class import MyClass # noqa
object: 'MyClass' = None
```
Any better solutions? Couldwe somehow automatically ignore all F401 errors within the `if TYPE_CHECKING:` block? | PyCQA/pyflakes | diff --git a/pyflakes/test/test_other.py b/pyflakes/test/test_other.py
index ba052f1..14f213c 100644
--- a/pyflakes/test/test_other.py
+++ b/pyflakes/test/test_other.py
@@ -1890,3 +1890,77 @@ class TestAsyncStatements(TestCase):
class C:
foo: not_a_real_type = None
''', m.UndefinedName)
+ self.flakes('''
+ from foo import Bar
+ bar: Bar
+ ''')
+ self.flakes('''
+ from foo import Bar
+ bar: 'Bar'
+ ''')
+ self.flakes('''
+ import foo
+ bar: foo.Bar
+ ''')
+ self.flakes('''
+ import foo
+ bar: 'foo.Bar'
+ ''')
+ self.flakes('''
+ from foo import Bar
+ def f(bar: Bar): pass
+ ''')
+ self.flakes('''
+ from foo import Bar
+ def f(bar: 'Bar'): pass
+ ''')
+ self.flakes('''
+ from foo import Bar
+ def f(bar) -> Bar: return bar
+ ''')
+ self.flakes('''
+ from foo import Bar
+ def f(bar) -> 'Bar': return bar
+ ''')
+ self.flakes('''
+ bar: 'Bar'
+ ''', m.UndefinedName)
+ self.flakes('''
+ bar: 'foo.Bar'
+ ''', m.UndefinedName)
+ self.flakes('''
+ from foo import Bar
+ bar: str
+ ''', m.UnusedImport)
+ self.flakes('''
+ from foo import Bar
+ def f(bar: str): pass
+ ''', m.UnusedImport)
+ self.flakes('''
+ def f(a: A) -> A: pass
+ class A: pass
+ ''', m.UndefinedName, m.UndefinedName)
+ self.flakes('''
+ def f(a: 'A') -> 'A': return a
+ class A: pass
+ ''')
+ self.flakes('''
+ a: A
+ class A: pass
+ ''', m.UndefinedName)
+ self.flakes('''
+ a: 'A'
+ class A: pass
+ ''')
+ self.flakes('''
+ a: 'A B'
+ ''', m.ForwardAnnotationSyntaxError)
+ self.flakes('''
+ a: 'A; B'
+ ''', m.ForwardAnnotationSyntaxError)
+ self.flakes('''
+ a: '1 + 2'
+ ''')
+ self.flakes('''
+ a: 'a: "A"'
+ ''', m.ForwardAnnotationSyntaxError)
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 2
} | 1.6 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"flake8",
"pytest"
],
"pre_install": null,
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
flake8==5.0.4
importlib-metadata==4.2.0
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
mccabe==0.7.0
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pycodestyle==2.9.1
-e git+https://github.com/PyCQA/pyflakes.git@8d0f995dafda7105a6966057fd11383f1ea6fb71#egg=pyflakes
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: pyflakes
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- flake8==5.0.4
- importlib-metadata==4.2.0
- mccabe==0.7.0
- pycodestyle==2.9.1
prefix: /opt/conda/envs/pyflakes
| [
"pyflakes/test/test_other.py::TestAsyncStatements::test_variable_annotations"
] | [] | [
"pyflakes/test/test_other.py::Test::test_attrAugmentedAssignment",
"pyflakes/test/test_other.py::Test::test_breakInsideLoop",
"pyflakes/test/test_other.py::Test::test_breakOutsideLoop",
"pyflakes/test/test_other.py::Test::test_classFunctionDecorator",
"pyflakes/test/test_other.py::Test::test_classNameDefinedPreviously",
"pyflakes/test/test_other.py::Test::test_classNameUndefinedInClassBody",
"pyflakes/test/test_other.py::Test::test_classRedefinedAsFunction",
"pyflakes/test/test_other.py::Test::test_classRedefinition",
"pyflakes/test/test_other.py::Test::test_classWithReturn",
"pyflakes/test/test_other.py::Test::test_classWithYield",
"pyflakes/test/test_other.py::Test::test_classWithYieldFrom",
"pyflakes/test/test_other.py::Test::test_comparison",
"pyflakes/test/test_other.py::Test::test_containment",
"pyflakes/test/test_other.py::Test::test_continueInFinally",
"pyflakes/test/test_other.py::Test::test_continueInsideLoop",
"pyflakes/test/test_other.py::Test::test_continueOutsideLoop",
"pyflakes/test/test_other.py::Test::test_defaultExceptLast",
"pyflakes/test/test_other.py::Test::test_defaultExceptNotLast",
"pyflakes/test/test_other.py::Test::test_doubleAssignmentConditionally",
"pyflakes/test/test_other.py::Test::test_doubleAssignmentWithUse",
"pyflakes/test/test_other.py::Test::test_duplicateArgs",
"pyflakes/test/test_other.py::Test::test_ellipsis",
"pyflakes/test/test_other.py::Test::test_extendedSlice",
"pyflakes/test/test_other.py::Test::test_functionDecorator",
"pyflakes/test/test_other.py::Test::test_functionRedefinedAsClass",
"pyflakes/test/test_other.py::Test::test_globalDeclaredInDifferentScope",
"pyflakes/test/test_other.py::Test::test_identity",
"pyflakes/test/test_other.py::Test::test_localReferencedBeforeAssignment",
"pyflakes/test/test_other.py::Test::test_loopControl",
"pyflakes/test/test_other.py::Test::test_modernProperty",
"pyflakes/test/test_other.py::Test::test_moduleWithReturn",
"pyflakes/test/test_other.py::Test::test_moduleWithYield",
"pyflakes/test/test_other.py::Test::test_moduleWithYieldFrom",
"pyflakes/test/test_other.py::Test::test_redefinedClassFunction",
"pyflakes/test/test_other.py::Test::test_redefinedFunction",
"pyflakes/test/test_other.py::Test::test_redefinedIfElseFunction",
"pyflakes/test/test_other.py::Test::test_redefinedIfElseInListComp",
"pyflakes/test/test_other.py::Test::test_redefinedIfFunction",
"pyflakes/test/test_other.py::Test::test_redefinedInDictComprehension",
"pyflakes/test/test_other.py::Test::test_redefinedInGenerator",
"pyflakes/test/test_other.py::Test::test_redefinedInSetComprehension",
"pyflakes/test/test_other.py::Test::test_redefinedTryExceptFunction",
"pyflakes/test/test_other.py::Test::test_redefinedTryFunction",
"pyflakes/test/test_other.py::Test::test_starredAssignmentErrors",
"pyflakes/test/test_other.py::Test::test_starredAssignmentNoError",
"pyflakes/test/test_other.py::Test::test_unaryPlus",
"pyflakes/test/test_other.py::Test::test_undefinedBaseClass",
"pyflakes/test/test_other.py::Test::test_varAugmentedAssignment",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_assert_static",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_assert_tuple",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_assert_tuple_empty",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_assert_with_message",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_assert_without_message",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_assignInForLoop",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_assignInListComprehension",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_assignToGlobal",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_assignToMember",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_assignToNonlocal",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_assignmentInsideLoop",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_augmentedAssignmentImportedFunctionCall",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_closedOver",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_dictComprehension",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_doubleClosedOver",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_exceptWithoutNameInFunction",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_exceptWithoutNameInFunctionTuple",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_exceptionUnusedInExcept",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_exceptionUnusedInExceptInFunction",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_exceptionUsedInExcept",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_f_string",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_generatorExpression",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_ifexp",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_listUnpacking",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_setComprehensionAndLiteral",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_tracebackhideSpecialVariable",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_tupleUnpacking",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_unusedVariable",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_unusedVariableAsLocals",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_unusedVariableNoLocals",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_variableUsedInLoop",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_withStatementAttributeName",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_withStatementComplicatedTarget",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_withStatementListNames",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_withStatementNameDefinedInBody",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_withStatementNoNames",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_withStatementSingleName",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_withStatementSingleNameRedefined",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_withStatementSingleNameUndefined",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_withStatementSubscript",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_withStatementSubscriptUndefined",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_withStatementTupleNames",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_withStatementTupleNamesRedefined",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_withStatementTupleNamesUndefined",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_withStatementUndefinedInExpression",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_withStatementUndefinedInside",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_yieldFromUndefined",
"pyflakes/test/test_other.py::TestAsyncStatements::test_asyncDef",
"pyflakes/test/test_other.py::TestAsyncStatements::test_asyncDefAwait",
"pyflakes/test/test_other.py::TestAsyncStatements::test_asyncDefUndefined",
"pyflakes/test/test_other.py::TestAsyncStatements::test_asyncFor",
"pyflakes/test/test_other.py::TestAsyncStatements::test_asyncWith",
"pyflakes/test/test_other.py::TestAsyncStatements::test_asyncWithItem",
"pyflakes/test/test_other.py::TestAsyncStatements::test_continueInAsyncForFinally",
"pyflakes/test/test_other.py::TestAsyncStatements::test_formatstring",
"pyflakes/test/test_other.py::TestAsyncStatements::test_loopControlInAsyncFor",
"pyflakes/test/test_other.py::TestAsyncStatements::test_loopControlInAsyncForElse",
"pyflakes/test/test_other.py::TestAsyncStatements::test_matmul"
] | [] | MIT License | 1,906 | 680 | [
"pyflakes/checker.py",
"pyflakes/messages.py"
] |
wearewhys__magnivore-12 | be723f7f575376d0ce25b0590bd46dcc6f34ace8 | 2017-11-21 11:53:18 | acf182faeb0cf80157ec5d7b448b355687dcbd94 | diff --git a/magnivore/Lexicon.py b/magnivore/Lexicon.py
index f9fa08d..84ae7a2 100644
--- a/magnivore/Lexicon.py
+++ b/magnivore/Lexicon.py
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
import re
+from decimal import Decimal
from functools import reduce
from .Tracker import Tracker
@@ -38,7 +39,8 @@ class Lexicon:
The factor rule multiplies the value by a factor.
"""
value = cls._dot_reduce(rule['from'], target)
- return value * rule['factor']
+ original_type = type(value)
+ return original_type(Decimal(value) * Decimal(rule['factor']))
@classmethod
def format(cls, rule, target):
| Lexicon.factor should check the type of the values
Lexicon.factor should check the values types or errors will happen | wearewhys/magnivore | diff --git a/tests/unit/Lexicon.py b/tests/unit/Lexicon.py
index 4f8e888..f1c85d6 100644
--- a/tests/unit/Lexicon.py
+++ b/tests/unit/Lexicon.py
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
import re
+from decimal import Decimal
from unittest.mock import MagicMock
from magnivore.Lexicon import Lexicon
@@ -48,17 +49,20 @@ def test_lexicon_transform(target):
assert result == rule['transform'][target.temperature]
[email protected]('from_data, target', [
- ('value', MagicMock(value=100)),
- ('related.value', MagicMock(related=MagicMock(value=100)))
[email protected]('from_data, target, expected', [
+ ('value', MagicMock(value=100), 50),
+ ('value', MagicMock(value=Decimal(100)), Decimal(50)),
+ ('value', MagicMock(value=100.0), 50.0),
+ ('related.value', MagicMock(related=MagicMock(value=100)), 50)
])
-def test_lexicon_factor(from_data, target):
+def test_lexicon_factor(from_data, target, expected):
rule = {
'from': from_data,
'factor': 0.5
}
result = Lexicon.factor(rule, target)
- assert result == 50
+ assert result == expected
+ assert type(result) == type(expected)
@mark.parametrize('from_data, format, expected', [
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 3,
"test_score": 1
},
"num_modified_files": 1
} | 0.4 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"pytest-mock"
],
"pre_install": null,
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | aratrum==0.3.2
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
click==8.0.4
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
-e git+https://github.com/wearewhys/magnivore.git@be723f7f575376d0ce25b0590bd46dcc6f34ace8#egg=magnivore
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
peewee==3.17.9
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
pytest-mock==3.6.1
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
ujson==4.3.0
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: magnivore
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- aratrum==0.3.2
- click==8.0.4
- peewee==3.17.9
- pytest-mock==3.6.1
- ujson==4.3.0
prefix: /opt/conda/envs/magnivore
| [
"tests/unit/Lexicon.py::test_lexicon_factor[value-target0-50]",
"tests/unit/Lexicon.py::test_lexicon_factor[value-target1-expected1]",
"tests/unit/Lexicon.py::test_lexicon_factor[related.value-target3-50]"
] | [] | [
"tests/unit/Lexicon.py::test_lexicon_basic",
"tests/unit/Lexicon.py::test_lexicon_basic_dot",
"tests/unit/Lexicon.py::test_lexicon_basic_dot_double",
"tests/unit/Lexicon.py::test_lexicon_basic_null[field]",
"tests/unit/Lexicon.py::test_lexicon_basic_null[table.field]",
"tests/unit/Lexicon.py::test_lexicon_basic_null[table.field.nested]",
"tests/unit/Lexicon.py::test_lexicon_transform[target0]",
"tests/unit/Lexicon.py::test_lexicon_transform[target1]",
"tests/unit/Lexicon.py::test_lexicon_factor[value-target2-50.0]",
"tests/unit/Lexicon.py::test_lexicon_format[birthyear-{}-0-0-1992-0-0]",
"tests/unit/Lexicon.py::test_lexicon_format[from_data1-{}-{}-0-1992-9-0]",
"tests/unit/Lexicon.py::test_lexicon_format_dot[rel.birthyear-{}-0-0-1992-0-0]",
"tests/unit/Lexicon.py::test_lexicon_format_dot[from_data1-{}-{}-0-1992-9-0]",
"tests/unit/Lexicon.py::test_lexicon_match",
"tests/unit/Lexicon.py::test_lexicon_match_none",
"tests/unit/Lexicon.py::test_lexicon_match_from",
"tests/unit/Lexicon.py::test_lexicon_match_dot",
"tests/unit/Lexicon.py::test_lexicon_match_from_none",
"tests/unit/Lexicon.py::test_lexicon_match_none_log",
"tests/unit/Lexicon.py::test_lexicon_sync",
"tests/unit/Lexicon.py::test_lexicon_sync_none",
"tests/unit/Lexicon.py::test_lexicon_static",
"tests/unit/Lexicon.py::test_lexicon_expression",
"tests/unit/Lexicon.py::test_lexicon_expression_dot",
"tests/unit/Lexicon.py::test_lexicon_expression_none"
] | [] | Apache License 2.0 | 1,908 | 193 | [
"magnivore/Lexicon.py"
] |
|
wearewhys__magnivore-14 | acf182faeb0cf80157ec5d7b448b355687dcbd94 | 2017-11-21 15:44:33 | acf182faeb0cf80157ec5d7b448b355687dcbd94 | diff --git a/magnivore/Lexicon.py b/magnivore/Lexicon.py
index 84ae7a2..acf038b 100644
--- a/magnivore/Lexicon.py
+++ b/magnivore/Lexicon.py
@@ -2,6 +2,7 @@
import re
from decimal import Decimal
from functools import reduce
+from math import ceil, floor
from .Tracker import Tracker
@@ -40,7 +41,12 @@ class Lexicon:
"""
value = cls._dot_reduce(rule['from'], target)
original_type = type(value)
- return original_type(Decimal(value) * Decimal(rule['factor']))
+ result = Decimal(value) * Decimal(rule['factor'])
+ if 'round' in rule:
+ if rule['round'] == 'up':
+ return original_type(ceil(result))
+ return original_type(floor(result))
+ return original_type(result)
@classmethod
def format(cls, rule, target):
| Add possibility to specify whether to round up or down in factor | wearewhys/magnivore | diff --git a/tests/unit/Lexicon.py b/tests/unit/Lexicon.py
index f1c85d6..3af833c 100644
--- a/tests/unit/Lexicon.py
+++ b/tests/unit/Lexicon.py
@@ -65,6 +65,19 @@ def test_lexicon_factor(from_data, target, expected):
assert type(result) == type(expected)
[email protected]('rounding, expected', [
+ ('down', 47),
+ ('up', 48)
+])
+def test_lexicon_factor_round(rounding, expected):
+ rule = {
+ 'from': 'value',
+ 'round': rounding,
+ 'factor': 0.5
+ }
+ assert Lexicon.factor(rule, MagicMock(value=95)) == expected
+
+
@mark.parametrize('from_data, format, expected', [
('birthyear', '{}-0-0', '1992-0-0'),
(['birthyear', 'birthmonth'], '{}-{}-0', '1992-9-0')
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 3,
"test_score": 0
},
"num_modified_files": 1
} | 0.4 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"pytest-mock",
"psycopg2",
"PyMySQL"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | aratrum==0.3.2
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
click==8.0.4
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
-e git+https://github.com/wearewhys/magnivore.git@acf182faeb0cf80157ec5d7b448b355687dcbd94#egg=magnivore
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
peewee==3.17.9
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
psycopg2==2.7.7
py @ file:///opt/conda/conda-bld/py_1644396412707/work
PyMySQL==1.0.2
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
pytest-mock==3.6.1
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
ujson==4.3.0
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: magnivore
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- aratrum==0.3.2
- click==8.0.4
- peewee==3.17.9
- psycopg2==2.7.7
- pymysql==1.0.2
- pytest-mock==3.6.1
- ujson==4.3.0
prefix: /opt/conda/envs/magnivore
| [
"tests/unit/Lexicon.py::test_lexicon_factor_round[up-48]"
] | [] | [
"tests/unit/Lexicon.py::test_lexicon_basic",
"tests/unit/Lexicon.py::test_lexicon_basic_dot",
"tests/unit/Lexicon.py::test_lexicon_basic_dot_double",
"tests/unit/Lexicon.py::test_lexicon_basic_null[field]",
"tests/unit/Lexicon.py::test_lexicon_basic_null[table.field]",
"tests/unit/Lexicon.py::test_lexicon_basic_null[table.field.nested]",
"tests/unit/Lexicon.py::test_lexicon_transform[target0]",
"tests/unit/Lexicon.py::test_lexicon_transform[target1]",
"tests/unit/Lexicon.py::test_lexicon_factor[value-target0-50]",
"tests/unit/Lexicon.py::test_lexicon_factor[value-target1-expected1]",
"tests/unit/Lexicon.py::test_lexicon_factor[value-target2-50.0]",
"tests/unit/Lexicon.py::test_lexicon_factor[related.value-target3-50]",
"tests/unit/Lexicon.py::test_lexicon_factor_round[down-47]",
"tests/unit/Lexicon.py::test_lexicon_format[birthyear-{}-0-0-1992-0-0]",
"tests/unit/Lexicon.py::test_lexicon_format[from_data1-{}-{}-0-1992-9-0]",
"tests/unit/Lexicon.py::test_lexicon_format_dot[rel.birthyear-{}-0-0-1992-0-0]",
"tests/unit/Lexicon.py::test_lexicon_format_dot[from_data1-{}-{}-0-1992-9-0]",
"tests/unit/Lexicon.py::test_lexicon_match",
"tests/unit/Lexicon.py::test_lexicon_match_none",
"tests/unit/Lexicon.py::test_lexicon_match_from",
"tests/unit/Lexicon.py::test_lexicon_match_dot",
"tests/unit/Lexicon.py::test_lexicon_match_from_none",
"tests/unit/Lexicon.py::test_lexicon_match_none_log",
"tests/unit/Lexicon.py::test_lexicon_sync",
"tests/unit/Lexicon.py::test_lexicon_sync_none",
"tests/unit/Lexicon.py::test_lexicon_static",
"tests/unit/Lexicon.py::test_lexicon_expression",
"tests/unit/Lexicon.py::test_lexicon_expression_dot",
"tests/unit/Lexicon.py::test_lexicon_expression_none"
] | [] | Apache License 2.0 | 1,909 | 228 | [
"magnivore/Lexicon.py"
] |
|
Hrabal__TemPy-30 | 7995be8f846c0aa8338fa0f3bc01aa3e3a21a6b8 | 2017-11-21 20:30:31 | 7995be8f846c0aa8338fa0f3bc01aa3e3a21a6b8 | diff --git a/tempy/tempy.py b/tempy/tempy.py
index 3385be4..83ec459 100755
--- a/tempy/tempy.py
+++ b/tempy/tempy.py
@@ -2,19 +2,20 @@
# @author: Federico Cerchiari <[email protected]>
"""Main Tempy classes"""
import html
+from collections import deque, Iterable
from copy import copy
-from uuid import uuid4
-from itertools import chain
from functools import wraps
-from collections import deque
+from itertools import chain
from types import GeneratorType
+from uuid import uuid4
-from .exceptions import TagError, WrongContentError, DOMModByKeyError, DOMModByIndexError
+from .exceptions import TagError, WrongContentError, WrongArgsError, DOMModByKeyError, DOMModByIndexError
from .tempyrepr import REPRFinder
class DOMGroup:
"""Wrapper used to manage element insertion."""
+
def __init__(self, name, obj):
super().__init__()
if not name and issubclass(obj.__class__, DOMElement):
@@ -167,7 +168,7 @@ class DOMElement(REPRFinder):
if n == 0:
self.parent.pop(self._own_index)
return self
- return self.after(self * (n-1))
+ return self.after(self * (n - 1))
def to_code(self, pretty=False):
ret = []
@@ -252,6 +253,7 @@ class DOMElement(REPRFinder):
# this trick is used to avoid circular imports
class Patched(tempyREPR_cls, DOMElement):
pass
+
child = Patched(child)
try:
yield child.render(pretty=pretty)
@@ -272,6 +274,7 @@ class DOMElement(REPRFinder):
Takes args and kwargs and calls the decorated method one time for each argument provided.
The reverse parameter should be used for prepending (relative to self) methods.
"""
+
def _receiver(func):
@wraps(func)
def wrapped(inst, *tags, **kwtags):
@@ -279,7 +282,9 @@ class DOMElement(REPRFinder):
inst._stable = False
func(inst, i, dom_group)
return inst
+
return wrapped
+
return _receiver
def _insert(self, dom_group, idx=None, prepend=False):
@@ -298,7 +303,7 @@ class DOMElement(REPRFinder):
for i_group, elem in enumerate(dom_group):
if elem is not None:
# Element insertion in this DOMElement childs
- self.childs.insert(idx+i_group, elem)
+ self.childs.insert(idx + i_group, elem)
# Managing child attributes if needed
if issubclass(elem.__class__, DOMElement):
elem.parent = self
@@ -393,7 +398,6 @@ class DOMElement(REPRFinder):
def wrap(self, other):
"""Wraps this element inside another empty tag."""
- # TODO: make multiple with content_receiver
if other.childs:
raise TagError(self, 'Wrapping in a non empty Tag is forbidden.')
if self.parent:
@@ -402,6 +406,51 @@ class DOMElement(REPRFinder):
other.append(self)
return self
+ def wrap_many(self, *args, strict=False):
+ """Wraps different copies of this element inside all empty tags
+ listed in params or param's (non-empty) iterators.
+
+ Returns list of copies of this element wrapped inside args
+ or None if not succeeded, in the same order and same structure,
+ i.e. args = (Div(), (Div())) -> value = (A(...), (A(...)))
+
+ If on some args it must raise TagError, it will only if strict is True,
+ otherwise it will do nothing with them and return Nones on their positions"""
+
+ for arg in args:
+ is_elem = arg and isinstance(arg, DOMElement)
+ is_elem_iter = (not is_elem and arg and isinstance(arg, Iterable) and
+ isinstance(iter(arg).__next__(), DOMElement))
+ if not (is_elem or is_elem_iter):
+ raise WrongArgsError(self, 'Argument {} is not DOMElement nor iterable of DOMElements'.format(arg))
+
+ wcopies = []
+ failure = []
+
+ def wrap_next(tag, idx):
+ nonlocal wcopies, failure
+ next_copy = self.__copy__()
+ try:
+ return next_copy.wrap(tag)
+ except TagError:
+ failure.append(idx)
+ return next_copy
+
+ for arg_idx, arg in enumerate(args):
+ if isinstance(arg, DOMElement):
+ wcopies.append(wrap_next(arg, (arg_idx, -1)))
+ else:
+ iter_wcopies = []
+ for iter_idx, t in enumerate(arg):
+ iter_wcopies.append(wrap_next(t, (arg_idx, iter_idx)))
+ wcopies.append(type(arg)(iter_wcopies))
+
+ if failure and strict:
+ raise TagError(self, 'Wrapping in a non empty Tag is forbidden, failed on arguments ' +
+ ', '.join(list(map(lambda idx: str(idx[0]) if idx[1] == -1 else '[{1}] of {0}'.format(*idx),
+ failure))))
+ return wcopies
+
def wrap_inner(self, other):
self.move_childs(other)
self(other)
@@ -599,7 +648,6 @@ class DOMElement(REPRFinder):
class Escaped(DOMElement):
-
def __init__(self, content, **kwargs):
super().__init__(**kwargs)
self._render = content
| TODO: Wrapping into multiple containers
`DOMElement.wrap` now accepts a single `DOMElement` instance and wraps `self` into this other instance.
Method signature should change to accept a iterable as first argument or multiple (single or iterable) arguments. `self` (copies of) will be wrapped inside each given element, i.e:
some_divs = [Div() for _ in range(10)]
to_be_wrapped = Span()
to_be_wrapped.wrap(some_divs, P(), A(), (Pre() for _ in range(10)))
wrap will add `to_be_wrapped` into each div in `some_divs`, and into each `DOMElement` found in args.
Extra implementation:
add kwargs in signature so wrapping will perform a named insertion.
https://codeclimate.com/github/Hrabal/TemPy/tempy/tempy.py#issue_59a0588692503c0001000033 | Hrabal/TemPy | diff --git a/tests/test_DOMElement.py b/tests/test_DOMElement.py
index f8a95d7..87737ff 100755
--- a/tests/test_DOMElement.py
+++ b/tests/test_DOMElement.py
@@ -4,14 +4,13 @@
"""
import unittest
+from tempy.elements import Tag, TagAttrs
+from tempy.exceptions import WrongContentError, WrongArgsError, TagError, DOMModByKeyError, DOMModByIndexError
from tempy.tags import Div, A, P, Html, Head, Body
from tempy.tempy import DOMElement, DOMGroup, Escaped
-from tempy.elements import Tag, TagAttrs
-from tempy.exceptions import WrongContentError, TagError, DOMModByKeyError, DOMModByIndexError
class TestDOMelement(unittest.TestCase):
-
def setUp(self):
self.page = Html()
@@ -84,13 +83,13 @@ class TestDOMelement(unittest.TestCase):
new1 = Div().append_to(self.page)
new2 = Div()
new1.after(new2)
- self.assertEqual(new1._own_index, new2._own_index-1)
+ self.assertEqual(new1._own_index, new2._own_index - 1)
def test_before(self):
new1 = Div().append_to(self.page)
new2 = Div()
new1.before(new2)
- self.assertEqual(new1._own_index, new2._own_index+1)
+ self.assertEqual(new1._own_index, new2._own_index + 1)
def test_prepend(self):
self.page(Div(), Div())
@@ -134,6 +133,53 @@ class TestDOMelement(unittest.TestCase):
self.assertTrue(to_wrap in container)
self.assertTrue(container in outermost)
+ def test_wrap_many(self):
+ def flatten(cnt):
+ res = []
+ for el in cnt:
+ if isinstance(el, DOMElement):
+ res.append(el)
+ else:
+ res.extend(el)
+ return res
+
+ def test_return_values(inp, outp):
+ self.assertEqual(len(inp), len(outp))
+ for _ in range(len(inp)):
+ t1, t2 = type(inp[_]), type(outp[_])
+ self.assertTrue(t1 == t2 or
+ issubclass(t1, DOMElement) and issubclass(t2, DOMElement))
+
+ def test_correctly_wrapped(child, parent):
+ self.assertTrue(child in parent)
+ self.assertTrue(child.get_parent() == parent)
+
+ # check if it works correct with correct arguments
+ args = (Div(), [Div(), Div()], (Div(), Div()))
+ new = A().wrap_many(*args)
+ test_return_values(args, new)
+ for c, p in zip(flatten(new), flatten(args)):
+ test_correctly_wrapped(c, p)
+
+ # check if it raises TagError with strict and returns None without
+ args = (Div()(A()), (Div(), Div()))
+ with self.assertRaisesRegex(TagError, r'^.+arguments 0$'):
+ A().wrap_many(*args, strict=True)
+ new = A().wrap_many(*args)
+ self.assertIs(new[0].get_parent(), None)
+
+ args = (Div()(A()), (Div(), Div()(A())))
+ with self.assertRaisesRegex(TagError, r'^.+arguments 0, \[1\] of 1'):
+ A().wrap_many(*args, strict=True)
+ new = A().wrap_many(*args)
+ self.assertIs(new[0].get_parent(), None)
+ self.assertIs(new[1][1].get_parent(), None)
+
+ # check if it raises WrongArgsError
+ args = (Div(), '')
+ with self.assertRaises(WrongArgsError):
+ A().wrap_many(*args)
+
def test_replace_with(self):
old = Div().append_to(self.page)
old.replace_with(A())
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 1
} | 1.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"pytest-coverage"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | coverage==7.8.0
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
packaging @ file:///croot/packaging_1734472117206/work
pluggy @ file:///croot/pluggy_1733169602837/work
pytest @ file:///croot/pytest_1738938843180/work
pytest-cov==6.0.0
pytest-cover==3.0.0
pytest-coverage==0.0
-e git+https://github.com/Hrabal/TemPy.git@7995be8f846c0aa8338fa0f3bc01aa3e3a21a6b8#egg=tem_py
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
| name: TemPy
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- coverage==7.8.0
- pytest-cov==6.0.0
- pytest-cover==3.0.0
- pytest-coverage==0.0
prefix: /opt/conda/envs/TemPy
| [
"tests/test_DOMElement.py::TestDOMelement::test_wrap_many"
] | [] | [
"tests/test_DOMElement.py::TestDOMelement::test__find_content",
"tests/test_DOMElement.py::TestDOMelement::test__insert_negative_index",
"tests/test_DOMElement.py::TestDOMelement::test_add",
"tests/test_DOMElement.py::TestDOMelement::test_after",
"tests/test_DOMElement.py::TestDOMelement::test_append",
"tests/test_DOMElement.py::TestDOMelement::test_append_to",
"tests/test_DOMElement.py::TestDOMelement::test_attrs",
"tests/test_DOMElement.py::TestDOMelement::test_before",
"tests/test_DOMElement.py::TestDOMelement::test_bft",
"tests/test_DOMElement.py::TestDOMelement::test_children",
"tests/test_DOMElement.py::TestDOMelement::test_childs_index",
"tests/test_DOMElement.py::TestDOMelement::test_clone",
"tests/test_DOMElement.py::TestDOMelement::test_contents",
"tests/test_DOMElement.py::TestDOMelement::test_copy",
"tests/test_DOMElement.py::TestDOMelement::test_create_call_generator",
"tests/test_DOMElement.py::TestDOMelement::test_create_call_list",
"tests/test_DOMElement.py::TestDOMelement::test_create_call_multitag",
"tests/test_DOMElement.py::TestDOMelement::test_create_call_singletag",
"tests/test_DOMElement.py::TestDOMelement::test_create_call_tuple",
"tests/test_DOMElement.py::TestDOMelement::test_create_instantiation",
"tests/test_DOMElement.py::TestDOMelement::test_dft",
"tests/test_DOMElement.py::TestDOMelement::test_dft_reverse",
"tests/test_DOMElement.py::TestDOMelement::test_empty",
"tests/test_DOMElement.py::TestDOMelement::test_equality",
"tests/test_DOMElement.py::TestDOMelement::test_escaped",
"tests/test_DOMElement.py::TestDOMelement::test_get_parent",
"tests/test_DOMElement.py::TestDOMelement::test_getattr",
"tests/test_DOMElement.py::TestDOMelement::test_hash",
"tests/test_DOMElement.py::TestDOMelement::test_iadd",
"tests/test_DOMElement.py::TestDOMelement::test_imul",
"tests/test_DOMElement.py::TestDOMelement::test_imul_zero",
"tests/test_DOMElement.py::TestDOMelement::test_inject",
"tests/test_DOMElement.py::TestDOMelement::test_is_root",
"tests/test_DOMElement.py::TestDOMelement::test_isub",
"tests/test_DOMElement.py::TestDOMelement::test_iter_chidls",
"tests/test_DOMElement.py::TestDOMelement::test_iter_reversed",
"tests/test_DOMElement.py::TestDOMelement::test_move",
"tests/test_DOMElement.py::TestDOMelement::test_move_childs",
"tests/test_DOMElement.py::TestDOMelement::test_mul",
"tests/test_DOMElement.py::TestDOMelement::test_next",
"tests/test_DOMElement.py::TestDOMelement::test_next_all",
"tests/test_DOMElement.py::TestDOMelement::test_next_childs",
"tests/test_DOMElement.py::TestDOMelement::test_next_magic",
"tests/test_DOMElement.py::TestDOMelement::test_own_index",
"tests/test_DOMElement.py::TestDOMelement::test_parent",
"tests/test_DOMElement.py::TestDOMelement::test_pop",
"tests/test_DOMElement.py::TestDOMelement::test_prepend",
"tests/test_DOMElement.py::TestDOMelement::test_prepend_to",
"tests/test_DOMElement.py::TestDOMelement::test_prev",
"tests/test_DOMElement.py::TestDOMelement::test_prev_all",
"tests/test_DOMElement.py::TestDOMelement::test_remove",
"tests/test_DOMElement.py::TestDOMelement::test_render_raise",
"tests/test_DOMElement.py::TestDOMelement::test_replace_with",
"tests/test_DOMElement.py::TestDOMelement::test_reverse",
"tests/test_DOMElement.py::TestDOMelement::test_root",
"tests/test_DOMElement.py::TestDOMelement::test_siblings",
"tests/test_DOMElement.py::TestDOMelement::test_slice",
"tests/test_DOMElement.py::TestDOMelement::test_sub",
"tests/test_DOMElement.py::TestDOMelement::test_wrap",
"tests/test_DOMElement.py::TestDOMelement::test_wrap_inner"
] | [] | Apache License 2.0 | 1,910 | 1,306 | [
"tempy/tempy.py"
] |
|
sprymix__csscompressor-8 | bec3e582cb5ab7182a0ca08ba381e491b94ed10c | 2017-11-25 03:33:22 | bec3e582cb5ab7182a0ca08ba381e491b94ed10c | diff --git a/csscompressor/__init__.py b/csscompressor/__init__.py
index e1ae16b..e34af04 100644
--- a/csscompressor/__init__.py
+++ b/csscompressor/__init__.py
@@ -113,6 +113,7 @@ def _preserve_call_tokens(css, regexp, preserved_tokens, remove_ws=False):
max_idx = len(css) - 1
append_idx = 0
sb = []
+ nest_term = None
for match in regexp.finditer(css):
name = match.group(1)
@@ -121,17 +122,29 @@ def _preserve_call_tokens(css, regexp, preserved_tokens, remove_ws=False):
term = match.group(2) if match.lastindex > 1 else None
if not term:
term = ')'
+ nest_term = '('
found_term = False
end_idx = match.end(0) - 1
+ nest_idx = end_idx if nest_term else 0
+ nested = False
while not found_term and (end_idx + 1) <= max_idx:
+ if nest_term:
+ nest_idx = css.find(nest_term, nest_idx + 1)
end_idx = css.find(term, end_idx + 1)
if end_idx > 0:
+ if nest_idx > 0 and nest_idx < end_idx and \
+ css[nest_idx - 1] != '\\':
+ nested = True
+
if css[end_idx - 1] != '\\':
- found_term = True
- if term != ')':
- end_idx = css.find(')', end_idx)
+ if nested:
+ nested = False
+ else:
+ found_term = True
+ if term != ')':
+ end_idx = css.find(')', end_idx)
else:
raise ValueError('malformed css')
@@ -139,7 +152,7 @@ def _preserve_call_tokens(css, regexp, preserved_tokens, remove_ws=False):
assert found_term
- token = css[start_idx:end_idx]
+ token = css[start_idx:end_idx].strip()
if remove_ws:
token = _ws_re.sub('', token)
| Major breakage when using some calc statements
I noticed csscompressor completely breaks some css rules with + in them. For example:
`>>> csscompressor.compress("calc( (10vh - 100px) + 30px )");
'calc( (10vh - 100px)+30px)'
> > > csscompressor.compress("calc( (10vh - 100px) / 4 + 30px )");
> > > 'calc( (10vh - 100px) / 4+30px)'
> > > `
The + and - operators must always be surrounded by whitespace, according to the [spec](https://developer.mozilla.org/en-US/docs/Web/CSS/calc), and when compressed, this rule is now broken and doesn't work in the browser.
Considering calc should be well-supported by just about every browser updated in the last 4 years, calc is super handy... but unfortunately breaks if you decide to compress with csscompress.
| sprymix/csscompressor | diff --git a/csscompressor/tests/test_compress.py b/csscompressor/tests/test_compress.py
index 8d11e01..e65768a 100644
--- a/csscompressor/tests/test_compress.py
+++ b/csscompressor/tests/test_compress.py
@@ -52,3 +52,16 @@ class Tests(unittest.TestCase):
a {content: calc(10px-10%}
'''
self.assertRaises(ValueError, compress, input)
+
+ def test_nested_1(self):
+ input = '''
+ a { width: calc( (10vh - 100px) / 4 + 30px ) }
+ '''
+ output = compress(input)
+ assert output == "a{width:calc((10vh - 100px) / 4 + 30px)}"
+
+ def test_nested_2(self):
+ input = '''
+ a { width: calc( ((10vh - 100px) / 4 + 30px ) }
+ '''
+ self.assertRaises(ValueError, compress, input)
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 1
} | 0.9 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.5",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
-e git+https://github.com/sprymix/csscompressor.git@bec3e582cb5ab7182a0ca08ba381e491b94ed10c#egg=csscompressor
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: csscompressor
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
prefix: /opt/conda/envs/csscompressor
| [
"csscompressor/tests/test_compress.py::Tests::test_nested_1",
"csscompressor/tests/test_compress.py::Tests::test_nested_2"
] | [] | [
"csscompressor/tests/test_compress.py::Tests::test_compress_1",
"csscompressor/tests/test_compress.py::Tests::test_compress_2",
"csscompressor/tests/test_compress.py::Tests::test_linelen_1",
"csscompressor/tests/test_compress.py::Tests::test_linelen_2",
"csscompressor/tests/test_compress.py::Tests::test_linelen_3"
] | [] | BSD | 1,914 | 509 | [
"csscompressor/__init__.py"
] |
|
networkx__networkx-2773 | 3d7ea0d690e59c2d5d223528ea9e21b21fb7f8a4 | 2017-11-25 17:52:23 | 93b4b9227aa8a7ac4cbd946cf3dae3b168e17b45 | diff --git a/networkx/generators/degree_seq.py b/networkx/generators/degree_seq.py
index 6d57bcf05..c42faebc7 100644
--- a/networkx/generators/degree_seq.py
+++ b/networkx/generators/degree_seq.py
@@ -426,7 +426,7 @@ def expected_degree_graph(w, seed=None, selfloops=True):
# weights dictates the order of the (integer) node labels, so we
# need to remember the permutation applied in the sorting.
order = sorted(enumerate(w), key=itemgetter(1), reverse=True)
- mapping = {c: v for c, (u, v) in enumerate(order)}
+ mapping = {c: u for c, (u, v) in enumerate(order)}
seq = [v for u, v in order]
last = n
if not selfloops:
| node-mapping bug expected_degree_graph
Hi I used the NX1 expected_degree_graph generator. It has the same interface as the NX2.
https://networkx.github.io/documentation/stable/reference/generated/networkx.generators.degree_seq.expected_degree_graph.html#networkx.generators.degree_seq.expected_degree_graph
But NX2 will not generate the graph correctly. The total number of edge is always 1. No further error message provided.
Same script works well for NX1. (I downgrade to older version.
```python
D = 10
N = 1000
degree_l = [D for i in range(N)]
G = nx.expected_degree_graph(degree_l, seed=datetime.now(), selfloops=False)
``` | networkx/networkx | diff --git a/networkx/generators/tests/test_degree_seq.py b/networkx/generators/tests/test_degree_seq.py
index c1aca1791..bde2c7954 100644
--- a/networkx/generators/tests/test_degree_seq.py
+++ b/networkx/generators/tests/test_degree_seq.py
@@ -92,6 +92,8 @@ def test_expected_degree_graph():
# test that fixed seed delivers the same graph
deg_seq = [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3]
G1 = nx.expected_degree_graph(deg_seq, seed=1000)
+ assert_equal(len(G1), 12)
+
G2 = nx.expected_degree_graph(deg_seq, seed=1000)
assert_true(nx.is_isomorphic(G1, G2))
@@ -105,6 +107,7 @@ def test_expected_degree_graph_selfloops():
G1 = nx.expected_degree_graph(deg_seq, seed=1000, selfloops=False)
G2 = nx.expected_degree_graph(deg_seq, seed=1000, selfloops=False)
assert_true(nx.is_isomorphic(G1, G2))
+ assert_equal(len(G1), 12)
def test_expected_degree_graph_skew():
@@ -112,6 +115,7 @@ def test_expected_degree_graph_skew():
G1 = nx.expected_degree_graph(deg_seq, seed=1000)
G2 = nx.expected_degree_graph(deg_seq, seed=1000)
assert_true(nx.is_isomorphic(G1, G2))
+ assert_equal(len(G1), 5)
def test_havel_hakimi_construction():
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | 2.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y libgdal-dev graphviz"
],
"python": "3.6",
"reqs_path": [
"requirements/default.txt",
"requirements/test.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
charset-normalizer==2.0.12
codecov==2.1.13
coverage==6.2
decorator==5.1.1
idna==3.10
importlib-metadata==4.8.3
iniconfig==1.1.1
-e git+https://github.com/networkx/networkx.git@3d7ea0d690e59c2d5d223528ea9e21b21fb7f8a4#egg=networkx
nose==1.3.7
nose-ignore-docstring==0.2
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
requests==2.27.1
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
zipp==3.6.0
| name: networkx
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- charset-normalizer==2.0.12
- codecov==2.1.13
- coverage==6.2
- decorator==5.1.1
- idna==3.10
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- nose==1.3.7
- nose-ignore-docstring==0.2
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- requests==2.27.1
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- zipp==3.6.0
prefix: /opt/conda/envs/networkx
| [
"networkx/generators/tests/test_degree_seq.py::test_expected_degree_graph_skew"
] | [] | [
"networkx/generators/tests/test_degree_seq.py::TestConfigurationModel::test_empty_degree_sequence",
"networkx/generators/tests/test_degree_seq.py::TestConfigurationModel::test_degree_zero",
"networkx/generators/tests/test_degree_seq.py::TestConfigurationModel::test_degree_sequence",
"networkx/generators/tests/test_degree_seq.py::TestConfigurationModel::test_random_seed",
"networkx/generators/tests/test_degree_seq.py::TestConfigurationModel::test_directed_disallowed",
"networkx/generators/tests/test_degree_seq.py::TestConfigurationModel::test_odd_degree_sum",
"networkx/generators/tests/test_degree_seq.py::test_directed_configuation_raise_unequal",
"networkx/generators/tests/test_degree_seq.py::test_directed_configuation_mode",
"networkx/generators/tests/test_degree_seq.py::test_expected_degree_graph_empty",
"networkx/generators/tests/test_degree_seq.py::test_expected_degree_graph",
"networkx/generators/tests/test_degree_seq.py::test_expected_degree_graph_selfloops",
"networkx/generators/tests/test_degree_seq.py::test_havel_hakimi_construction",
"networkx/generators/tests/test_degree_seq.py::test_directed_havel_hakimi",
"networkx/generators/tests/test_degree_seq.py::test_degree_sequence_tree",
"networkx/generators/tests/test_degree_seq.py::test_random_degree_sequence_graph",
"networkx/generators/tests/test_degree_seq.py::test_random_degree_sequence_graph_raise",
"networkx/generators/tests/test_degree_seq.py::test_random_degree_sequence_large"
] | [] | BSD 3-Clause | 1,915 | 210 | [
"networkx/generators/degree_seq.py"
] |
|
lbl-srg__BuildingsPy-181 | ad2f3e7ffb0a01117e5f09ac498a87b5c02ca158 | 2017-11-27 20:38:23 | 923b1087e255f7f35224aa7c1653abf9c038f849 | diff --git a/buildingspy/development/error_dictionary.py b/buildingspy/development/error_dictionary.py
index 2304f63..4a3c8d3 100644
--- a/buildingspy/development/error_dictionary.py
+++ b/buildingspy/development/error_dictionary.py
@@ -138,6 +138,13 @@ class ErrorDictionary(object):
'model_message': "\"inner Modelica.StateGraph.StateGraphRoot\" is missing in '{}'.\n",
'summary_message': "Number of models with missing StateGraphRoot : {}\n"}
+ self._error_dict["mismatched displayUnits"] = {
+ 'tool_message': "Mismatched displayUnit",
+ 'counter': 0,
+ 'buildingspy_var': "iMisDisUni",
+ 'model_message': "\"Mismatched displayUnit in '{}'.\n",
+ 'summary_message': "Number of models with mismatched displayUnit : {}\n"}
+
def get_dictionary(self):
""" Return the dictionary with all error data
"""
diff --git a/buildingspy/development/refactor.py b/buildingspy/development/refactor.py
index ba4d363..2c064b0 100644
--- a/buildingspy/development/refactor.py
+++ b/buildingspy/development/refactor.py
@@ -637,6 +637,10 @@ def move_class(source, target):
"""
##############################################################
+ # First, remove empty subdirectories
+ _remove_empty_folders(source.replace(".", os.path.sep),
+ removeRoot=False)
+ ##############################################################
# Check if it is a directory with a package.mo file
if os.path.isdir(source.replace(".", os.path.sep)):
_move_class_directory(source, target)
@@ -665,6 +669,26 @@ def move_class(source, target):
_update_all_references(source, target)
+def _remove_empty_folders(path, removeRoot=True):
+ ''' Remove empty directories
+ '''
+ if not os.path.isdir(path):
+ return
+
+ # remove empty subfolders
+ files = os.listdir(path)
+ if len(files):
+ for f in files:
+ fullpath = os.path.join(path, f)
+ if os.path.isdir(fullpath):
+ _remove_empty_folders(fullpath)
+
+ # if folder empty, delete it
+ files = os.listdir(path)
+ if len(files) == 0 and removeRoot:
+ os.rmdir(path)
+
+
def _update_all_references(source, target):
""" Updates all references in `.mo` and `.mos` files.
| add test for Mismatched displayUnit
Add a test for `Mismatched displayUnit` to the regression testing | lbl-srg/BuildingsPy | diff --git a/buildingspy/tests/test_development_error_dictionary.py b/buildingspy/tests/test_development_error_dictionary.py
index 393e2cc..ee9d12b 100644
--- a/buildingspy/tests/test_development_error_dictionary.py
+++ b/buildingspy/tests/test_development_error_dictionary.py
@@ -39,7 +39,8 @@ class Test_development_error_dictionary(unittest.TestCase):
'type inconsistent definition equations',
'unspecified initial conditions',
'unused connector',
- 'stateGraphRoot missing'])
+ 'stateGraphRoot missing',
+ 'mismatched displayUnits'])
self.assertEqual(len(k), len(k_expected), "Wrong number of keys.")
for i in range(len(k)):
@@ -63,7 +64,8 @@ class Test_development_error_dictionary(unittest.TestCase):
'Type inconsistent definition equation',
'Dymola has selected default initial condition',
'Warning: The following connector variables are not used in the model',
- "A \\\"stateGraphRoot\\\" component was automatically introduced."])
+ "A \\\"stateGraphRoot\\\" component was automatically introduced.",
+ "Mismatched displayUnit"])
self.assertEqual(len(k), len(k_expected), "Wrong number of tool messages.")
for i in range(len(k)):
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 2
} | 1.6 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytidylib",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y tidy"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
-e git+https://github.com/lbl-srg/BuildingsPy.git@ad2f3e7ffb0a01117e5f09ac498a87b5c02ca158#egg=buildingspy
certifi==2021.5.30
future==1.0.0
gitdb==4.0.9
GitPython==3.1.18
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
Jinja2==3.0.3
MarkupSafe==2.0.1
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
pytidylib==0.3.2
smmap==5.0.0
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: BuildingsPy
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- future==1.0.0
- gitdb==4.0.9
- gitpython==3.1.18
- jinja2==3.0.3
- markupsafe==2.0.1
- pytidylib==0.3.2
- smmap==5.0.0
prefix: /opt/conda/envs/BuildingsPy
| [
"buildingspy/tests/test_development_error_dictionary.py::Test_development_error_dictionary::test_keys",
"buildingspy/tests/test_development_error_dictionary.py::Test_development_error_dictionary::test_tool_messages"
] | [] | [] | [] | null | 1,918 | 595 | [
"buildingspy/development/error_dictionary.py",
"buildingspy/development/refactor.py"
] |
|
Azure__WALinuxAgent-951 | ab261b5f2ed0dfce4dd94adf924e68e0aaff5e7b | 2017-11-28 01:19:07 | 6e9b985c1d7d564253a1c344bab01b45093103cd | boumenot: Let me know if this is the style of unit test you were thinking of. | diff --git a/azurelinuxagent/agent.py b/azurelinuxagent/agent.py
index e99f7bed..87ab3c14 100644
--- a/azurelinuxagent/agent.py
+++ b/azurelinuxagent/agent.py
@@ -144,7 +144,7 @@ def main(args=[]):
if command == "version":
version()
elif command == "help":
- usage()
+ print(usage())
elif command == "start":
start(conf_file_path=conf_file_path)
else:
@@ -228,15 +228,16 @@ def version():
def usage():
"""
- Show agent usage
+ Return agent usage message
"""
- print("")
- print((("usage: {0} [-verbose] [-force] [-help] "
+ s = "\n"
+ s += ("usage: {0} [-verbose] [-force] [-help] "
"-configuration-path:<path to configuration file>"
"-deprovision[+user]|-register-service|-version|-daemon|-start|"
- "-run-exthandlers]"
- "").format(sys.argv[0])))
- print("")
+ "-run-exthandlers|-show-configuration]"
+ "").format(sys.argv[0])
+ s += "\n"
+ return s
def start(conf_file_path=None):
"""
| WALA usage prompt lacks of " waagent -show-configuration"
We can perform "waagent -show-configuration" to get current WALA configuration,but "waagent -help"lacks of this usage prompt. | Azure/WALinuxAgent | diff --git a/tests/test_agent.py b/tests/test_agent.py
index 51b157dd..95994dba 100644
--- a/tests/test_agent.py
+++ b/tests/test_agent.py
@@ -15,9 +15,7 @@
# Requires Python 2.4+ and Openssl 1.0+
#
-import mock
import os.path
-import sys
from azurelinuxagent.agent import *
from azurelinuxagent.common.conf import *
@@ -168,3 +166,22 @@ class TestAgent(AgentTestCase):
for k in sorted(configuration.keys()):
actual_configuration.append("{0} = {1}".format(k, configuration[k]))
self.assertEqual(EXPECTED_CONFIGURATION, actual_configuration)
+
+ def test_agent_usage_message(self):
+ message = usage()
+
+ # Python 2.6 does not have assertIn()
+ self.assertTrue("-verbose" in message)
+ self.assertTrue("-force" in message)
+ self.assertTrue("-help" in message)
+ self.assertTrue("-configuration-path" in message)
+ self.assertTrue("-deprovision" in message)
+ self.assertTrue("-register-service" in message)
+ self.assertTrue("-version" in message)
+ self.assertTrue("-daemon" in message)
+ self.assertTrue("-start" in message)
+ self.assertTrue("-run-exthandlers" in message)
+ self.assertTrue("-show-configuration" in message)
+
+ # sanity check
+ self.assertFalse("-not-a-valid-option" in message)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 1
} | 2.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pyasn1",
"nose",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.4",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
importlib-metadata==4.8.3
iniconfig==1.1.1
nose==1.3.7
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyasn1==0.5.1
pyparsing==3.1.4
pytest==7.0.1
tomli==1.2.3
typing_extensions==4.1.1
-e git+https://github.com/Azure/WALinuxAgent.git@ab261b5f2ed0dfce4dd94adf924e68e0aaff5e7b#egg=WALinuxAgent
zipp==3.6.0
| name: WALinuxAgent
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- nose==1.3.7
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyasn1==0.5.1
- pyparsing==3.1.4
- pytest==7.0.1
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/WALinuxAgent
| [
"tests/test_agent.py::TestAgent::test_agent_usage_message"
] | [] | [
"tests/test_agent.py::TestAgent::test_accepts_configuration_path",
"tests/test_agent.py::TestAgent::test_agent_accepts_configuration_path",
"tests/test_agent.py::TestAgent::test_agent_does_not_pass_configuration_path",
"tests/test_agent.py::TestAgent::test_agent_ensures_extension_log_directory",
"tests/test_agent.py::TestAgent::test_agent_get_configuration",
"tests/test_agent.py::TestAgent::test_agent_logs_if_extension_log_directory_is_a_file",
"tests/test_agent.py::TestAgent::test_agent_passes_configuration_path",
"tests/test_agent.py::TestAgent::test_agent_uses_default_configuration_path",
"tests/test_agent.py::TestAgent::test_checks_configuration_path",
"tests/test_agent.py::TestAgent::test_configuration_path_defaults_to_none",
"tests/test_agent.py::TestAgent::test_rejects_missing_configuration_path"
] | [] | Apache License 2.0 | 1,920 | 316 | [
"azurelinuxagent/agent.py"
] |
ucfopen__canvasapi-117 | d265d4a49df7041dca888247f48feda7e288cf15 | 2017-11-28 22:30:18 | db3c377b68f2953e1618f4e4588cc2db8603841e | diff --git a/canvasapi/canvas.py b/canvasapi/canvas.py
index 687649c..13193ea 100644
--- a/canvasapi/canvas.py
+++ b/canvasapi/canvas.py
@@ -1,5 +1,7 @@
from __future__ import absolute_import, division, print_function, unicode_literals
+import warnings
+
from canvasapi.account import Account
from canvasapi.course import Course
from canvasapi.exceptions import RequiredFieldMissing
@@ -10,7 +12,10 @@ from canvasapi.paginated_list import PaginatedList
from canvasapi.requester import Requester
from canvasapi.section import Section
from canvasapi.user import User
-from canvasapi.util import combine_kwargs, obj_or_id
+from canvasapi.util import combine_kwargs, get_institution_url, obj_or_id
+
+
+warnings.simplefilter('always', DeprecationWarning)
class Canvas(object):
@@ -25,6 +30,16 @@ class Canvas(object):
:param access_token: The API key to authenticate requests with.
:type access_token: str
"""
+ new_url = get_institution_url(base_url)
+
+ if 'api/v1' in base_url:
+ warnings.warn(
+ "`base_url` no longer requires an API version be specified. "
+ "Rewriting `base_url` to {}".format(new_url),
+ DeprecationWarning
+ )
+ base_url = new_url + '/api/v1/'
+
self.__requester = Requester(base_url, access_token)
def create_account(self, **kwargs):
diff --git a/canvasapi/util.py b/canvasapi/util.py
index 88c469c..86d6ee4 100644
--- a/canvasapi/util.py
+++ b/canvasapi/util.py
@@ -122,3 +122,19 @@ def obj_or_id(parameter, param_name, object_types):
obj_type_list = ",".join([obj_type.__name__ for obj_type in object_types])
message = 'Parameter {} must be of type {} or int.'.format(param_name, obj_type_list)
raise TypeError(message)
+
+
+def get_institution_url(base_url):
+ """
+ Trim '/api/v1' from a given root URL.
+
+ :param base_url: The base URL of the API.
+ :type base_url: str
+ :rtype: str
+ """
+ index = base_url.find('/api/v1')
+
+ if index != -1:
+ return base_url[0:index]
+
+ return base_url
| Deprecate /api/v1 from Canvas constructor
Currently we require `/api/v1/` to be appended to the end of an institution's Canvas URL when initializing the `Canvas` object.
We don't check if the URL is malformed in anyway, and as a result, sometimes unexpected behavior can present itself. We recently ran into this issue with one of our own applications where the base URL was set to `instructure.edu//api/v1/`.
We should implement a fix for this in two ways:
* Remove the `/api/version` requirement in the URL as we don't currently support targeting more than one API version
* Force URLs to be formatted properly (i.e. `*.someurl.com` with no trailing slash, `https://` or `http://` allowed) and manually append the `/api/v1/` string ourselves afterward
We should also warn the user that `/api/v1/` has been deprecated and is no longer required if they include it in their base URL. | ucfopen/canvasapi | diff --git a/tests/settings.py b/tests/settings.py
index 789a25c..e67c5f6 100644
--- a/tests/settings.py
+++ b/tests/settings.py
@@ -1,6 +1,7 @@
from __future__ import absolute_import, division, print_function, unicode_literals
-BASE_URL = 'http://example.com/api/v1/'
+BASE_URL = 'http://example.com'
+BASE_URL_WITH_VERSION = 'http://example.com/api/v1/'
API_KEY = '123'
INVALID_ID = 9001
diff --git a/tests/test_canvas.py b/tests/test_canvas.py
index dc5ec34..3d75228 100644
--- a/tests/test_canvas.py
+++ b/tests/test_canvas.py
@@ -1,5 +1,7 @@
from __future__ import absolute_import, division, print_function, unicode_literals
+
import unittest
+import warnings
from datetime import datetime
import pytz
@@ -30,6 +32,12 @@ class TestCanvas(unittest.TestCase):
def setUp(self):
self.canvas = Canvas(settings.BASE_URL, settings.API_KEY)
+ # Canvas()
+ def test_init_deprecate_url_contains_version(self, m):
+ with warnings.catch_warnings(record=True) as w:
+ Canvas(settings.BASE_URL_WITH_VERSION, settings.API_KEY)
+ self.assertTrue(issubclass(w[0].category, DeprecationWarning))
+
# create_account()
def test_create_account(self, m):
register_uris({'account': ['create']}, m)
diff --git a/tests/test_course.py b/tests/test_course.py
index 415f426..fb6e76c 100644
--- a/tests/test_course.py
+++ b/tests/test_course.py
@@ -176,14 +176,16 @@ class TestCourse(unittest.TestCase):
register_uris({'course': ['upload', 'upload_final']}, m)
filename = 'testfile_course_{}'.format(uuid.uuid4().hex)
- with open(filename, 'w+') as file:
- response = self.course.upload(file)
- self.assertTrue(response[0])
- self.assertIsInstance(response[1], dict)
- self.assertIn('url', response[1])
+ try:
+ with open(filename, 'w+') as file:
+ response = self.course.upload(file)
- cleanup_file(filename)
+ self.assertTrue(response[0])
+ self.assertIsInstance(response[1], dict)
+ self.assertIn('url', response[1])
+ finally:
+ cleanup_file(filename)
# reset()
def test_reset(self, m):
diff --git a/tests/test_group.py b/tests/test_group.py
index 526eb23..e948c44 100644
--- a/tests/test_group.py
+++ b/tests/test_group.py
@@ -152,13 +152,14 @@ class TestGroup(unittest.TestCase):
register_uris({'group': ['upload', 'upload_final']}, m)
filename = 'testfile_group_{}'.format(uuid.uuid4().hex)
- with open(filename, 'w+') as file:
- response = self.group.upload(file)
- self.assertTrue(response[0])
- self.assertIsInstance(response[1], dict)
- self.assertIn('url', response[1])
-
- cleanup_file(filename)
+ try:
+ with open(filename, 'w+') as file:
+ response = self.group.upload(file)
+ self.assertTrue(response[0])
+ self.assertIsInstance(response[1], dict)
+ self.assertIn('url', response[1])
+ finally:
+ cleanup_file(filename)
# preview_processed_html()
def test_preview_processed_html(self, m):
diff --git a/tests/test_submission.py b/tests/test_submission.py
index 8a083e0..2ccc29c 100644
--- a/tests/test_submission.py
+++ b/tests/test_submission.py
@@ -37,14 +37,16 @@ class TestSubmission(unittest.TestCase):
register_uris({'submission': ['upload_comment', 'upload_comment_final']}, m)
filename = 'testfile_submission_{}'.format(uuid.uuid4().hex)
- with open(filename, 'w+') as file:
- response = self.submission_course.upload_comment(file)
- self.assertTrue(response[0])
- self.assertIsInstance(response[1], dict)
- self.assertIn('url', response[1])
+ try:
+ with open(filename, 'w+') as file:
+ response = self.submission_course.upload_comment(file)
- cleanup_file(filename)
+ self.assertTrue(response[0])
+ self.assertIsInstance(response[1], dict)
+ self.assertIn('url', response[1])
+ finally:
+ cleanup_file(filename)
def test_upload_comment_section(self, m):
# Sections do not support uploading file comments
diff --git a/tests/test_uploader.py b/tests/test_uploader.py
index a72ecd2..6cea063 100644
--- a/tests/test_uploader.py
+++ b/tests/test_uploader.py
@@ -22,7 +22,6 @@ class TestUploader(unittest.TestCase):
def tearDown(self):
self.file.close()
-
cleanup_file(self.filename)
# start()
diff --git a/tests/test_user.py b/tests/test_user.py
index fa5d5b7..d2ed67c 100644
--- a/tests/test_user.py
+++ b/tests/test_user.py
@@ -203,14 +203,16 @@ class TestUser(unittest.TestCase):
register_uris({'user': ['upload', 'upload_final']}, m)
filename = 'testfile_user_{}'.format(uuid.uuid4().hex)
- with open(filename, 'w+') as file:
- response = self.user.upload(file)
- self.assertTrue(response[0])
- self.assertIsInstance(response[1], dict)
- self.assertIn('url', response[1])
+ try:
+ with open(filename, 'w+') as file:
+ response = self.user.upload(file)
- cleanup_file(filename)
+ self.assertTrue(response[0])
+ self.assertIsInstance(response[1], dict)
+ self.assertIn('url', response[1])
+ finally:
+ cleanup_file(filename)
# list_groups()
def test_list_groups(self, m):
diff --git a/tests/test_util.py b/tests/test_util.py
index 71677d0..472b4f1 100644
--- a/tests/test_util.py
+++ b/tests/test_util.py
@@ -6,7 +6,9 @@ import requests_mock
from canvasapi import Canvas
from canvasapi.course import CourseNickname
from canvasapi.user import User
-from canvasapi.util import combine_kwargs, is_multivalued, obj_or_id
+from canvasapi.util import (
+ combine_kwargs, get_institution_url, is_multivalued, obj_or_id
+)
from itertools import chain
from six import integer_types, iterkeys, itervalues, iteritems
from six.moves import zip
@@ -408,3 +410,8 @@ class TestUtil(unittest.TestCase):
with self.assertRaises(TypeError):
obj_or_id(nick, 'nickname_id', (CourseNickname,))
+
+ # get_institution_url()
+ def test_get_institution_url(self, m):
+ base_url = 'https://my.canvas.edu/api/v1'
+ self.assertEqual(get_institution_url(base_url), 'https://my.canvas.edu')
diff --git a/tests/util.py b/tests/util.py
index 3b92514..89a2616 100644
--- a/tests/util.py
+++ b/tests/util.py
@@ -4,6 +4,7 @@ import os
import requests_mock
+from canvasapi.util import get_institution_url
from tests import settings
@@ -17,7 +18,6 @@ def register_uris(requirements, requests_mocker):
:param requests_mocker: requests_mock.mocker.Mocker
"""
for fixture, objects in requirements.items():
-
try:
with open('tests/fixtures/{}.json'.format(fixture)) as file:
data = json.loads(file.read())
@@ -40,7 +40,7 @@ def register_uris(requirements, requests_mocker):
if obj['endpoint'] == 'ANY':
url = requests_mock.ANY
else:
- url = settings.BASE_URL + obj['endpoint']
+ url = get_institution_url(settings.BASE_URL) + '/api/v1/' + obj['endpoint']
try:
requests_mocker.register_uri(
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 2
} | 0.7 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"flake8",
"pycodestyle",
"requests_mock"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/ucfopen/canvasapi.git@d265d4a49df7041dca888247f48feda7e288cf15#egg=canvasapi
certifi==2025.1.31
charset-normalizer==3.4.1
exceptiongroup==1.2.2
flake8==7.2.0
idna==3.10
iniconfig==2.1.0
mccabe==0.7.0
packaging==24.2
pluggy==1.5.0
pycodestyle==2.13.0
pyflakes==3.3.1
pytest==8.3.5
pytz==2025.2
requests==2.32.3
requests-mock==1.12.1
six==1.17.0
tomli==2.2.1
urllib3==2.3.0
| name: canvasapi
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- certifi==2025.1.31
- charset-normalizer==3.4.1
- exceptiongroup==1.2.2
- flake8==7.2.0
- idna==3.10
- iniconfig==2.1.0
- mccabe==0.7.0
- packaging==24.2
- pluggy==1.5.0
- pycodestyle==2.13.0
- pyflakes==3.3.1
- pytest==8.3.5
- pytz==2025.2
- requests==2.32.3
- requests-mock==1.12.1
- six==1.17.0
- tomli==2.2.1
- urllib3==2.3.0
prefix: /opt/conda/envs/canvasapi
| [
"tests/test_canvas.py::TestCanvas::test_clear_course_nicknames",
"tests/test_canvas.py::TestCanvas::test_conversations_batch_update",
"tests/test_canvas.py::TestCanvas::test_conversations_batch_updated_fail_on_event",
"tests/test_canvas.py::TestCanvas::test_conversations_batch_updated_fail_on_ids",
"tests/test_canvas.py::TestCanvas::test_conversations_get_running_batches",
"tests/test_canvas.py::TestCanvas::test_conversations_mark_all_as_read",
"tests/test_canvas.py::TestCanvas::test_conversations_unread_count",
"tests/test_canvas.py::TestCanvas::test_create_account",
"tests/test_canvas.py::TestCanvas::test_create_appointment_group",
"tests/test_canvas.py::TestCanvas::test_create_appointment_group_fail_on_context_codes",
"tests/test_canvas.py::TestCanvas::test_create_appointment_group_fail_on_title",
"tests/test_canvas.py::TestCanvas::test_create_calendar_event",
"tests/test_canvas.py::TestCanvas::test_create_calendar_event_fail",
"tests/test_canvas.py::TestCanvas::test_create_conversation",
"tests/test_canvas.py::TestCanvas::test_create_group",
"tests/test_canvas.py::TestCanvas::test_get_account",
"tests/test_canvas.py::TestCanvas::test_get_account_fail",
"tests/test_canvas.py::TestCanvas::test_get_account_sis_id",
"tests/test_canvas.py::TestCanvas::test_get_accounts",
"tests/test_canvas.py::TestCanvas::test_get_activity_stream_summary",
"tests/test_canvas.py::TestCanvas::test_get_appointment_group",
"tests/test_canvas.py::TestCanvas::test_get_calendar_event",
"tests/test_canvas.py::TestCanvas::test_get_conversation",
"tests/test_canvas.py::TestCanvas::test_get_conversations",
"tests/test_canvas.py::TestCanvas::test_get_course",
"tests/test_canvas.py::TestCanvas::test_get_course_accounts",
"tests/test_canvas.py::TestCanvas::test_get_course_fail",
"tests/test_canvas.py::TestCanvas::test_get_course_nickname",
"tests/test_canvas.py::TestCanvas::test_get_course_nickname_fail",
"tests/test_canvas.py::TestCanvas::test_get_course_nicknames",
"tests/test_canvas.py::TestCanvas::test_get_course_non_unicode_char",
"tests/test_canvas.py::TestCanvas::test_get_course_sis_id",
"tests/test_canvas.py::TestCanvas::test_get_course_with_start_date",
"tests/test_canvas.py::TestCanvas::test_get_courses",
"tests/test_canvas.py::TestCanvas::test_get_file",
"tests/test_canvas.py::TestCanvas::test_get_group",
"tests/test_canvas.py::TestCanvas::test_get_group_category",
"tests/test_canvas.py::TestCanvas::test_get_group_sis_id",
"tests/test_canvas.py::TestCanvas::test_get_outcome",
"tests/test_canvas.py::TestCanvas::test_get_outcome_group",
"tests/test_canvas.py::TestCanvas::test_get_root_outcome_group",
"tests/test_canvas.py::TestCanvas::test_get_section",
"tests/test_canvas.py::TestCanvas::test_get_section_sis_id",
"tests/test_canvas.py::TestCanvas::test_get_todo_items",
"tests/test_canvas.py::TestCanvas::test_get_upcoming_events",
"tests/test_canvas.py::TestCanvas::test_get_user",
"tests/test_canvas.py::TestCanvas::test_get_user_by_id_type",
"tests/test_canvas.py::TestCanvas::test_get_user_fail",
"tests/test_canvas.py::TestCanvas::test_get_user_self",
"tests/test_canvas.py::TestCanvas::test_list_appointment_groups",
"tests/test_canvas.py::TestCanvas::test_list_calendar_events",
"tests/test_canvas.py::TestCanvas::test_list_group_participants",
"tests/test_canvas.py::TestCanvas::test_list_user_participants",
"tests/test_canvas.py::TestCanvas::test_reserve_time_slot",
"tests/test_canvas.py::TestCanvas::test_reserve_time_slot_by_participant_id",
"tests/test_canvas.py::TestCanvas::test_search_accounts",
"tests/test_canvas.py::TestCanvas::test_search_all_courses",
"tests/test_canvas.py::TestCanvas::test_search_recipients",
"tests/test_canvas.py::TestCanvas::test_set_course_nickname",
"tests/test_course.py::TestCourse::test__str__",
"tests/test_course.py::TestCourse::test_add_grading_standards",
"tests/test_course.py::TestCourse::test_add_grading_standards_empty_list",
"tests/test_course.py::TestCourse::test_add_grading_standards_missing_name_key",
"tests/test_course.py::TestCourse::test_add_grading_standards_missing_value_key",
"tests/test_course.py::TestCourse::test_add_grading_standards_non_dict_list",
"tests/test_course.py::TestCourse::test_conclude",
"tests/test_course.py::TestCourse::test_course_files",
"tests/test_course.py::TestCourse::test_create_assignment",
"tests/test_course.py::TestCourse::test_create_assignment_fail",
"tests/test_course.py::TestCourse::test_create_assignment_group",
"tests/test_course.py::TestCourse::test_create_course_section",
"tests/test_course.py::TestCourse::test_create_discussion_topic",
"tests/test_course.py::TestCourse::test_create_external_feed",
"tests/test_course.py::TestCourse::test_create_external_tool",
"tests/test_course.py::TestCourse::test_create_folder",
"tests/test_course.py::TestCourse::test_create_group_category",
"tests/test_course.py::TestCourse::test_create_module",
"tests/test_course.py::TestCourse::test_create_module_fail",
"tests/test_course.py::TestCourse::test_create_page",
"tests/test_course.py::TestCourse::test_create_page_fail",
"tests/test_course.py::TestCourse::test_create_quiz",
"tests/test_course.py::TestCourse::test_create_quiz_fail",
"tests/test_course.py::TestCourse::test_delete",
"tests/test_course.py::TestCourse::test_delete_external_feed",
"tests/test_course.py::TestCourse::test_edit_front_page",
"tests/test_course.py::TestCourse::test_enroll_user",
"tests/test_course.py::TestCourse::test_get_assignment",
"tests/test_course.py::TestCourse::test_get_assignment_group",
"tests/test_course.py::TestCourse::test_get_assignments",
"tests/test_course.py::TestCourse::test_get_course_level_assignment_data",
"tests/test_course.py::TestCourse::test_get_course_level_participation_data",
"tests/test_course.py::TestCourse::test_get_course_level_student_summary_data",
"tests/test_course.py::TestCourse::test_get_discussion_topic",
"tests/test_course.py::TestCourse::test_get_discussion_topics",
"tests/test_course.py::TestCourse::test_get_enrollments",
"tests/test_course.py::TestCourse::test_get_external_tool",
"tests/test_course.py::TestCourse::test_get_external_tools",
"tests/test_course.py::TestCourse::test_get_file",
"tests/test_course.py::TestCourse::test_get_folder",
"tests/test_course.py::TestCourse::test_get_full_discussion_topic",
"tests/test_course.py::TestCourse::test_get_grading_standards",
"tests/test_course.py::TestCourse::test_get_module",
"tests/test_course.py::TestCourse::test_get_modules",
"tests/test_course.py::TestCourse::test_get_outcome_group",
"tests/test_course.py::TestCourse::test_get_outcome_groups_in_context",
"tests/test_course.py::TestCourse::test_get_outcome_links_in_context",
"tests/test_course.py::TestCourse::test_get_outcome_result_rollups",
"tests/test_course.py::TestCourse::test_get_outcome_results",
"tests/test_course.py::TestCourse::test_get_page",
"tests/test_course.py::TestCourse::test_get_pages",
"tests/test_course.py::TestCourse::test_get_quiz",
"tests/test_course.py::TestCourse::test_get_quiz_fail",
"tests/test_course.py::TestCourse::test_get_quizzes",
"tests/test_course.py::TestCourse::test_get_recent_students",
"tests/test_course.py::TestCourse::test_get_root_outcome_group",
"tests/test_course.py::TestCourse::test_get_section",
"tests/test_course.py::TestCourse::test_get_settings",
"tests/test_course.py::TestCourse::test_get_single_grading_standard",
"tests/test_course.py::TestCourse::test_get_submission",
"tests/test_course.py::TestCourse::test_get_user",
"tests/test_course.py::TestCourse::test_get_user_id_type",
"tests/test_course.py::TestCourse::test_get_user_in_a_course_level_assignment_data",
"tests/test_course.py::TestCourse::test_get_user_in_a_course_level_messaging_data",
"tests/test_course.py::TestCourse::test_get_user_in_a_course_level_participation_data",
"tests/test_course.py::TestCourse::test_get_users",
"tests/test_course.py::TestCourse::test_list_assignment_groups",
"tests/test_course.py::TestCourse::test_list_external_feeds",
"tests/test_course.py::TestCourse::test_list_folders",
"tests/test_course.py::TestCourse::test_list_gradeable_students",
"tests/test_course.py::TestCourse::test_list_group_categories",
"tests/test_course.py::TestCourse::test_list_groups",
"tests/test_course.py::TestCourse::test_list_multiple_submissions",
"tests/test_course.py::TestCourse::test_list_multiple_submissions_grouped_param",
"tests/test_course.py::TestCourse::test_list_sections",
"tests/test_course.py::TestCourse::test_list_submissions",
"tests/test_course.py::TestCourse::test_list_tabs",
"tests/test_course.py::TestCourse::test_mark_submission_as_read",
"tests/test_course.py::TestCourse::test_mark_submission_as_unread",
"tests/test_course.py::TestCourse::test_preview_html",
"tests/test_course.py::TestCourse::test_reorder_pinned_topics",
"tests/test_course.py::TestCourse::test_reorder_pinned_topics_comma_separated_string",
"tests/test_course.py::TestCourse::test_reorder_pinned_topics_invalid_input",
"tests/test_course.py::TestCourse::test_reorder_pinned_topics_tuple",
"tests/test_course.py::TestCourse::test_reset",
"tests/test_course.py::TestCourse::test_show_front_page",
"tests/test_course.py::TestCourse::test_subit_assignment_fail",
"tests/test_course.py::TestCourse::test_submit_assignment",
"tests/test_course.py::TestCourse::test_update",
"tests/test_course.py::TestCourse::test_update_settings",
"tests/test_course.py::TestCourse::test_update_submission",
"tests/test_course.py::TestCourse::test_update_tab",
"tests/test_course.py::TestCourse::test_upload",
"tests/test_course.py::TestCourseNickname::test__str__",
"tests/test_course.py::TestCourseNickname::test_remove",
"tests/test_group.py::TestGroup::test__str__",
"tests/test_group.py::TestGroup::test_create_discussion_topic",
"tests/test_group.py::TestGroup::test_create_external_feed",
"tests/test_group.py::TestGroup::test_create_folder",
"tests/test_group.py::TestGroup::test_create_membership",
"tests/test_group.py::TestGroup::test_create_page",
"tests/test_group.py::TestGroup::test_create_page_fail",
"tests/test_group.py::TestGroup::test_delete",
"tests/test_group.py::TestGroup::test_delete_external_feed",
"tests/test_group.py::TestGroup::test_edit",
"tests/test_group.py::TestGroup::test_edit_front_page",
"tests/test_group.py::TestGroup::test_get_activity_stream_summary",
"tests/test_group.py::TestGroup::test_get_discussion_topic",
"tests/test_group.py::TestGroup::test_get_discussion_topics",
"tests/test_group.py::TestGroup::test_get_file",
"tests/test_group.py::TestGroup::test_get_folder",
"tests/test_group.py::TestGroup::test_get_full_discussion_topic",
"tests/test_group.py::TestGroup::test_get_membership",
"tests/test_group.py::TestGroup::test_get_page",
"tests/test_group.py::TestGroup::test_get_pages",
"tests/test_group.py::TestGroup::test_group_files",
"tests/test_group.py::TestGroup::test_invite",
"tests/test_group.py::TestGroup::test_list_external_feeds",
"tests/test_group.py::TestGroup::test_list_folders",
"tests/test_group.py::TestGroup::test_list_memberships",
"tests/test_group.py::TestGroup::test_list_tabs",
"tests/test_group.py::TestGroup::test_list_users",
"tests/test_group.py::TestGroup::test_preview_processed_html",
"tests/test_group.py::TestGroup::test_remove_user",
"tests/test_group.py::TestGroup::test_reorder_pinned_topics",
"tests/test_group.py::TestGroup::test_reorder_pinned_topics_comma_separated_string",
"tests/test_group.py::TestGroup::test_reorder_pinned_topics_invalid_input",
"tests/test_group.py::TestGroup::test_reorder_pinned_topics_tuple",
"tests/test_group.py::TestGroup::test_show_front_page",
"tests/test_group.py::TestGroup::test_update_membership",
"tests/test_group.py::TestGroup::test_upload",
"tests/test_group.py::TestGroupMembership::test__str__",
"tests/test_group.py::TestGroupMembership::test_remove_self",
"tests/test_group.py::TestGroupMembership::test_remove_user",
"tests/test_group.py::TestGroupMembership::test_update",
"tests/test_group.py::TestGroupCategory::test__str__",
"tests/test_group.py::TestGroupCategory::test_assign_members",
"tests/test_group.py::TestGroupCategory::test_create_group",
"tests/test_group.py::TestGroupCategory::test_delete_category",
"tests/test_group.py::TestGroupCategory::test_list_groups",
"tests/test_group.py::TestGroupCategory::test_list_users",
"tests/test_group.py::TestGroupCategory::test_update",
"tests/test_submission.py::TestSubmission::test__str__",
"tests/test_submission.py::TestSubmission::test_upload_comment",
"tests/test_submission.py::TestSubmission::test_upload_comment_section",
"tests/test_uploader.py::TestUploader::test_start",
"tests/test_uploader.py::TestUploader::test_start_file_does_not_exist",
"tests/test_uploader.py::TestUploader::test_start_path",
"tests/test_uploader.py::TestUploader::test_upload_fail",
"tests/test_uploader.py::TestUploader::test_upload_no_upload_params",
"tests/test_uploader.py::TestUploader::test_upload_no_upload_url",
"tests/test_user.py::TestUser::test__str__",
"tests/test_user.py::TestUser::test_add_observee",
"tests/test_user.py::TestUser::test_add_observee_with_credentials",
"tests/test_user.py::TestUser::test_create_bookmark",
"tests/test_user.py::TestUser::test_create_folder",
"tests/test_user.py::TestUser::test_edit",
"tests/test_user.py::TestUser::test_get_avatars",
"tests/test_user.py::TestUser::test_get_bookmark",
"tests/test_user.py::TestUser::test_get_color",
"tests/test_user.py::TestUser::test_get_colors",
"tests/test_user.py::TestUser::test_get_courses",
"tests/test_user.py::TestUser::test_get_file",
"tests/test_user.py::TestUser::test_get_folder",
"tests/test_user.py::TestUser::test_get_missing_submissions",
"tests/test_user.py::TestUser::test_get_page_views",
"tests/test_user.py::TestUser::test_get_profile",
"tests/test_user.py::TestUser::test_list_bookmarks",
"tests/test_user.py::TestUser::test_list_calendar_events_for_user",
"tests/test_user.py::TestUser::test_list_communication_channels",
"tests/test_user.py::TestUser::test_list_enrollments",
"tests/test_user.py::TestUser::test_list_folders",
"tests/test_user.py::TestUser::test_list_groups",
"tests/test_user.py::TestUser::test_list_observees",
"tests/test_user.py::TestUser::test_list_user_logins",
"tests/test_user.py::TestUser::test_merge_into_id",
"tests/test_user.py::TestUser::test_merge_into_user",
"tests/test_user.py::TestUser::test_remove_observee",
"tests/test_user.py::TestUser::test_show_observee",
"tests/test_user.py::TestUser::test_update_color",
"tests/test_user.py::TestUser::test_update_color_no_hashtag",
"tests/test_user.py::TestUser::test_update_settings",
"tests/test_user.py::TestUser::test_upload",
"tests/test_user.py::TestUser::test_user_files",
"tests/test_user.py::TestUser::test_user_get_assignments",
"tests/test_user.py::TestUserDisplay::test__str__",
"tests/test_util.py::TestUtil::test_combine_kwargs_empty",
"tests/test_util.py::TestUtil::test_combine_kwargs_multiple_dicts",
"tests/test_util.py::TestUtil::test_combine_kwargs_multiple_mixed",
"tests/test_util.py::TestUtil::test_combine_kwargs_multiple_nested_dicts",
"tests/test_util.py::TestUtil::test_combine_kwargs_nested_dict",
"tests/test_util.py::TestUtil::test_combine_kwargs_single",
"tests/test_util.py::TestUtil::test_combine_kwargs_single_dict",
"tests/test_util.py::TestUtil::test_combine_kwargs_single_generator_empty",
"tests/test_util.py::TestUtil::test_combine_kwargs_single_generator_multiple_items",
"tests/test_util.py::TestUtil::test_combine_kwargs_single_generator_single_item",
"tests/test_util.py::TestUtil::test_combine_kwargs_single_list_empty",
"tests/test_util.py::TestUtil::test_combine_kwargs_single_list_multiple_items",
"tests/test_util.py::TestUtil::test_combine_kwargs_single_list_single_item",
"tests/test_util.py::TestUtil::test_combine_kwargs_super_nested_dict",
"tests/test_util.py::TestUtil::test_combine_kwargs_the_gauntlet",
"tests/test_util.py::TestUtil::test_get_institution_url",
"tests/test_util.py::TestUtil::test_is_multivalued_bool",
"tests/test_util.py::TestUtil::test_is_multivalued_bytes",
"tests/test_util.py::TestUtil::test_is_multivalued_chain",
"tests/test_util.py::TestUtil::test_is_multivalued_dict",
"tests/test_util.py::TestUtil::test_is_multivalued_dict_items",
"tests/test_util.py::TestUtil::test_is_multivalued_dict_iter",
"tests/test_util.py::TestUtil::test_is_multivalued_dict_keys",
"tests/test_util.py::TestUtil::test_is_multivalued_dict_values",
"tests/test_util.py::TestUtil::test_is_multivalued_generator_call",
"tests/test_util.py::TestUtil::test_is_multivalued_generator_expr",
"tests/test_util.py::TestUtil::test_is_multivalued_integer_types",
"tests/test_util.py::TestUtil::test_is_multivalued_list",
"tests/test_util.py::TestUtil::test_is_multivalued_list_iter",
"tests/test_util.py::TestUtil::test_is_multivalued_set",
"tests/test_util.py::TestUtil::test_is_multivalued_set_iter",
"tests/test_util.py::TestUtil::test_is_multivalued_str",
"tests/test_util.py::TestUtil::test_is_multivalued_tuple",
"tests/test_util.py::TestUtil::test_is_multivalued_tuple_iter",
"tests/test_util.py::TestUtil::test_is_multivalued_unicode",
"tests/test_util.py::TestUtil::test_is_multivalued_zip",
"tests/test_util.py::TestUtil::test_obj_or_id_int",
"tests/test_util.py::TestUtil::test_obj_or_id_obj",
"tests/test_util.py::TestUtil::test_obj_or_id_obj_no_id",
"tests/test_util.py::TestUtil::test_obj_or_id_str_invalid",
"tests/test_util.py::TestUtil::test_obj_or_id_str_valid"
] | [
"tests/test_canvas.py::TestCanvas::test_init_deprecate_url_contains_version"
] | [] | [] | MIT License | 1,922 | 590 | [
"canvasapi/canvas.py",
"canvasapi/util.py"
] |
|
samkohn__larpix-control-67 | 726cfac3948552f082bb645a6f7cdcf7d5c8cfee | 2017-11-29 09:39:51 | eb91fe77d6458a579f2b8ef362cc9e42ca1e89f1 | samkohn: @dadwyer since you proposed this idea I want to make sure you get the chance to look over the changes before I merge them in. | diff --git a/larpix/larpix.py b/larpix/larpix.py
index f1f7684..b337ec6 100644
--- a/larpix/larpix.py
+++ b/larpix/larpix.py
@@ -710,11 +710,31 @@ class Controller(object):
'''
Controls a collection of LArPix Chip objects.
+ Properties and attributes:
+
+ - ``chips``: the ``Chip`` objects that the controller controls
+ - ``all_chip``: all possible ``Chip`` objects (considering there are
+ a finite number of chip IDs), initialized on object construction
+ - ``port``: the path to the serial port, i.e. "/dev/(whatever)"
+ (default: ``'/dev/ttyUSB1'``)
+ - ``timeout``: the timeout used for serial commands, in seconds.
+ This can be changed between calls to the read and write commands.
+ (default: ``1``)
+ - ``reads``: list of all the PacketCollections that have been sent
+ back to this controller. PacketCollections are created by
+ ``run``, ``write_configuration``, and ``read_configuration``, but
+ not by any of the ``serial_*`` methods.
+ - ``use_all_chips``: if ``True``, look up chip objects in
+ ``self.all_chips``, else look up in ``self.chips`` (default:
+ ``False``)
+
'''
start_byte = b'\x73'
stop_byte = b'\x71'
def __init__(self, port='/dev/ttyUSB1'):
self.chips = []
+ self.all_chips = self._init_chips()
+ self.use_all_chips = False
self.reads = []
self.nreads = 0
self.port = port
@@ -723,15 +743,23 @@ class Controller(object):
self.max_write = 8192
self._serial = serial.Serial
- def init_chips(self, nchips = 256, iochain = 0):
- self.chips = [Chip(i, iochain) for i in range(256)]
+ def _init_chips(self, nchips = 256, iochain = 0):
+ '''
+ Return all possible chips.
+
+ '''
+ return [Chip(i, iochain) for i in range(256)]
def get_chip(self, chip_id, io_chain):
- for chip in self.chips:
+ if self.use_all_chips:
+ chip_list = self.all_chips
+ else:
+ chip_list = self.chips
+ for chip in chip_list:
if chip.chip_id == chip_id and chip.io_chain == io_chain:
return chip
- raise ValueError('Could not find chip (%d, %d)' % (chip_id,
- io_chain))
+ raise ValueError('Could not find chip (%d, %d) (using all_chips'
+ '? %s)' % (chip_id, io_chain, self.use_all_chips))
def serial_flush(self):
with self._serial(self.port, baudrate=self.baudrate,
diff --git a/larpix/tasks.py b/larpix/tasks.py
index a01da07..abace8a 100644
--- a/larpix/tasks.py
+++ b/larpix/tasks.py
@@ -58,15 +58,13 @@ def get_chip_ids(**settings):
logger.info('Executing get_chip_ids')
if 'controller' in settings:
controller = settings['controller']
- if not controller.chips:
- controller.init_chips()
else:
controller = larpix.Controller(settings['port'])
- controller.init_chips()
+ controller.use_all_chips = True
stored_timeout = controller.timeout
controller.timeout=0.1
chips = []
- for chip in controller.chips:
+ for chip in controller.all_chips:
controller.read_configuration(chip, 0, timeout=0.1)
if len(chip.reads) == 0:
print('Chip ID %d: Packet lost in black hole. No connection?' %
@@ -81,6 +79,7 @@ def get_chip_ids(**settings):
chips.append(chip)
logger.info('Found chip %s' % chip)
controller.timeout = stored_timeout
+ controller.use_all_chips = False
return chips
def simple_stats(**settings):
| Separate the functionality of "init_chips" (broadcasting) from the list of known actual chips
Controller objects should know which chips actually exist and which are just there for command broadcasting. Proposed revision:
- `controller.chips` is for known chips
- `controller.all_chips` is for all possible chips (i.e. all chip IDs combined with all known daisy chains)
- `controller.init_chips` becomes `controller._init_chips` and is called automatically during initialization to initialize `controller.all_chips` | samkohn/larpix-control | diff --git a/test/test_larpix.py b/test/test_larpix.py
index b22de84..af9e035 100644
--- a/test/test_larpix.py
+++ b/test/test_larpix.py
@@ -1461,9 +1461,8 @@ def test_configuration_from_dict_reg_reset_cycles():
def test_controller_init_chips():
controller = Controller(None)
- controller.init_chips()
- result = list(map(str, controller.chips))
- expected = list(map(str, (Chip(i, 0) for i in range(256))))
+ result = list(map(repr, controller._init_chips()))
+ expected = list(map(repr, (Chip(i, 0) for i in range(256))))
assert result == expected
def test_controller_get_chip():
@@ -1472,6 +1471,13 @@ def test_controller_get_chip():
controller.chips.append(chip)
assert controller.get_chip(1, 3) == chip
+def test_controller_get_chip_all_chips():
+ controller = Controller(None)
+ controller.use_all_chips = True
+ result = controller.get_chip(5, 0)
+ expected = controller.all_chips[5]
+ assert result == expected
+
def test_controller_get_chip_error():
controller = Controller(None)
chip = Chip(1, 3)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 1
},
"num_modified_files": 2
} | 0.7 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | bitarray==3.3.0
bitstring==4.3.1
exceptiongroup==1.2.2
iniconfig==2.1.0
-e git+https://github.com/samkohn/larpix-control.git@726cfac3948552f082bb645a6f7cdcf7d5c8cfee#egg=larpix_control
packaging==24.2
pluggy==1.5.0
pyserial==3.5
pytest==8.3.5
tomli==2.2.1
| name: larpix-control
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- bitarray==3.3.0
- bitstring==4.3.1
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- packaging==24.2
- pluggy==1.5.0
- pyserial==3.5
- pytest==8.3.5
- tomli==2.2.1
prefix: /opt/conda/envs/larpix-control
| [
"test/test_larpix.py::test_controller_init_chips",
"test/test_larpix.py::test_controller_get_chip_all_chips"
] | [
"test/test_larpix.py::test_configuration_set_pixel_trim_thresholds_errors",
"test/test_larpix.py::test_configuration_set_global_threshold_errors",
"test/test_larpix.py::test_configuration_set_csa_gain_errors",
"test/test_larpix.py::test_configuration_set_csa_bypass_errors",
"test/test_larpix.py::test_configuration_set_internal_bypass_errors",
"test/test_larpix.py::test_configuration_set_csa_bypass_select_errors",
"test/test_larpix.py::test_configuration_set_csa_monitor_select_errors",
"test/test_larpix.py::test_configuration_set_csa_testpulse_enable_errors",
"test/test_larpix.py::test_configuration_set_csa_testpulse_dac_amplitude_errors",
"test/test_larpix.py::test_configuration_set_test_mode_errors",
"test/test_larpix.py::test_configuration_set_cross_trigger_mode_errors",
"test/test_larpix.py::test_configuration_set_periodic_reset_errors",
"test/test_larpix.py::test_configuration_set_fifo_diagnostic_errors",
"test/test_larpix.py::test_configuration_set_test_burst_length_errors",
"test/test_larpix.py::test_configuration_set_adc_burst_length_errors",
"test/test_larpix.py::test_configuration_set_channel_mask_errors",
"test/test_larpix.py::test_configuration_set_external_trigger_mask_errors",
"test/test_larpix.py::test_configuration_set_reset_cycles_errors",
"test/test_larpix.py::test_configuration_write_errors",
"test/test_larpix.py::test_controller_get_chip_error",
"test/test_larpix.py::test_controller_format_UART",
"test/test_larpix.py::test_controller_format_bytestream",
"test/test_larpix.py::test_controller_write_configuration",
"test/test_larpix.py::test_controller_write_configuration_one_reg",
"test/test_larpix.py::test_controller_write_configuration_write_read",
"test/test_larpix.py::test_controller_get_configuration_bytestreams",
"test/test_larpix.py::test_controller_parse_input",
"test/test_larpix.py::test_controller_parse_input_dropped_data_byte",
"test/test_larpix.py::test_controller_parse_input_dropped_start_byte",
"test/test_larpix.py::test_controller_parse_input_dropped_stop_byte",
"test/test_larpix.py::test_controller_parse_input_dropped_stopstart_bytes"
] | [
"test/test_larpix.py::test_FakeSerialPort_write",
"test/test_larpix.py::test_FakeSerialPort_read",
"test/test_larpix.py::test_FakeSerialPort_read_multi",
"test/test_larpix.py::test_chip_str",
"test/test_larpix.py::test_chip_get_configuration_packets",
"test/test_larpix.py::test_chip_sync_configuration",
"test/test_larpix.py::test_chip_export_reads",
"test/test_larpix.py::test_chip_export_reads_no_new_reads",
"test/test_larpix.py::test_chip_export_reads_all",
"test/test_larpix.py::test_controller_save_output",
"test/test_larpix.py::test_controller_load",
"test/test_larpix.py::test_packet_bits_bytes",
"test/test_larpix.py::test_packet_init_default",
"test/test_larpix.py::test_packet_init_bytestream",
"test/test_larpix.py::test_packet_bytes_zeros",
"test/test_larpix.py::test_packet_bytes_custom",
"test/test_larpix.py::test_packet_bytes_properties",
"test/test_larpix.py::test_packet_export_test",
"test/test_larpix.py::test_packet_export_data",
"test/test_larpix.py::test_packet_export_config_read",
"test/test_larpix.py::test_packet_export_config_write",
"test/test_larpix.py::test_packet_set_packet_type",
"test/test_larpix.py::test_packet_get_packet_type",
"test/test_larpix.py::test_packet_set_chipid",
"test/test_larpix.py::test_packet_get_chipid",
"test/test_larpix.py::test_packet_set_parity_bit_value",
"test/test_larpix.py::test_packet_get_parity_bit_value",
"test/test_larpix.py::test_packet_compute_parity",
"test/test_larpix.py::test_packet_assign_parity",
"test/test_larpix.py::test_packet_has_valid_parity",
"test/test_larpix.py::test_packet_set_channel_id",
"test/test_larpix.py::test_packet_get_channel_id",
"test/test_larpix.py::test_packet_set_timestamp",
"test/test_larpix.py::test_packet_get_timestamp",
"test/test_larpix.py::test_packet_set_dataword",
"test/test_larpix.py::test_packet_get_dataword",
"test/test_larpix.py::test_packet_get_dataword_ADC_bug",
"test/test_larpix.py::test_packet_set_fifo_half_flag",
"test/test_larpix.py::test_packet_get_fifo_half_flag",
"test/test_larpix.py::test_packet_set_fifo_full_flag",
"test/test_larpix.py::test_packet_get_fifo_full_flag",
"test/test_larpix.py::test_packet_set_register_address",
"test/test_larpix.py::test_packet_get_register_address",
"test/test_larpix.py::test_packet_set_register_data",
"test/test_larpix.py::test_packet_get_register_data",
"test/test_larpix.py::test_packet_set_test_counter",
"test/test_larpix.py::test_packet_get_test_counter",
"test/test_larpix.py::test_configuration_get_nondefault_registers",
"test/test_larpix.py::test_configuration_get_nondefault_registers_array",
"test/test_larpix.py::test_configuration_get_nondefault_registers_many_changes",
"test/test_larpix.py::test_configuration_set_pixel_trim_thresholds",
"test/test_larpix.py::test_configuration_get_pixel_trim_thresholds",
"test/test_larpix.py::test_configuration_set_global_threshold",
"test/test_larpix.py::test_configuration_get_global_threshold",
"test/test_larpix.py::test_configuration_set_csa_gain",
"test/test_larpix.py::test_configuration_get_csa_gain",
"test/test_larpix.py::test_configuration_set_csa_bypass",
"test/test_larpix.py::test_configuration_get_csa_bypass",
"test/test_larpix.py::test_configuration_set_internal_bypass",
"test/test_larpix.py::test_configuration_get_internal_bypass",
"test/test_larpix.py::test_configuration_set_csa_bypass_select",
"test/test_larpix.py::test_configuration_get_csa_bypass_select",
"test/test_larpix.py::test_configuration_set_csa_monitor_select",
"test/test_larpix.py::test_configuration_get_csa_monitor_select",
"test/test_larpix.py::test_configuration_set_csa_testpulse_enable",
"test/test_larpix.py::test_configuration_get_csa_testpulse_enable",
"test/test_larpix.py::test_configuration_set_csa_testpulse_dac_amplitude",
"test/test_larpix.py::test_configuration_get_csa_testpulse_dac_amplitude",
"test/test_larpix.py::test_configuration_set_test_mode",
"test/test_larpix.py::test_configuration_get_test_mode",
"test/test_larpix.py::test_configuration_set_cross_trigger_mode",
"test/test_larpix.py::test_configuration_get_cross_trigger_mode",
"test/test_larpix.py::test_configuration_set_periodic_reset",
"test/test_larpix.py::test_configuration_get_periodic_reset",
"test/test_larpix.py::test_configuration_set_fifo_diagnostic",
"test/test_larpix.py::test_configuration_get_fifo_diagnostic",
"test/test_larpix.py::test_configuration_set_test_burst_length",
"test/test_larpix.py::test_configuration_get_test_burst_length",
"test/test_larpix.py::test_configuration_set_adc_burst_length",
"test/test_larpix.py::test_configuration_get_adc_burst_length",
"test/test_larpix.py::test_configuration_set_channel_mask",
"test/test_larpix.py::test_configuration_get_channel_mask",
"test/test_larpix.py::test_configuration_set_external_trigger_mask",
"test/test_larpix.py::test_configuration_get_external_trigger_mask",
"test/test_larpix.py::test_configuration_set_reset_cycles",
"test/test_larpix.py::test_configuration_get_reset_cycles",
"test/test_larpix.py::test_configuration_disable_channels",
"test/test_larpix.py::test_configuration_disable_channels_default",
"test/test_larpix.py::test_configuration_enable_channels",
"test/test_larpix.py::test_configuration_enable_channels_default",
"test/test_larpix.py::test_configuration_enable_external_trigger",
"test/test_larpix.py::test_configuration_enable_external_trigger_default",
"test/test_larpix.py::test_configuration_disable_external_trigger",
"test/test_larpix.py::test_configuration_enable_testpulse",
"test/test_larpix.py::test_configuration_enable_testpulse_default",
"test/test_larpix.py::test_configuration_disable_testpulse",
"test/test_larpix.py::test_configuration_disable_testpulse_default",
"test/test_larpix.py::test_configuration_enable_analog_monitor",
"test/test_larpix.py::test_configuration_disable_analog_monitor",
"test/test_larpix.py::test_configuration_trim_threshold_data",
"test/test_larpix.py::test_configuration_global_threshold_data",
"test/test_larpix.py::test_configuration_csa_gain_and_bypasses_data",
"test/test_larpix.py::test_configuration_csa_bypass_select_data",
"test/test_larpix.py::test_configuration_csa_monitor_select_data",
"test/test_larpix.py::test_configuration_csa_testpulse_enable_data",
"test/test_larpix.py::test_configuration_csa_testpulse_dac_amplitude_data",
"test/test_larpix.py::test_configuration_test_mode_xtrig_reset_diag_data",
"test/test_larpix.py::test_configuration_sample_cycles_data",
"test/test_larpix.py::test_configuration_test_burst_length_data",
"test/test_larpix.py::test_configuration_adc_burst_length_data",
"test/test_larpix.py::test_configuration_channel_mask_data",
"test/test_larpix.py::test_configuration_external_trigger_mask_data",
"test/test_larpix.py::test_configuration_reset_cycles_data",
"test/test_larpix.py::test_configuration_to_dict",
"test/test_larpix.py::test_configuration_from_dict",
"test/test_larpix.py::test_configuration_write",
"test/test_larpix.py::test_configuration_write_force",
"test/test_larpix.py::test_configuration_read_absolute",
"test/test_larpix.py::test_configuration_read_default",
"test/test_larpix.py::test_configuration_read_local",
"test/test_larpix.py::test_configuration_from_dict_reg_pixel_trim",
"test/test_larpix.py::test_configuration_from_dict_reg_global_threshold",
"test/test_larpix.py::test_configuration_from_dict_reg_csa_gain",
"test/test_larpix.py::test_configuration_from_dict_reg_csa_bypass",
"test/test_larpix.py::test_configuration_from_dict_reg_internal_bypass",
"test/test_larpix.py::test_configuration_from_dict_reg_csa_bypass_select",
"test/test_larpix.py::test_configuration_from_dict_reg_csa_monitor_select",
"test/test_larpix.py::test_configuration_from_dict_reg_csa_testpulse_enable",
"test/test_larpix.py::test_configuration_from_dict_reg_csa_testpulse_dac_amplitude",
"test/test_larpix.py::test_configuration_from_dict_reg_test_mode",
"test/test_larpix.py::test_configuration_from_dict_reg_cross_trigger_mode",
"test/test_larpix.py::test_configuration_from_dict_reg_periodic_reset",
"test/test_larpix.py::test_configuration_from_dict_reg_fifo_diagnostic",
"test/test_larpix.py::test_configuration_from_dict_reg_sample_cycles",
"test/test_larpix.py::test_configuration_from_dict_reg_test_burst_length",
"test/test_larpix.py::test_configuration_from_dict_reg_adc_burst_length",
"test/test_larpix.py::test_configuration_from_dict_reg_channel_mask",
"test/test_larpix.py::test_configuration_from_dict_reg_external_trigger_mask",
"test/test_larpix.py::test_configuration_from_dict_reg_reset_cycles",
"test/test_larpix.py::test_controller_get_chip",
"test/test_larpix.py::test_controller_serial_read_mock",
"test/test_larpix.py::test_controller_serial_write_mock",
"test/test_larpix.py::test_controller_serial_write_read_mock",
"test/test_larpix.py::test_packetcollection_getitem_int",
"test/test_larpix.py::test_packetcollection_getitem_int_bits",
"test/test_larpix.py::test_packetcollection_getitem_slice",
"test/test_larpix.py::test_packetcollection_getitem_slice_bits",
"test/test_larpix.py::test_packetcollection_origin",
"test/test_larpix.py::test_packetcollection_to_dict",
"test/test_larpix.py::test_packetcollection_from_dict"
] | [] | null | 1,925 | 1,030 | [
"larpix/larpix.py",
"larpix/tasks.py"
] |
lepture__mistune-143 | cef69acaa506567595e95ab6ecea25a806de622e | 2017-11-29 17:11:39 | c674108105e3419c6bfdf247c6082a9c6f5852fb | diff --git a/mistune.py b/mistune.py
index 5b05fcb..175ff01 100644
--- a/mistune.py
+++ b/mistune.py
@@ -503,7 +503,7 @@ class InlineLexer(object):
'linebreak', 'strikethrough', 'text',
]
inline_html_rules = [
- 'escape', 'autolink', 'url', 'link', 'reflink',
+ 'escape', 'inline_html', 'autolink', 'url', 'link', 'reflink',
'nolink', 'double_emphasis', 'emphasis', 'code',
'linebreak', 'strikethrough', 'text',
]
| nested html links are broken with parse_block_html
this is a variant of #81
example_in = '<div><a href="https://example.com">Example.com</a></div>'
mistune.markdown(example_in, escape=False, parse_block_html=True)
will generate:
<div><a href="<a href="https://example.com">Example.com">https://example.com">Example.com</a></a></div>
if escape is toggled to True, it is also broken:
'<div><a href="<a href="https://example.com">Example.com">https://example.com">Example.com</a></a></div>
| lepture/mistune | diff --git a/tests/test_extra.py b/tests/test_extra.py
index c1d37d9..3cb9799 100644
--- a/tests/test_extra.py
+++ b/tests/test_extra.py
@@ -116,6 +116,21 @@ def test_parse_block_html():
assert '<strong>' not in ret
+def test_parse_nested_html():
+ ret = mistune.markdown(
+ '<div><a href="http://example.org">**foo**</a></div>',
+ parse_block_html=True, escape=False
+ )
+ assert '<div><a href="http://example.org">' in ret
+ assert '<strong>' not in ret
+
+ ret = mistune.markdown(
+ '<div><a href="http://example.org">**foo**</a></div>',
+ parse_block_html=True, parse_inline_html=True, escape=False
+ )
+ assert '<div><a href="http://example.org"><strong>' in ret
+
+
def test_trigger_more_cases():
markdown = mistune.Markdown(
inline=mistune.InlineLexer,
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_issue_reference"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 1
} | 0.8 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"nose",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
-e git+https://github.com/lepture/mistune.git@cef69acaa506567595e95ab6ecea25a806de622e#egg=mistune
nose==1.3.7
packaging @ file:///croot/packaging_1734472117206/work
pluggy @ file:///croot/pluggy_1733169602837/work
pytest @ file:///croot/pytest_1738938843180/work
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
| name: mistune
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- nose==1.3.7
prefix: /opt/conda/envs/mistune
| [
"tests/test_extra.py::test_parse_nested_html"
] | [] | [
"tests/test_extra.py::test_escape",
"tests/test_extra.py::test_linebreak",
"tests/test_extra.py::test_safe_links",
"tests/test_extra.py::test_skip_style",
"tests/test_extra.py::test_use_xhtml",
"tests/test_extra.py::test_parse_inline_html",
"tests/test_extra.py::test_block_html",
"tests/test_extra.py::test_parse_block_html",
"tests/test_extra.py::test_trigger_more_cases",
"tests/test_extra.py::test_not_escape_block_tags",
"tests/test_extra.py::test_not_escape_inline_tags",
"tests/test_extra.py::test_hard_wrap_renderer"
] | [] | BSD 3-Clause "New" or "Revised" License | 1,928 | 169 | [
"mistune.py"
] |
|
samkohn__larpix-control-74 | 345deb77ced183828f951008ad804a06819ccd52 | 2017-11-30 10:39:37 | eb91fe77d6458a579f2b8ef362cc9e42ca1e89f1 | diff --git a/larpix/larpix.py b/larpix/larpix.py
index d4ae36b..a0efae8 100644
--- a/larpix/larpix.py
+++ b/larpix/larpix.py
@@ -1332,7 +1332,7 @@ class PacketCollection(object):
d['message'] = str(self.message)
d['read_id'] = 'None' if self.read_id is None else self.read_id
d['bytestream'] = ('None' if self.bytestream is None else
- self.bytestream.decode('utf-8'))
+ self.bytestream.decode('raw_unicode_escape'))
return d
def from_dict(self, d):
@@ -1342,7 +1342,7 @@ class PacketCollection(object):
'''
self.message = d['message']
self.read_id = d['read_id']
- self.bytestream = d['bytestream'].encode('utf-8')
+ self.bytestream = d['bytestream'].encode('raw_unicode_escape')
self.parent = None
self.packets = []
for p in d['packets']:
| Some bytestreams don't parse well to JSON in Python 2 (PacketCollection.to_dict)
``UnicodeDecodeError: 'utf8' codec can't decode byte 0xd8 in position 1: invalid continuation byte``. Indeed byte ``b"\xd8"`` is in position 1. I think I'll need to insert some Python version logic into the to_dict and from_dict methods. | samkohn/larpix-control | diff --git a/test/test_larpix.py b/test/test_larpix.py
index af9e035..8c5165b 100644
--- a/test/test_larpix.py
+++ b/test/test_larpix.py
@@ -189,7 +189,7 @@ def test_controller_save_output(tmpdir):
'parent': 'None',
'message': 'hi',
'read_id': 0,
- 'bytestream': p.bytes().decode('utf-8')
+ 'bytestream': p.bytes().decode('raw_unicode_escape')
}
]
}
@@ -1699,6 +1699,7 @@ def test_packetcollection_origin():
def test_packetcollection_to_dict():
packet = Packet()
+ packet.chipid = 246
packet.packet_type = Packet.TEST_PACKET
collection = PacketCollection([packet], bytestream=packet.bytes(),
message='hello')
@@ -1708,11 +1709,11 @@ def test_packetcollection_to_dict():
'parent': 'None',
'message': 'hello',
'read_id': 'None',
- 'bytestream': packet.bytes().decode('utf-8'),
+ 'bytestream': packet.bytes().decode('raw_unicode_escape'),
'packets': [{
'bits': packet.bits.bin,
'type': 'test',
- 'chipid': 0,
+ 'chipid': packet.chipid,
'parity': 0,
'valid_parity': True,
'counter': 0
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 1
} | 0.7 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pyserial bitstring pytest",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | bitarray @ file:///home/conda/feedstock_root/build_artifacts/bitarray_1741686834964/work
bitstring @ file:///home/conda/feedstock_root/build_artifacts/bitstring_1742657504808/work
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
-e git+https://github.com/samkohn/larpix-control.git@345deb77ced183828f951008ad804a06819ccd52#egg=larpix_control
packaging @ file:///croot/packaging_1734472117206/work
pluggy @ file:///croot/pluggy_1733169602837/work
pyserial @ file:///croot/pyserial_1736540546229/work
pytest @ file:///croot/pytest_1738938843180/work
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
| name: larpix-control
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- bitarray=3.1.1=py39h8cd3c5a_0
- bitstring=4.3.1=pyhd8ed1ab_0
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc=14.2.0=h767d61c_2
- libgcc-ng=14.2.0=h69a702a_2
- libgomp=14.2.0=h767d61c_2
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pyserial=3.5=py39h06a4308_2
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- python_abi=3.9=2_cp39
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
prefix: /opt/conda/envs/larpix-control
| [
"test/test_larpix.py::test_packetcollection_to_dict"
] | [
"test/test_larpix.py::test_configuration_set_pixel_trim_thresholds_errors",
"test/test_larpix.py::test_configuration_set_global_threshold_errors",
"test/test_larpix.py::test_configuration_set_csa_gain_errors",
"test/test_larpix.py::test_configuration_set_csa_bypass_errors",
"test/test_larpix.py::test_configuration_set_internal_bypass_errors",
"test/test_larpix.py::test_configuration_set_csa_bypass_select_errors",
"test/test_larpix.py::test_configuration_set_csa_monitor_select_errors",
"test/test_larpix.py::test_configuration_set_csa_testpulse_enable_errors",
"test/test_larpix.py::test_configuration_set_csa_testpulse_dac_amplitude_errors",
"test/test_larpix.py::test_configuration_set_test_mode_errors",
"test/test_larpix.py::test_configuration_set_cross_trigger_mode_errors",
"test/test_larpix.py::test_configuration_set_periodic_reset_errors",
"test/test_larpix.py::test_configuration_set_fifo_diagnostic_errors",
"test/test_larpix.py::test_configuration_set_test_burst_length_errors",
"test/test_larpix.py::test_configuration_set_adc_burst_length_errors",
"test/test_larpix.py::test_configuration_set_channel_mask_errors",
"test/test_larpix.py::test_configuration_set_external_trigger_mask_errors",
"test/test_larpix.py::test_configuration_set_reset_cycles_errors",
"test/test_larpix.py::test_configuration_write_errors",
"test/test_larpix.py::test_controller_get_chip_error",
"test/test_larpix.py::test_controller_format_UART",
"test/test_larpix.py::test_controller_format_bytestream",
"test/test_larpix.py::test_controller_write_configuration",
"test/test_larpix.py::test_controller_write_configuration_one_reg",
"test/test_larpix.py::test_controller_write_configuration_write_read",
"test/test_larpix.py::test_controller_get_configuration_bytestreams",
"test/test_larpix.py::test_controller_parse_input",
"test/test_larpix.py::test_controller_parse_input_dropped_data_byte",
"test/test_larpix.py::test_controller_parse_input_dropped_start_byte",
"test/test_larpix.py::test_controller_parse_input_dropped_stop_byte",
"test/test_larpix.py::test_controller_parse_input_dropped_stopstart_bytes"
] | [
"test/test_larpix.py::test_FakeSerialPort_write",
"test/test_larpix.py::test_FakeSerialPort_read",
"test/test_larpix.py::test_FakeSerialPort_read_multi",
"test/test_larpix.py::test_chip_str",
"test/test_larpix.py::test_chip_get_configuration_packets",
"test/test_larpix.py::test_chip_sync_configuration",
"test/test_larpix.py::test_chip_export_reads",
"test/test_larpix.py::test_chip_export_reads_no_new_reads",
"test/test_larpix.py::test_chip_export_reads_all",
"test/test_larpix.py::test_controller_save_output",
"test/test_larpix.py::test_controller_load",
"test/test_larpix.py::test_packet_bits_bytes",
"test/test_larpix.py::test_packet_init_default",
"test/test_larpix.py::test_packet_init_bytestream",
"test/test_larpix.py::test_packet_bytes_zeros",
"test/test_larpix.py::test_packet_bytes_custom",
"test/test_larpix.py::test_packet_bytes_properties",
"test/test_larpix.py::test_packet_export_test",
"test/test_larpix.py::test_packet_export_data",
"test/test_larpix.py::test_packet_export_config_read",
"test/test_larpix.py::test_packet_export_config_write",
"test/test_larpix.py::test_packet_set_packet_type",
"test/test_larpix.py::test_packet_get_packet_type",
"test/test_larpix.py::test_packet_set_chipid",
"test/test_larpix.py::test_packet_get_chipid",
"test/test_larpix.py::test_packet_set_parity_bit_value",
"test/test_larpix.py::test_packet_get_parity_bit_value",
"test/test_larpix.py::test_packet_compute_parity",
"test/test_larpix.py::test_packet_assign_parity",
"test/test_larpix.py::test_packet_has_valid_parity",
"test/test_larpix.py::test_packet_set_channel_id",
"test/test_larpix.py::test_packet_get_channel_id",
"test/test_larpix.py::test_packet_set_timestamp",
"test/test_larpix.py::test_packet_get_timestamp",
"test/test_larpix.py::test_packet_set_dataword",
"test/test_larpix.py::test_packet_get_dataword",
"test/test_larpix.py::test_packet_get_dataword_ADC_bug",
"test/test_larpix.py::test_packet_set_fifo_half_flag",
"test/test_larpix.py::test_packet_get_fifo_half_flag",
"test/test_larpix.py::test_packet_set_fifo_full_flag",
"test/test_larpix.py::test_packet_get_fifo_full_flag",
"test/test_larpix.py::test_packet_set_register_address",
"test/test_larpix.py::test_packet_get_register_address",
"test/test_larpix.py::test_packet_set_register_data",
"test/test_larpix.py::test_packet_get_register_data",
"test/test_larpix.py::test_packet_set_test_counter",
"test/test_larpix.py::test_packet_get_test_counter",
"test/test_larpix.py::test_configuration_get_nondefault_registers",
"test/test_larpix.py::test_configuration_get_nondefault_registers_array",
"test/test_larpix.py::test_configuration_get_nondefault_registers_many_changes",
"test/test_larpix.py::test_configuration_set_pixel_trim_thresholds",
"test/test_larpix.py::test_configuration_get_pixel_trim_thresholds",
"test/test_larpix.py::test_configuration_set_global_threshold",
"test/test_larpix.py::test_configuration_get_global_threshold",
"test/test_larpix.py::test_configuration_set_csa_gain",
"test/test_larpix.py::test_configuration_get_csa_gain",
"test/test_larpix.py::test_configuration_set_csa_bypass",
"test/test_larpix.py::test_configuration_get_csa_bypass",
"test/test_larpix.py::test_configuration_set_internal_bypass",
"test/test_larpix.py::test_configuration_get_internal_bypass",
"test/test_larpix.py::test_configuration_set_csa_bypass_select",
"test/test_larpix.py::test_configuration_get_csa_bypass_select",
"test/test_larpix.py::test_configuration_set_csa_monitor_select",
"test/test_larpix.py::test_configuration_get_csa_monitor_select",
"test/test_larpix.py::test_configuration_set_csa_testpulse_enable",
"test/test_larpix.py::test_configuration_get_csa_testpulse_enable",
"test/test_larpix.py::test_configuration_set_csa_testpulse_dac_amplitude",
"test/test_larpix.py::test_configuration_get_csa_testpulse_dac_amplitude",
"test/test_larpix.py::test_configuration_set_test_mode",
"test/test_larpix.py::test_configuration_get_test_mode",
"test/test_larpix.py::test_configuration_set_cross_trigger_mode",
"test/test_larpix.py::test_configuration_get_cross_trigger_mode",
"test/test_larpix.py::test_configuration_set_periodic_reset",
"test/test_larpix.py::test_configuration_get_periodic_reset",
"test/test_larpix.py::test_configuration_set_fifo_diagnostic",
"test/test_larpix.py::test_configuration_get_fifo_diagnostic",
"test/test_larpix.py::test_configuration_set_test_burst_length",
"test/test_larpix.py::test_configuration_get_test_burst_length",
"test/test_larpix.py::test_configuration_set_adc_burst_length",
"test/test_larpix.py::test_configuration_get_adc_burst_length",
"test/test_larpix.py::test_configuration_set_channel_mask",
"test/test_larpix.py::test_configuration_get_channel_mask",
"test/test_larpix.py::test_configuration_set_external_trigger_mask",
"test/test_larpix.py::test_configuration_get_external_trigger_mask",
"test/test_larpix.py::test_configuration_set_reset_cycles",
"test/test_larpix.py::test_configuration_get_reset_cycles",
"test/test_larpix.py::test_configuration_disable_channels",
"test/test_larpix.py::test_configuration_disable_channels_default",
"test/test_larpix.py::test_configuration_enable_channels",
"test/test_larpix.py::test_configuration_enable_channels_default",
"test/test_larpix.py::test_configuration_enable_external_trigger",
"test/test_larpix.py::test_configuration_enable_external_trigger_default",
"test/test_larpix.py::test_configuration_disable_external_trigger",
"test/test_larpix.py::test_configuration_enable_testpulse",
"test/test_larpix.py::test_configuration_enable_testpulse_default",
"test/test_larpix.py::test_configuration_disable_testpulse",
"test/test_larpix.py::test_configuration_disable_testpulse_default",
"test/test_larpix.py::test_configuration_enable_analog_monitor",
"test/test_larpix.py::test_configuration_disable_analog_monitor",
"test/test_larpix.py::test_configuration_trim_threshold_data",
"test/test_larpix.py::test_configuration_global_threshold_data",
"test/test_larpix.py::test_configuration_csa_gain_and_bypasses_data",
"test/test_larpix.py::test_configuration_csa_bypass_select_data",
"test/test_larpix.py::test_configuration_csa_monitor_select_data",
"test/test_larpix.py::test_configuration_csa_testpulse_enable_data",
"test/test_larpix.py::test_configuration_csa_testpulse_dac_amplitude_data",
"test/test_larpix.py::test_configuration_test_mode_xtrig_reset_diag_data",
"test/test_larpix.py::test_configuration_sample_cycles_data",
"test/test_larpix.py::test_configuration_test_burst_length_data",
"test/test_larpix.py::test_configuration_adc_burst_length_data",
"test/test_larpix.py::test_configuration_channel_mask_data",
"test/test_larpix.py::test_configuration_external_trigger_mask_data",
"test/test_larpix.py::test_configuration_reset_cycles_data",
"test/test_larpix.py::test_configuration_to_dict",
"test/test_larpix.py::test_configuration_from_dict",
"test/test_larpix.py::test_configuration_write",
"test/test_larpix.py::test_configuration_write_force",
"test/test_larpix.py::test_configuration_read_absolute",
"test/test_larpix.py::test_configuration_read_default",
"test/test_larpix.py::test_configuration_read_local",
"test/test_larpix.py::test_configuration_from_dict_reg_pixel_trim",
"test/test_larpix.py::test_configuration_from_dict_reg_global_threshold",
"test/test_larpix.py::test_configuration_from_dict_reg_csa_gain",
"test/test_larpix.py::test_configuration_from_dict_reg_csa_bypass",
"test/test_larpix.py::test_configuration_from_dict_reg_internal_bypass",
"test/test_larpix.py::test_configuration_from_dict_reg_csa_bypass_select",
"test/test_larpix.py::test_configuration_from_dict_reg_csa_monitor_select",
"test/test_larpix.py::test_configuration_from_dict_reg_csa_testpulse_enable",
"test/test_larpix.py::test_configuration_from_dict_reg_csa_testpulse_dac_amplitude",
"test/test_larpix.py::test_configuration_from_dict_reg_test_mode",
"test/test_larpix.py::test_configuration_from_dict_reg_cross_trigger_mode",
"test/test_larpix.py::test_configuration_from_dict_reg_periodic_reset",
"test/test_larpix.py::test_configuration_from_dict_reg_fifo_diagnostic",
"test/test_larpix.py::test_configuration_from_dict_reg_sample_cycles",
"test/test_larpix.py::test_configuration_from_dict_reg_test_burst_length",
"test/test_larpix.py::test_configuration_from_dict_reg_adc_burst_length",
"test/test_larpix.py::test_configuration_from_dict_reg_channel_mask",
"test/test_larpix.py::test_configuration_from_dict_reg_external_trigger_mask",
"test/test_larpix.py::test_configuration_from_dict_reg_reset_cycles",
"test/test_larpix.py::test_controller_init_chips",
"test/test_larpix.py::test_controller_get_chip",
"test/test_larpix.py::test_controller_get_chip_all_chips",
"test/test_larpix.py::test_controller_serial_read_mock",
"test/test_larpix.py::test_controller_serial_write_mock",
"test/test_larpix.py::test_controller_serial_write_read_mock",
"test/test_larpix.py::test_packetcollection_getitem_int",
"test/test_larpix.py::test_packetcollection_getitem_int_bits",
"test/test_larpix.py::test_packetcollection_getitem_slice",
"test/test_larpix.py::test_packetcollection_getitem_slice_bits",
"test/test_larpix.py::test_packetcollection_origin",
"test/test_larpix.py::test_packetcollection_from_dict"
] | [] | null | 1,929 | 269 | [
"larpix/larpix.py"
] |
|
discos__simulators-103 | cec38fb58da280acfd66707e8c6438bb07b39d88 | 2017-12-01 16:15:18 | cec38fb58da280acfd66707e8c6438bb07b39d88 | coveralls:
[](https://coveralls.io/builds/14460334)
Coverage decreased (-0.4%) to 97.977% when pulling **7e009b99ec957c63ed43f3ad059a07d895642162 on fix-issue-102** into **cec38fb58da280acfd66707e8c6438bb07b39d88 on master**.
codecov-io: # [Codecov](https://codecov.io/gh/discos/simulators/pull/103?src=pr&el=h1) Report
> Merging [#103](https://codecov.io/gh/discos/simulators/pull/103?src=pr&el=desc) into [master](https://codecov.io/gh/discos/simulators/commit/cec38fb58da280acfd66707e8c6438bb07b39d88?src=pr&el=desc) will **increase** coverage by `0.23%`.
> The diff coverage is `100%`.
[](https://codecov.io/gh/discos/simulators/pull/103?src=pr&el=tree)
```diff
@@ Coverage Diff @@
## master #103 +/- ##
==========================================
+ Coverage 98.41% 98.65% +0.23%
==========================================
Files 11 11
Lines 2209 2300 +91
==========================================
+ Hits 2174 2269 +95
+ Misses 35 31 -4
```
| [Impacted Files](https://codecov.io/gh/discos/simulators/pull/103?src=pr&el=tree) | Coverage Δ | |
|---|---|---|
| [simulators/utils.py](https://codecov.io/gh/discos/simulators/pull/103/diff?src=pr&el=tree#diff-c2ltdWxhdG9ycy91dGlscy5weQ==) | `100% <100%> (+5.79%)` | :arrow_up: |
| [tests/test\_utils.py](https://codecov.io/gh/discos/simulators/pull/103/diff?src=pr&el=tree#diff-dGVzdHMvdGVzdF91dGlscy5weQ==) | `100% <100%> (ø)` | :arrow_up: |
------
[Continue to review full report at Codecov](https://codecov.io/gh/discos/simulators/pull/103?src=pr&el=continue).
> **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta)
> `Δ = absolute <relative> (impact)`, `ø = not affected`, `? = missing data`
> Powered by [Codecov](https://codecov.io/gh/discos/simulators/pull/103?src=pr&el=footer). Last update [cec38fb...313820c](https://codecov.io/gh/discos/simulators/pull/103?src=pr&el=lastupdated). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments).
coveralls:
[](https://coveralls.io/builds/14489780)
Coverage increased (+0.2%) to 98.652% when pulling **313820c0c109ef8f53c68f432379dd07a6b51b5a on fix-issue-102** into **cec38fb58da280acfd66707e8c6438bb07b39d88 on master**.
| diff --git a/simulators/utils.py b/simulators/utils.py
index 1b7462c..35cf994 100644
--- a/simulators/utils.py
+++ b/simulators/utils.py
@@ -1,3 +1,4 @@
+#!/usr/bin/python
import math
import struct
from datetime import datetime
@@ -144,16 +145,59 @@ def real_to_binary(num, precision=1):
)
-def mjd():
- """Return the modified julian date.
- For more informations about modified julian date check the following link:
- https://bowie.gsfc.nasa.gov/time/"""
+def real_to_bytes(num, precision=1):
+ """Return the bytestring representation of a floating-point number
+ (IEEE 754 standard)."""
+ binary_number = real_to_binary(num, precision)
+ return binary_to_bytes(binary_number)
- utcnow = datetime.utcnow()
- year = utcnow.year
- month = utcnow.month
- day = utcnow.day
+def bytes_to_real(bytes_real, precision=1):
+ """Return the floating-point representation (IEEE 754 standard)
+ of bytestring number."""
+ if precision == 1:
+ return struct.unpack('!f', bytes_real)[0]
+ elif precision == 2:
+ return struct.unpack('!d', bytes_real)[0]
+ else:
+ raise ValueError(
+ "Unknown precision %d."
+ % (precision)
+ )
+
+
+def int_to_bytes(val, n_bytes=4):
+ """Return the bytestring representation of a given signed integer."""
+ return binary_to_bytes(int_to_twos(val, n_bytes))
+
+
+def uint_to_bytes(val, n_bytes=4):
+ """Return the bytestring representation of a given unsigned integer."""
+ n_bits = 8 * n_bytes
+ min_range = 0
+ max_range = int(math.pow(2, n_bits)) - 1
+
+ if val < min_range or val > max_range:
+ raise ValueError(
+ "%d out of range (%d, %d)."
+ % (val, min_range, max_range)
+ )
+
+ return binary_to_bytes(bin(val)[2:].zfill(n_bytes * 8))
+
+
+def mjd(time=datetime.utcnow()):
+ """Returns the modified julian date (MJD) of a given datetime object.
+ If no datetime object is given, it returns the current MJD.
+ For more informations about modified julian date check the following link:
+ https://bowie.gsfc.nasa.gov/time/"""
+ year = time.year
+ month = time.month
+ day = time.day
+ hour = time.hour
+ minute = time.minute
+ second = time.second
+ microsecond = time.microsecond
if month == 1 or month == 2:
yearp = year - 1
@@ -173,11 +217,7 @@ def mjd():
a = math.trunc(yearp / 100.)
b = 2 - a + math.trunc(a / 4.)
- if yearp < 0:
- c = math.trunc((365.25 * yearp) - 0.75)
- else:
- c = math.trunc(365.25 * yearp)
-
+ c = math.trunc(365.25 * yearp)
d = math.trunc(30.6001 * (monthp + 1))
jd = b + c + d + day + 1720994.5
@@ -185,13 +225,13 @@ def mjd():
modified_julian_day = jd - 2400000.5
# Total UTC hours of the day
- day_hours = utcnow.hour
+ day_hours = hour
# Total minutes of the day
- day_minutes = (day_hours * 60) + utcnow.minute
+ day_minutes = (day_hours * 60) + minute
# Total seconds of the day
- day_seconds = (day_minutes * 60) + utcnow.second
+ day_seconds = (day_minutes * 60) + second
# Total microseconds of the day
- day_microseconds = (day_seconds * 1000000) + utcnow.microsecond
+ day_microseconds = (day_seconds * 1000000) + microsecond
# Day percentage, 00:00 = 0.0, 24:00=1.0
day_percentage = round(float(day_microseconds) / 86400000000, 6)
| Enhance `utils.py` to provide functions useful to the `acu.py` module. | discos/simulators | diff --git a/tests/test_utils.py b/tests/test_utils.py
index 65a8878..0f24101 100644
--- a/tests/test_utils.py
+++ b/tests/test_utils.py
@@ -1,4 +1,5 @@
import unittest
+from datetime import datetime
from simulators import utils
@@ -40,10 +41,23 @@ class TestServer(unittest.TestCase):
with self.assertRaises(ValueError):
utils.int_to_twos(4294967295)
- def test_mjd(self):
+ def test_mjd_now(self):
"""Make sure that the datatype of the response is the correct one."""
self.assertIsInstance(utils.mjd(), float)
+ def test_mjd_given_date(self):
+ """Return the modified julian date of a given datetime object."""
+ time = datetime(2017, 12, 4, 13, 51, 10, 162534)
+ result = utils.mjd(time)
+ expected_result = 58091.577201
+ self.assertEqual(result, expected_result)
+
+ def test_mjd_old_date(self):
+ time = datetime(1500, 1, 1, 12, 0, 0, 0)
+ result = utils.mjd(time)
+ expected_result = -131067.5
+ self.assertEqual(result, expected_result)
+
def test_day_milliseconds(self):
"""Make sure that the datatype of the response is the correct one.
Also make sure that the returned value is inside the expected range."""
@@ -88,7 +102,6 @@ class TestServer(unittest.TestCase):
self.assertEqual(result, expected_result)
def test_real_to_binary_double_precision(self):
- """Convert a real number to its binary representation."""
number = 3.14159265358979323846264338327950288419716939937510582097494
result = utils.real_to_binary(number, 2)
expected_result = (
@@ -109,5 +122,86 @@ class TestServer(unittest.TestCase):
with self.assertRaises(ValueError):
utils.real_to_binary(number, 3)
+ def test_real_to_bytes_single_precision(self):
+ """Convert a real number to a string of bytes."""
+ number = 45.12371938725634
+ result = utils.real_to_bytes(number)
+ expected_result = b'\x42\x34\x7E\xB0'
+ self.assertEqual(result, expected_result)
+
+ def test_real_to_bytes_double_precision(self):
+ number = 3.14159265358979323846264338327950288419716939937510582097494
+ result = utils.real_to_bytes(number, 2)
+ expected_result = b'\x40\x09\x21\xFB\x54\x44\x2D\x18'
+ self.assertEqual(result, expected_result)
+
+ def test_real_to_bytes_unknown_precision(self):
+ number = 3267.135248123736
+ with self.assertRaises(ValueError):
+ utils.real_to_binary(number, 3)
+
+ def test_bytes_to_real_single_precision(self):
+ """Convert a string of bytes to a floating point number."""
+ byte_string = b'\x42\x34\x7E\xB0'
+ result = utils.bytes_to_real(byte_string)
+ expected_result = 45.12371826171875
+ self.assertEqual(result, expected_result)
+
+ def test_bytes_to_real_double_precision(self):
+ byte_string = b'\x40\x09\x21\xFB\x54\x44\x2D\x18'
+ result = utils.bytes_to_real(byte_string, 2)
+ expected_result = (
+ 3.14159265358979323846264338327950288419716939937510582097494
+ )
+ self.assertEqual(result, expected_result)
+
+ def test_bytes_to_real_unknown_precision(self):
+ byte_string = b'\xDA\x35\xF7\x65'
+ with self.assertRaises(ValueError):
+ utils.bytes_to_real(byte_string, 3)
+
+ def test_int_to_bytes_positive(self):
+ """Convert a signed integer to a string of bytes."""
+ number = 232144
+ result = utils.int_to_bytes(number)
+ expected_result = b'\x00\x03\x8A\xD0'
+ self.assertEqual(result, expected_result)
+
+ def test_int_to_bytes_negative(self):
+ number = -4522764
+ result = utils.int_to_bytes(number)
+ expected_result = b'\xFF\xBA\xFC\xF4'
+ self.assertEqual(result, expected_result)
+
+ def test_int_to_bytes_out_of_range(self):
+ number = 36273463
+ with self.assertRaises(ValueError):
+ utils.int_to_bytes(number, 2)
+
+ def test_int_to_bytes_wrong(self):
+ number = 6814627
+ result = utils.int_to_bytes(number)
+ wrong_expected_result = b'\xFF\x98\x04\x5D'
+ self.assertNotEqual(result, wrong_expected_result)
+
+ def test_uint_to_bytes(self):
+ """Convert an unsigned integer to a string of bytes."""
+ number = 1284639736
+ result = utils.uint_to_bytes(number)
+ expected_result = b'\x4C\x92\x0B\xF8'
+ self.assertEqual(result, expected_result)
+
+ def test_uint_to_bytes_out_of_range(self):
+ number = 13463672713
+ with self.assertRaises(ValueError):
+ utils.uint_to_bytes(number)
+
+ def test_uint_to_bytes_wrong(self):
+ number = 1235326152
+ result = utils.uint_to_bytes(number)
+ wrong_expected_result = b'\x00\x34\xAE\xDD'
+ self.assertNotEqual(result, wrong_expected_result)
+
+
if __name__ == '__main__':
unittest.main()
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 1
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[test]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"coverage",
"prospector",
"sphinx",
"sphinx_rtd_theme",
"tox",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.16
astroid==3.3.9
babel==2.17.0
cachetools==5.5.2
certifi==2025.1.31
chardet==5.2.0
charset-normalizer==3.4.1
colorama==0.4.6
coverage==7.8.0
dill==0.3.9
-e git+https://github.com/discos/simulators.git@cec38fb58da280acfd66707e8c6438bb07b39d88#egg=discos_simulators
distlib==0.3.9
docutils==0.21.2
dodgy==0.2.1
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
filelock==3.18.0
flake8==7.2.0
flake8-polyfill==1.0.2
gitdb==4.0.12
GitPython==3.1.44
idna==3.10
imagesize==1.4.1
importlib_metadata==8.6.1
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
isort==6.0.1
Jinja2==3.1.6
MarkupSafe==3.0.2
mccabe==0.7.0
packaging @ file:///croot/packaging_1734472117206/work
pep8-naming==0.10.0
platformdirs==4.3.7
pluggy @ file:///croot/pluggy_1733169602837/work
prospector==1.16.1
pycodestyle==2.13.0
pydocstyle==6.3.0
pyflakes==3.3.2
Pygments==2.19.1
pylint==3.3.6
pylint-celery==0.3
pylint-django==2.6.1
pylint-plugin-utils==0.8.2
pyproject-api==1.9.0
pytest @ file:///croot/pytest_1738938843180/work
PyYAML==6.0.2
requests==2.32.3
requirements-detector==1.3.2
semver==3.0.4
setoptconf-tmp==0.3.1
smmap==5.0.2
snowballstemmer==2.2.0
Sphinx==7.4.7
sphinx-rtd-theme==3.0.2
sphinxcontrib-applehelp==2.0.0
sphinxcontrib-devhelp==2.0.0
sphinxcontrib-htmlhelp==2.1.0
sphinxcontrib-jquery==4.1
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==2.0.0
sphinxcontrib-serializinghtml==2.0.0
toml==0.10.2
tomli==2.2.1
tomlkit==0.13.2
tox==4.25.0
typing_extensions==4.13.0
urllib3==2.3.0
virtualenv==20.29.3
zipp==3.21.0
| name: simulators
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.16
- astroid==3.3.9
- babel==2.17.0
- cachetools==5.5.2
- certifi==2025.1.31
- chardet==5.2.0
- charset-normalizer==3.4.1
- colorama==0.4.6
- coverage==7.8.0
- dill==0.3.9
- distlib==0.3.9
- docutils==0.21.2
- dodgy==0.2.1
- filelock==3.18.0
- flake8==7.2.0
- flake8-polyfill==1.0.2
- gitdb==4.0.12
- gitpython==3.1.44
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==8.6.1
- isort==6.0.1
- jinja2==3.1.6
- markupsafe==3.0.2
- mccabe==0.7.0
- pep8-naming==0.10.0
- platformdirs==4.3.7
- prospector==1.16.1
- pycodestyle==2.13.0
- pydocstyle==6.3.0
- pyflakes==3.3.2
- pygments==2.19.1
- pylint==3.3.6
- pylint-celery==0.3
- pylint-django==2.6.1
- pylint-plugin-utils==0.8.2
- pyproject-api==1.9.0
- pyyaml==6.0.2
- requests==2.32.3
- requirements-detector==1.3.2
- semver==3.0.4
- setoptconf-tmp==0.3.1
- smmap==5.0.2
- snowballstemmer==2.2.0
- sphinx==7.4.7
- sphinx-rtd-theme==3.0.2
- sphinxcontrib-applehelp==2.0.0
- sphinxcontrib-devhelp==2.0.0
- sphinxcontrib-htmlhelp==2.1.0
- sphinxcontrib-jquery==4.1
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==2.0.0
- sphinxcontrib-serializinghtml==2.0.0
- toml==0.10.2
- tomli==2.2.1
- tomlkit==0.13.2
- tox==4.25.0
- typing-extensions==4.13.0
- urllib3==2.3.0
- virtualenv==20.29.3
- zipp==3.21.0
prefix: /opt/conda/envs/simulators
| [
"tests/test_utils.py::TestServer::test_bytes_to_real_double_precision",
"tests/test_utils.py::TestServer::test_bytes_to_real_single_precision",
"tests/test_utils.py::TestServer::test_bytes_to_real_unknown_precision",
"tests/test_utils.py::TestServer::test_int_to_bytes_out_of_range",
"tests/test_utils.py::TestServer::test_mjd_given_date",
"tests/test_utils.py::TestServer::test_mjd_old_date",
"tests/test_utils.py::TestServer::test_uint_to_bytes_out_of_range"
] | [
"tests/test_utils.py::TestServer::test_binary_to_bytes_correct",
"tests/test_utils.py::TestServer::test_binary_to_bytes_wrong",
"tests/test_utils.py::TestServer::test_bytes_to_int_correct",
"tests/test_utils.py::TestServer::test_bytes_to_int_wrong",
"tests/test_utils.py::TestServer::test_day_milliseconds",
"tests/test_utils.py::TestServer::test_int_to_bytes_negative",
"tests/test_utils.py::TestServer::test_int_to_bytes_positive",
"tests/test_utils.py::TestServer::test_int_to_bytes_wrong",
"tests/test_utils.py::TestServer::test_real_to_binary_double_precision",
"tests/test_utils.py::TestServer::test_real_to_binary_single_precision",
"tests/test_utils.py::TestServer::test_real_to_binary_wrong",
"tests/test_utils.py::TestServer::test_real_to_bytes_double_precision",
"tests/test_utils.py::TestServer::test_real_to_bytes_single_precision",
"tests/test_utils.py::TestServer::test_uint_to_bytes",
"tests/test_utils.py::TestServer::test_uint_to_bytes_wrong"
] | [
"tests/test_utils.py::TestServer::test_int_to_twos",
"tests/test_utils.py::TestServer::test_mjd_now",
"tests/test_utils.py::TestServer::test_out_of_range_int_to_twos",
"tests/test_utils.py::TestServer::test_real_to_binary_unknown_precision",
"tests/test_utils.py::TestServer::test_real_to_bytes_unknown_precision",
"tests/test_utils.py::TestServer::test_right_checksum",
"tests/test_utils.py::TestServer::test_right_twos_to_int",
"tests/test_utils.py::TestServer::test_wrong_checksum",
"tests/test_utils.py::TestServer::test_wrong_twos_to_int"
] | [] | null | 1,931 | 1,105 | [
"simulators/utils.py"
] |
lbl-srg__BuildingsPy-183 | ad2f3e7ffb0a01117e5f09ac498a87b5c02ca158 | 2017-12-05 15:34:13 | 923b1087e255f7f35224aa7c1653abf9c038f849 | diff --git a/buildingspy/development/error_dictionary.py b/buildingspy/development/error_dictionary.py
index 2304f63..4a3c8d3 100644
--- a/buildingspy/development/error_dictionary.py
+++ b/buildingspy/development/error_dictionary.py
@@ -138,6 +138,13 @@ class ErrorDictionary(object):
'model_message': "\"inner Modelica.StateGraph.StateGraphRoot\" is missing in '{}'.\n",
'summary_message': "Number of models with missing StateGraphRoot : {}\n"}
+ self._error_dict["mismatched displayUnits"] = {
+ 'tool_message': "Mismatched displayUnit",
+ 'counter': 0,
+ 'buildingspy_var': "iMisDisUni",
+ 'model_message': "\"Mismatched displayUnit in '{}'.\n",
+ 'summary_message': "Number of models with mismatched displayUnit : {}\n"}
+
def get_dictionary(self):
""" Return the dictionary with all error data
"""
diff --git a/buildingspy/development/refactor.py b/buildingspy/development/refactor.py
index ba4d363..2c064b0 100644
--- a/buildingspy/development/refactor.py
+++ b/buildingspy/development/refactor.py
@@ -637,6 +637,10 @@ def move_class(source, target):
"""
##############################################################
+ # First, remove empty subdirectories
+ _remove_empty_folders(source.replace(".", os.path.sep),
+ removeRoot=False)
+ ##############################################################
# Check if it is a directory with a package.mo file
if os.path.isdir(source.replace(".", os.path.sep)):
_move_class_directory(source, target)
@@ -665,6 +669,26 @@ def move_class(source, target):
_update_all_references(source, target)
+def _remove_empty_folders(path, removeRoot=True):
+ ''' Remove empty directories
+ '''
+ if not os.path.isdir(path):
+ return
+
+ # remove empty subfolders
+ files = os.listdir(path)
+ if len(files):
+ for f in files:
+ fullpath = os.path.join(path, f)
+ if os.path.isdir(fullpath):
+ _remove_empty_folders(fullpath)
+
+ # if folder empty, delete it
+ files = os.listdir(path)
+ if len(files) == 0 and removeRoot:
+ os.rmdir(path)
+
+
def _update_all_references(source, target):
""" Updates all references in `.mo` and `.mos` files.
diff --git a/buildingspy/fmi/__init__.py b/buildingspy/fmi/__init__.py
index 4efbae7..7bb1d9d 100644
--- a/buildingspy/fmi/__init__.py
+++ b/buildingspy/fmi/__init__.py
@@ -58,8 +58,6 @@ def get_dependencies(fmu_file_name):
]
},
"InitialUnknowns": {
- "CPUtime": [],
- "EventCounter": [],
"der(x)": [
"u"
],
@@ -72,8 +70,6 @@ def get_dependencies(fmu_file_name):
]
},
"Outputs": {
- "CPUtime": [],
- "EventCounter": [],
"y1": [
"x"
],
@@ -120,8 +116,13 @@ def get_dependencies(fmu_file_name):
#this_root = outputs
for child in children:
variable = variable_names[int(child.attrib['index'])]
- dependencies[typ][variable] = []
- for ind_var in child.attrib['dependencies'].split(' '):
- if ind_var.strip() != "": # If variables depend on nothing, there will be an empty string
- dependencies[typ][variable].append(variable_names[int(ind_var)])
+ # Exclude CPUtime and EventCounter, which are written
+ # depending on the Dymola 2018FD01 configuration.
+ if variable not in ["CPUtime", "EventCounter"]:
+ dependencies[typ][variable] = []
+ for ind_var in child.attrib['dependencies'].split(' '):
+ # If variables depend on nothing, there will be an empty string, these
+ # are therefore excluded.
+ if ind_var.strip() != "":
+ dependencies[typ][variable].append(variable_names[int(ind_var)])
return dependencies
| In reference results, exclude CPUtime and EventCounter in FMI dependencies
These are only written based on the configuration of Dymola. As they are only output of the solver, they should be excluded from the reference results. | lbl-srg/BuildingsPy | diff --git a/buildingspy/tests/test_development_error_dictionary.py b/buildingspy/tests/test_development_error_dictionary.py
index 393e2cc..ee9d12b 100644
--- a/buildingspy/tests/test_development_error_dictionary.py
+++ b/buildingspy/tests/test_development_error_dictionary.py
@@ -39,7 +39,8 @@ class Test_development_error_dictionary(unittest.TestCase):
'type inconsistent definition equations',
'unspecified initial conditions',
'unused connector',
- 'stateGraphRoot missing'])
+ 'stateGraphRoot missing',
+ 'mismatched displayUnits'])
self.assertEqual(len(k), len(k_expected), "Wrong number of keys.")
for i in range(len(k)):
@@ -63,7 +64,8 @@ class Test_development_error_dictionary(unittest.TestCase):
'Type inconsistent definition equation',
'Dymola has selected default initial condition',
'Warning: The following connector variables are not used in the model',
- "A \\\"stateGraphRoot\\\" component was automatically introduced."])
+ "A \\\"stateGraphRoot\\\" component was automatically introduced.",
+ "Mismatched displayUnit"])
self.assertEqual(len(k), len(k_expected), "Wrong number of tool messages.")
for i in range(len(k)):
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 3
} | 1.6 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"autopep8",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc tidy"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
autopep8==2.0.4
-e git+https://github.com/lbl-srg/BuildingsPy.git@ad2f3e7ffb0a01117e5f09ac498a87b5c02ca158#egg=buildingspy
certifi==2021.5.30
future==1.0.0
gitdb==4.0.9
GitPython==3.1.18
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
Jinja2==3.0.3
MarkupSafe==2.0.1
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pycodestyle==2.10.0
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
pytidylib==0.3.2
smmap==5.0.0
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
tomli==1.2.3
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: BuildingsPy
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- autopep8==2.0.4
- future==1.0.0
- gitdb==4.0.9
- gitpython==3.1.18
- jinja2==3.0.3
- markupsafe==2.0.1
- pycodestyle==2.10.0
- pytidylib==0.3.2
- smmap==5.0.0
- tomli==1.2.3
prefix: /opt/conda/envs/BuildingsPy
| [
"buildingspy/tests/test_development_error_dictionary.py::Test_development_error_dictionary::test_keys",
"buildingspy/tests/test_development_error_dictionary.py::Test_development_error_dictionary::test_tool_messages"
] | [] | [] | [] | null | 1,934 | 1,015 | [
"buildingspy/development/error_dictionary.py",
"buildingspy/development/refactor.py",
"buildingspy/fmi/__init__.py"
] |
|
dpkp__kafka-python-1312 | 141b6b29609f9594ad9d3d3302a0123d1b831261 | 2017-12-06 00:48:48 | 618c5051493693c1305aa9f08e8a0583d5fcf0e3 | diff --git a/kafka/client_async.py b/kafka/client_async.py
index e36d78e..1350503 100644
--- a/kafka/client_async.py
+++ b/kafka/client_async.py
@@ -947,7 +947,7 @@ class DelayedTaskQueue(object):
"""Number of seconds until next task is ready."""
self._drop_removed()
if not self._tasks:
- return 9999999999
+ return float('inf')
else:
return max(self._tasks[0][0] - time.time(), 0)
diff --git a/kafka/conn.py b/kafka/conn.py
index 51a007c..2926e2f 100644
--- a/kafka/conn.py
+++ b/kafka/conn.py
@@ -251,67 +251,42 @@ class BrokerConnection(object):
self._sasl_auth_future = None
self.last_attempt = 0
self._gai = None
- self._gai_index = 0
self._sensors = None
if self.config['metrics']:
self._sensors = BrokerConnectionMetrics(self.config['metrics'],
self.config['metric_group_prefix'],
self.node_id)
+ def _next_afi_host_port(self):
+ if not self._gai:
+ self._gai = dns_lookup(self._init_host, self._init_port, self._init_afi)
+ if not self._gai:
+ log.error('DNS lookup failed for %s:%i (%s)',
+ self._init_host, self._init_port, self._init_afi)
+ return
+
+ afi, _, __, ___, sockaddr = self._gai.pop(0)
+ host, port = sockaddr[:2]
+ return (afi, host, port)
+
def connect(self):
"""Attempt to connect and return ConnectionState"""
if self.state is ConnectionStates.DISCONNECTED:
- log.debug('%s: creating new socket', self)
- # if self.afi is set to AF_UNSPEC, then we need to do a name
- # resolution and try all available address families
- if self._init_afi == socket.AF_UNSPEC:
- if self._gai is None:
- # XXX: all DNS functions in Python are blocking. If we really
- # want to be non-blocking here, we need to use a 3rd-party
- # library like python-adns, or move resolution onto its
- # own thread. This will be subject to the default libc
- # name resolution timeout (5s on most Linux boxes)
- try:
- self._gai = socket.getaddrinfo(self._init_host,
- self._init_port,
- socket.AF_UNSPEC,
- socket.SOCK_STREAM)
- except socket.gaierror as ex:
- log.warning('DNS lookup failed for %s:%d,'
- ' exception was %s. Is your'
- ' advertised.listeners (called'
- ' advertised.host.name before Kafka 9)'
- ' correct and resolvable?',
- self._init_host, self._init_port, ex)
- self._gai = []
- self._gai_index = 0
- else:
- # if self._gai already exists, then we should try the next
- # name
- self._gai_index += 1
- while True:
- if self._gai_index >= len(self._gai):
- error = 'Unable to connect to any of the names for {0}:{1}'.format(
- self._init_host, self._init_port)
- log.error(error)
- self.close(Errors.ConnectionError(error))
- return
- afi, _, __, ___, sockaddr = self._gai[self._gai_index]
- if afi not in (socket.AF_INET, socket.AF_INET6):
- self._gai_index += 1
- continue
- break
- self.host, self.port = sockaddr[:2]
- self._sock = socket.socket(afi, socket.SOCK_STREAM)
+ self.last_attempt = time.time()
+ next_lookup = self._next_afi_host_port()
+ if not next_lookup:
+ self.close(Errors.ConnectionError('DNS failure'))
+ return
else:
- self._sock = socket.socket(self._init_afi, socket.SOCK_STREAM)
+ log.debug('%s: creating new socket', self)
+ self.afi, self.host, self.port = next_lookup
+ self._sock = socket.socket(self.afi, socket.SOCK_STREAM)
for option in self.config['socket_options']:
log.debug('%s: setting socket option %s', self, option)
self._sock.setsockopt(*option)
self._sock.setblocking(False)
- self.last_attempt = time.time()
self.state = ConnectionStates.CONNECTING
if self.config['security_protocol'] in ('SSL', 'SASL_SSL'):
self._wrap_ssl()
@@ -328,11 +303,6 @@ class BrokerConnection(object):
ret = None
try:
ret = self._sock.connect_ex((self.host, self.port))
- # if we got here through a host lookup, we've found a host,port,af tuple
- # that works save it so we don't do a GAI lookup again
- if self._gai is not None:
- self.afi = self._sock.family
- self._gai = None
except socket.error as err:
ret = err.errno
@@ -607,7 +577,7 @@ class BrokerConnection(object):
elif self.connecting():
return 0
else:
- return 999999999
+ return float('inf')
def connected(self):
"""Return True iff socket is connected."""
@@ -645,23 +615,15 @@ class BrokerConnection(object):
will be failed with this exception.
Default: kafka.errors.ConnectionError.
"""
- if self.state is ConnectionStates.DISCONNECTED:
- if error is not None:
- if sys.version_info >= (3, 2):
- log.warning('%s: close() called on disconnected connection with error: %s', self, error, stack_info=True)
- else:
- log.warning('%s: close() called on disconnected connection with error: %s', self, error)
- return
-
log.info('%s: Closing connection. %s', self, error or '')
- self.state = ConnectionStates.DISCONNECTING
- self.config['state_change_callback'](self)
+ if self.state is not ConnectionStates.DISCONNECTED:
+ self.state = ConnectionStates.DISCONNECTING
+ self.config['state_change_callback'](self)
self._update_reconnect_backoff()
if self._sock:
self._sock.close()
self._sock = None
self.state = ConnectionStates.DISCONNECTED
- self.last_attempt = time.time()
self._sasl_auth_future = None
self._protocol = KafkaProtocol(
client_id=self.config['client_id'],
@@ -747,13 +709,12 @@ class BrokerConnection(object):
return ()
# augment respones w/ correlation_id, future, and timestamp
- for i in range(len(responses)):
+ for i, response in enumerate(responses):
(correlation_id, future, timestamp) = self.in_flight_requests.popleft()
latency_ms = (time.time() - timestamp) * 1000
if self._sensors:
self._sensors.request_time.record(latency_ms)
- response = responses[i]
log.debug('%s Response %d (%s ms): %s', self, correlation_id, latency_ms, response)
responses[i] = (response, future)
@@ -1171,3 +1132,29 @@ def collect_hosts(hosts, randomize=True):
shuffle(result)
return result
+
+
+def is_inet_4_or_6(gai):
+ """Given a getaddrinfo struct, return True iff ipv4 or ipv6"""
+ return gai[0] in (socket.AF_INET, socket.AF_INET6)
+
+
+def dns_lookup(host, port, afi=socket.AF_UNSPEC):
+ """Returns a list of getaddrinfo structs, optionally filtered to an afi (ipv4 / ipv6)"""
+ # XXX: all DNS functions in Python are blocking. If we really
+ # want to be non-blocking here, we need to use a 3rd-party
+ # library like python-adns, or move resolution onto its
+ # own thread. This will be subject to the default libc
+ # name resolution timeout (5s on most Linux boxes)
+ try:
+ return list(filter(is_inet_4_or_6,
+ socket.getaddrinfo(host, port, afi,
+ socket.SOCK_STREAM)))
+ except socket.gaierror as ex:
+ log.warning('DNS lookup failed for %s:%d,'
+ ' exception was %s. Is your'
+ ' advertised.listeners (called'
+ ' advertised.host.name before Kafka 9)'
+ ' correct and resolvable?',
+ host, port, ex)
+ return []
diff --git a/kafka/producer/kafka.py b/kafka/producer/kafka.py
index 646e773..5d32b13 100644
--- a/kafka/producer/kafka.py
+++ b/kafka/producer/kafka.py
@@ -437,7 +437,7 @@ class KafkaProducer(object):
return
if timeout is None:
# threading.TIMEOUT_MAX is available in Python3.3+
- timeout = getattr(threading, 'TIMEOUT_MAX', 999999999)
+ timeout = getattr(threading, 'TIMEOUT_MAX', float('inf'))
if getattr(threading, 'TIMEOUT_MAX', False):
assert 0 <= timeout <= getattr(threading, 'TIMEOUT_MAX')
else:
diff --git a/kafka/producer/sender.py b/kafka/producer/sender.py
index ffc67f8..48ad06e 100644
--- a/kafka/producer/sender.py
+++ b/kafka/producer/sender.py
@@ -103,7 +103,7 @@ class Sender(threading.Thread):
self._metadata.request_update()
# remove any nodes we aren't ready to send to
- not_ready_timeout = 999999999
+ not_ready_timeout = float('inf')
for node in list(ready_nodes):
if not self._client.ready(node):
log.debug('Node %s not ready; delaying produce of accumulated batch', node)
diff --git a/kafka/util.py b/kafka/util.py
index de8f228..385fd56 100644
--- a/kafka/util.py
+++ b/kafka/util.py
@@ -12,14 +12,21 @@ from kafka.vendor import six
from kafka.errors import BufferUnderflowError
-def crc32(data):
- crc = binascii.crc32(data)
- # py2 and py3 behave a little differently
- # CRC is encoded as a signed int in kafka protocol
- # so we'll convert the py3 unsigned result to signed
- if six.PY3 and crc >= 2**31:
- crc -= 2**32
- return crc
+if six.PY3:
+ MAX_INT = 2 ** 31
+ TO_SIGNED = 2 ** 32
+
+ def crc32(data):
+ crc = binascii.crc32(data)
+ # py2 and py3 behave a little differently
+ # CRC is encoded as a signed int in kafka protocol
+ # so we'll convert the py3 unsigned result to signed
+ if crc >= MAX_INT:
+ crc -= TO_SIGNED
+ return crc
+else:
+ def crc32(data):
+ return binascii.crc32(data)
def write_int_string(s):
| KafkaConsumer stuck in infinite loop on connection error
It seems to be stuck in this loop https://github.com/dpkp/kafka-python/blob/34dc9dd2fe6b47f4542c5a131e0e0cbc1b00ed80/kafka/conn.py#L294
The consumer filled up ~1TB logs over the course of 3 days, but did not throw an exception. Example logs:
```kafka.conn ERROR Unable to connect to any of the names for kafka-4-broker.example.com:9092
kafka.cluster INFO Group coordinator for my-group is BrokerMetadata(nodeId=102, host=u'kafka-2-broker.example.com', port=9092, rack=None)
kafka.cluster INFO Group coordinator for my-group is BrokerMetadata(nodeId=102, host=u'kafka-2-broker.example.com', port=9092, rack=None)
kafka.conn ERROR Unable to connect to any of the names for kafka-4-broker.example.com:9092
kafka.conn WARNING <BrokerConnection node_id=104 host=kafka-4-broker.example.com/kafka-4-broker.example.com port=9092>: close() called on disconnected connection with error: ConnectionError: Unable to connect to any of the names for kafka-4-broker.example.com:9092
kafka.cluster INFO Group coordinator for my-group is BrokerMetadata(nodeId=102, host=u'kafka-2-broker.example.com', port=9092, rack=None)
kafka.cluster INFO Group coordinator for my-group is BrokerMetadata(nodeId=102, host=u'kafka-2-broker.example.com', port=9092, rack=None)
kafka.coordinator INFO Discovered coordinator 102 for group my-group
kafka.conn ERROR Unable to connect to any of the names for kafka-1-broker.example.com:9092
kafka.coordinator INFO Discovered coordinator 102 for group my-group
kafka.cluster INFO Group coordinator for my-group is BrokerMetadata(nodeId=102, host=u'kafka-2-broker.example.com', port=9092, rack=None)
kafka.conn ERROR Unable to connect to any of the names for kafka-2-broker.example.com:9092
kafka.conn WARNING <BrokerConnection node_id=104 host=kafka-4-broker.example.com/kafka-4-broker.example.com port=9092>: close() called on disconnected connection with error: ConnectionError: Unable to connect to any of the names for kafka-4-broker.example.com:9092
kafka.cluster INFO Group coordinator for my-group is BrokerMetadata(nodeId=102, host=u'kafka-2-broker.example.com', port=9092, rack=None)
kafka.conn WARNING <BrokerConnection node_id=101 host=kafka-1-broker.example.com/kafka-1-broker.example.com port=9092>: close() called on disconnected connection with error: ConnectionError: Unable to connect to any of the names for kafka-1-broker.example.com:9092
kafka.conn ERROR Unable to connect to any of the names for kafka-2-broker.example.com:9092
kafka.conn ERROR Unable to connect to any of the names for kafka-2-broker.example.com:9092
kafka.coordinator INFO Discovered coordinator 102 for group my-group
kafka.conn ERROR Unable to connect to any of the names for kafka-3-broker.example.com:9092
kafka.coordinator INFO Discovered coordinator 102 for group my-group
kafka.conn WARNING <BrokerConnection node_id=102 host=kafka-2-broker.example.com/kafka-2-broker.example.com port=9092>: close() called on disconnected connection with error: ConnectionError: Unable to connect to any of the names for kafka-2-broker.example.com:9092
kafka.conn WARNING <BrokerConnection node_id=103 host=kafka-3-broker.example.com/kafka-3-broker.example.com port=9092>: close() called on disconnected connection with error: ConnectionError: Unable to connect to any of the names for kafka-3-broker.example.com:9092
kafka.coordinator INFO Discovered coordinator 102 for group my-group
kafka.coordinator INFO Discovered coordinator 102 for group my-group
kafka.conn WARNING <BrokerConnection node_id=102 host=kafka-2-broker.example.com/kafka-2-broker.example.com port=9092>: close() called on disconnected connection with error: ConnectionError: Unable to connect to any of the names for kafka-2-broker.example.com:9092
``` | dpkp/kafka-python | diff --git a/test/test_conn.py b/test/test_conn.py
index 1621e60..ef7925a 100644
--- a/test/test_conn.py
+++ b/test/test_conn.py
@@ -267,3 +267,28 @@ def test_lookup_on_connect():
m.assert_called_once_with(hostname, port, 0, 1)
conn.close()
assert conn.host == ip2
+
+
+def test_relookup_on_failure():
+ hostname = 'example.org'
+ port = 9092
+ conn = BrokerConnection(hostname, port, socket.AF_UNSPEC)
+ assert conn.host == conn.hostname == hostname
+ mock_return1 = []
+ with mock.patch("socket.getaddrinfo", return_value=mock_return1) as m:
+ last_attempt = conn.last_attempt
+ conn.connect()
+ m.assert_called_once_with(hostname, port, 0, 1)
+ assert conn.disconnected()
+ assert conn.last_attempt > last_attempt
+
+ ip2 = '127.0.0.2'
+ mock_return2 = [
+ (2, 2, 17, '', (ip2, 9092)),
+ ]
+
+ with mock.patch("socket.getaddrinfo", return_value=mock_return2) as m:
+ conn.connect()
+ m.assert_called_once_with(hostname, port, 0, 1)
+ conn.close()
+ assert conn.host == ip2
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 5
} | 1.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest pytest-cov pytest-catchlog pytest-pylint pytest-sugar pytest-mock mock python-snappy lz4tools xxhash",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc libsnappy-dev"
],
"python": "3.6",
"reqs_path": [
"docs/requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
astroid==2.11.7
attrs==22.2.0
Babel==2.11.0
certifi==2021.5.30
charset-normalizer==2.0.12
coverage==6.2
cramjam==2.5.0
dill==0.3.4
docutils==0.18.1
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
iniconfig==1.1.1
isort==5.10.1
Jinja2==3.0.3
-e git+https://github.com/dpkp/kafka-python.git@141b6b29609f9594ad9d3d3302a0123d1b831261#egg=kafka_python
lazy-object-proxy==1.7.1
lz4tools==1.3.1.2
MarkupSafe==2.0.1
mccabe==0.7.0
mock==5.2.0
packaging==21.3
platformdirs==2.4.0
pluggy==1.0.0
pockets==0.9.1
py==1.11.0
Pygments==2.14.0
pylint==2.13.9
pyparsing==3.1.4
pytest==7.0.1
pytest-catchlog==1.2.2
pytest-cov==4.0.0
pytest-mock==3.6.1
pytest-pylint==0.18.0
pytest-sugar==0.9.6
python-snappy==0.7.3
pytz==2025.2
requests==2.27.1
six==1.17.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinx-rtd-theme==2.0.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jquery==4.1
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-napoleon==0.7
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
termcolor==1.1.0
toml==0.10.2
tomli==1.2.3
typed-ast==1.5.5
typing_extensions==4.1.1
urllib3==1.26.20
wrapt==1.16.0
xxhash==3.2.0
zipp==3.6.0
| name: kafka-python
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- astroid==2.11.7
- attrs==22.2.0
- babel==2.11.0
- charset-normalizer==2.0.12
- coverage==6.2
- cramjam==2.5.0
- dill==0.3.4
- docutils==0.18.1
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- isort==5.10.1
- jinja2==3.0.3
- lazy-object-proxy==1.7.1
- lz4tools==1.3.1.2
- markupsafe==2.0.1
- mccabe==0.7.0
- mock==5.2.0
- packaging==21.3
- platformdirs==2.4.0
- pluggy==1.0.0
- pockets==0.9.1
- py==1.11.0
- pygments==2.14.0
- pylint==2.13.9
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-catchlog==1.2.2
- pytest-cov==4.0.0
- pytest-mock==3.6.1
- pytest-pylint==0.18.0
- pytest-sugar==0.9.6
- python-snappy==0.7.3
- pytz==2025.2
- requests==2.27.1
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinx-rtd-theme==2.0.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jquery==4.1
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-napoleon==0.7
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- termcolor==1.1.0
- toml==0.10.2
- tomli==1.2.3
- typed-ast==1.5.5
- typing-extensions==4.1.1
- urllib3==1.26.20
- wrapt==1.16.0
- xxhash==3.2.0
- zipp==3.6.0
prefix: /opt/conda/envs/kafka-python
| [
"test/test_conn.py::test_relookup_on_failure"
] | [] | [
"test/test_conn.py::test_connect[states0]",
"test/test_conn.py::test_connect[states1]",
"test/test_conn.py::test_connect[states2]",
"test/test_conn.py::test_connect[states3]",
"test/test_conn.py::test_connect[states4]",
"test/test_conn.py::test_connect_timeout",
"test/test_conn.py::test_blacked_out",
"test/test_conn.py::test_connected",
"test/test_conn.py::test_connecting",
"test/test_conn.py::test_send_disconnected",
"test/test_conn.py::test_send_connecting",
"test/test_conn.py::test_send_max_ifr",
"test/test_conn.py::test_send_no_response",
"test/test_conn.py::test_send_response",
"test/test_conn.py::test_send_error",
"test/test_conn.py::test_can_send_more",
"test/test_conn.py::test_recv_disconnected",
"test/test_conn.py::test_recv",
"test/test_conn.py::test_close",
"test/test_conn.py::test_collect_hosts__happy_path",
"test/test_conn.py::test_collect_hosts__ipv6",
"test/test_conn.py::test_collect_hosts__string_list",
"test/test_conn.py::test_collect_hosts__with_spaces",
"test/test_conn.py::test_lookup_on_connect"
] | [] | Apache License 2.0 | 1,935 | 2,767 | [
"kafka/client_async.py",
"kafka/conn.py",
"kafka/producer/kafka.py",
"kafka/producer/sender.py",
"kafka/util.py"
] |
|
falconry__falcon-1157 | 064232a826070c8ff528b739e560917e7da67e1e | 2017-12-07 00:46:11 | 919fd3f5a3129d04f1c7d23f5eff440ec4598e35 | codecov[bot]: # [Codecov](https://codecov.io/gh/falconry/falcon/pull/1157?src=pr&el=h1) Report
> Merging [#1157](https://codecov.io/gh/falconry/falcon/pull/1157?src=pr&el=desc) into [master](https://codecov.io/gh/falconry/falcon/commit/064232a826070c8ff528b739e560917e7da67e1e?src=pr&el=desc) will **not change** coverage.
> The diff coverage is `100%`.
[](https://codecov.io/gh/falconry/falcon/pull/1157?src=pr&el=tree)
```diff
@@ Coverage Diff @@
## master #1157 +/- ##
======================================
Coverage 100% 100%
======================================
Files 36 37 +1
Lines 2378 2421 +43
Branches 347 353 +6
======================================
+ Hits 2378 2421 +43
```
| [Impacted Files](https://codecov.io/gh/falconry/falcon/pull/1157?src=pr&el=tree) | Coverage Δ | |
|---|---|---|
| [falcon/response\_helpers.py](https://codecov.io/gh/falconry/falcon/pull/1157/diff?src=pr&el=tree#diff-ZmFsY29uL3Jlc3BvbnNlX2hlbHBlcnMucHk=) | `100% <100%> (ø)` | :arrow_up: |
| [falcon/routing/static.py](https://codecov.io/gh/falconry/falcon/pull/1157/diff?src=pr&el=tree#diff-ZmFsY29uL3JvdXRpbmcvc3RhdGljLnB5) | `100% <100%> (ø)` | |
| [falcon/routing/\_\_init\_\_.py](https://codecov.io/gh/falconry/falcon/pull/1157/diff?src=pr&el=tree#diff-ZmFsY29uL3JvdXRpbmcvX19pbml0X18ucHk=) | `100% <100%> (ø)` | :arrow_up: |
| [falcon/response.py](https://codecov.io/gh/falconry/falcon/pull/1157/diff?src=pr&el=tree#diff-ZmFsY29uL3Jlc3BvbnNlLnB5) | `100% <100%> (ø)` | :arrow_up: |
------
[Continue to review full report at Codecov](https://codecov.io/gh/falconry/falcon/pull/1157?src=pr&el=continue).
> **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta)
> `Δ = absolute <relative> (impact)`, `ø = not affected`, `? = missing data`
> Powered by [Codecov](https://codecov.io/gh/falconry/falcon/pull/1157?src=pr&el=footer). Last update [064232a...869d993](https://codecov.io/gh/falconry/falcon/pull/1157?src=pr&el=lastupdated). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments).
| diff --git a/falcon/api.py b/falcon/api.py
index beb4fff..a003f6d 100644
--- a/falcon/api.py
+++ b/falcon/api.py
@@ -140,7 +140,8 @@ class API(object):
__slots__ = ('_request_type', '_response_type',
'_error_handlers', '_media_type', '_router', '_sinks',
'_serialize_error', 'req_options', 'resp_options',
- '_middleware', '_independent_middleware', '_router_search')
+ '_middleware', '_independent_middleware', '_router_search',
+ '_static_routes')
def __init__(self, media_type=DEFAULT_MEDIA_TYPE,
request_type=Request, response_type=Response,
@@ -148,6 +149,7 @@ class API(object):
independent_middleware=False):
self._sinks = []
self._media_type = media_type
+ self._static_routes = []
# set middleware
self._middleware = helpers.prepare_middleware(
@@ -350,6 +352,53 @@ class API(object):
self._router.add_route(uri_template, method_map, resource, *args,
**kwargs)
+ def add_static_route(self, prefix, directory, downloadable=False):
+ """Add a route to a directory of static files.
+
+ Static routes provide a way to serve files directly. This
+ feature provides an alternative to serving files at the web server
+ level when you don't have that option, when authorization is
+ required, or for testing purposes.
+
+ Warning:
+ Serving files directly from the web server,
+ rather than through the Python app, will always be more efficient,
+ and therefore should be preferred in production deployments.
+
+ Static routes are matched in LIFO order. Therefore, if the same
+ prefix is used for two routes, the second one will override the
+ first. This also means that more specific routes should be added
+ *after* less specific ones. For example, the following sequence
+ would result in ``'/foo/bar/thing.js'`` being mapped to the
+ ``'/foo/bar'`` route, and ``'/foo/xyz/thing.js'`` being mapped to the
+ ``'/foo'`` route::
+
+ api.add_static_route('/foo', foo_path)
+ api.add_static_route('/foo/bar', foobar_path)
+
+ Args:
+ prefix (str): The path prefix to match for this route. If the
+ path in the requested URI starts with this string, the remainder
+ of the path will be appended to the source directory to
+ determine the file to serve. This is done in a secure manner
+ to prevent an attacker from requesting a file outside the
+ specified directory.
+
+ Note that static routes are matched in LIFO order, and are only
+ attempted after checking dynamic routes and sinks.
+
+ directory (str): The source directory from which to serve files.
+ downloadable (bool): Set to ``True`` to include a
+ Content-Disposition header in the response. The "filename"
+ directive is simply set to the name of the requested file.
+
+ """
+
+ self._static_routes.insert(
+ 0,
+ routing.StaticRoute(prefix, directory, downloadable=downloadable)
+ )
+
def add_sink(self, sink, prefix=r'/'):
"""Register a sink method for the API.
@@ -563,7 +612,13 @@ class API(object):
break
else:
- responder = falcon.responders.path_not_found
+
+ for sr in self._static_routes:
+ if sr.match(path):
+ responder = sr
+ break
+ else:
+ responder = falcon.responders.path_not_found
return (responder, params, resource, uri_template)
diff --git a/falcon/response.py b/falcon/response.py
index e10d778..9479468 100644
--- a/falcon/response.py
+++ b/falcon/response.py
@@ -14,6 +14,8 @@
"""Response class."""
+import mimetypes
+
from six import PY2
from six import string_types as STRING_TYPES
@@ -25,6 +27,7 @@ from six.moves import http_cookies # NOQA: I202
from falcon import DEFAULT_MEDIA_TYPE
from falcon.media import Handlers
from falcon.response_helpers import (
+ format_content_disposition,
format_header_value_list,
format_range,
header_property,
@@ -34,6 +37,7 @@ from falcon.util import dt_to_http, TimezoneGMT
from falcon.util.uri import encode as uri_encode
from falcon.util.uri import encode_value as uri_encode_value
+
SimpleCookie = http_cookies.SimpleCookie
CookieError = http_cookies.CookieError
@@ -679,6 +683,16 @@ class Response(object):
and ``falcon.MEDIA_GIF``.
""")
+ downloadable_as = header_property(
+ 'Content-Disposition',
+ """Set the Content-Disposition header using the given filename.
+
+ The value will be used for the "filename" directive. For example,
+ given 'report.pdf', the Content-Disposition header would be set
+ to ``'attachment; filename="report.pdf"'``.
+ """,
+ format_content_disposition)
+
etag = header_property(
'ETag',
'Set the ETag header.')
@@ -811,24 +825,32 @@ class ResponseOptions(object):
not requiring HTTPS. Note, however, that this setting can
be overridden via `set_cookie()`'s `secure` kwarg.
- default_media_type (str): The default media-type to use when
- deserializing a response. This value is normally set to the media
- type provided when a :class:`falcon.API` is initialized; however,
- if created independently, this will default to the
+ default_media_type (str): The default Internet media type (RFC 2046) to
+ use when deserializing a response. This value is normally set to the
+ media type provided when a :class:`falcon.API` is initialized;
+ however, if created independently, this will default to the
``DEFAULT_MEDIA_TYPE`` specified by Falcon.
media_handlers (Handlers): A dict-like object that allows you to
configure the media-types that you would like to handle.
By default, a handler is provided for the ``application/json``
media type.
+
+ static_media_types (dict): A mapping of dot-prefixed file extensions to
+ Internet media types (RFC 2046). Defaults to ``mimetypes.types_map``
+ after calling ``mimetypes.init()``.
"""
__slots__ = (
'secure_cookies_by_default',
'default_media_type',
'media_handlers',
+ 'static_media_types',
)
def __init__(self):
self.secure_cookies_by_default = True
self.default_media_type = DEFAULT_MEDIA_TYPE
self.media_handlers = Handlers()
+
+ mimetypes.init()
+ self.static_media_types = mimetypes.types_map
diff --git a/falcon/response_helpers.py b/falcon/response_helpers.py
index 47308c2..602eb5b 100644
--- a/falcon/response_helpers.py
+++ b/falcon/response_helpers.py
@@ -77,6 +77,12 @@ def format_range(value):
return result
+def format_content_disposition(value):
+ """Formats a Content-Disposition header given a filename."""
+
+ return 'attachment; filename="' + value + '"'
+
+
if six.PY2:
def format_header_value_list(iterable):
"""Join an iterable of strings with commas."""
diff --git a/falcon/routing/__init__.py b/falcon/routing/__init__.py
index abb9c87..51bc7d3 100644
--- a/falcon/routing/__init__.py
+++ b/falcon/routing/__init__.py
@@ -20,6 +20,7 @@ routers.
"""
from falcon.routing.compiled import CompiledRouter, CompiledRouterOptions # NOQA
+from falcon.routing.static import StaticRoute # NOQA
from falcon.routing.util import create_http_method_map # NOQA
from falcon.routing.util import map_http_methods # NOQA
from falcon.routing.util import set_default_responders # NOQA
diff --git a/falcon/routing/static.py b/falcon/routing/static.py
new file mode 100644
index 0000000..ba3fab2
--- /dev/null
+++ b/falcon/routing/static.py
@@ -0,0 +1,98 @@
+import io
+import os
+import re
+
+import falcon
+
+
+class StaticRoute(object):
+ """Represents a static route.
+
+ Args:
+ prefix (str): The path prefix to match for this route. If the
+ path in the requested URI starts with this string, the remainder
+ of the path will be appended to the source directory to
+ determine the file to serve. This is done in a secure manner
+ to prevent an attacker from requesting a file outside the
+ specified directory.
+
+ Note that static routes are matched in LIFO order, and are only
+ attempted after checking dynamic routes and sinks.
+
+ directory (str): The source directory from which to serve files. Must
+ be an absolute path.
+ downloadable (bool): Set to ``True`` to include a
+ Content-Disposition header in the response. The "filename"
+ directive is simply set to the name of the requested file.
+ """
+
+ # NOTE(kgriffs): Don't allow control characters and reserved chars
+ _DISALLOWED_CHARS_PATTERN = re.compile('[\x00-\x1f\x80-\x9f~?<>:*|\'"]')
+
+ # NOTE(kgriffs): If somehow an executable code exploit is triggerable, this
+ # minimizes how much can be included in the payload.
+ _MAX_NON_PREFIXED_LEN = 512
+
+ def __init__(self, prefix, directory, downloadable=False):
+ if not prefix.startswith('/'):
+ raise ValueError("prefix must start with '/'")
+
+ if not os.path.isabs(directory):
+ raise ValueError('directory must be an absolute path')
+
+ # NOTE(kgriffs): Ensure it ends with a path separator to ensure
+ # we only match on the complete segment. Don't raise an error
+ # because most people won't expect to have to append a slash.
+ if not prefix.endswith('/'):
+ prefix += '/'
+
+ self._prefix = prefix
+ self._directory = directory
+ self._downloadable = downloadable
+
+ def match(self, path):
+ """Check whether the given path matches this route."""
+ return path.startswith(self._prefix)
+
+ def __call__(self, req, resp):
+ """Resource responder for this route."""
+
+ without_prefix = req.path[len(self._prefix):]
+
+ # NOTE(kgriffs): Check surrounding whitespace and strip trailing
+ # periods, which are illegal on windows
+ if (not without_prefix or
+ without_prefix.strip().rstrip('.') != without_prefix or
+ self._DISALLOWED_CHARS_PATTERN.search(without_prefix) or
+ '\\' in without_prefix or
+ '//' in without_prefix or
+ len(without_prefix) > self._MAX_NON_PREFIXED_LEN):
+
+ raise falcon.HTTPNotFound()
+
+ normalized = os.path.normpath(without_prefix)
+
+ if normalized.startswith('../') or normalized.startswith('/'):
+ raise falcon.HTTPNotFound()
+
+ file_path = os.path.join(self._directory, normalized)
+
+ # NOTE(kgriffs): Final sanity-check just to be safe. This check
+ # should never succeed, but this should guard against us having
+ # overlooked something.
+ if '..' in file_path or not file_path.startswith(self._directory):
+ raise falcon.HTTPNotFound() # pragma: nocover
+
+ try:
+ resp.stream = io.open(file_path, 'rb')
+ except IOError:
+ raise falcon.HTTPNotFound()
+
+ suffix = os.path.splitext(file_path)[1]
+ resp.content_type = resp.options.static_media_types.get(
+ suffix,
+ 'application/octet-stream'
+ )
+
+ if self._downloadable:
+ resp.downloadable_as = os.path.basename(file_path)
| Route to static file
On a PaaS you may not be able to server static files from the web server directly, so let's make it easy to do this in Falcon.
Depends on #181
| falconry/falcon | diff --git a/tests/test_headers.py b/tests/test_headers.py
index 0f03ecc..79ab1df 100644
--- a/tests/test_headers.py
+++ b/tests/test_headers.py
@@ -52,6 +52,7 @@ class HeaderHelpersResource(object):
# Relative URI's are OK per http://goo.gl/DbVqR
resp.location = '/things/87'
resp.content_location = '/things/78'
+ resp.downloadable_as = 'Some File.zip'
if req.range_unit is None or req.range_unit == 'bytes':
# bytes 0-499/10240
@@ -310,6 +311,7 @@ class TestHeaders(object):
content_type = 'x-falcon/peregrine'
assert resp.content_type == content_type
assert result.headers['Content-Type'] == content_type
+ assert result.headers['Content-Disposition'] == 'attachment; filename="Some File.zip"'
cache_control = ('public, private, no-cache, no-store, '
'must-revalidate, proxy-revalidate, max-age=3600, '
diff --git a/tests/test_static.py b/tests/test_static.py
new file mode 100644
index 0000000..2fbb55c
--- /dev/null
+++ b/tests/test_static.py
@@ -0,0 +1,201 @@
+# -*- coding: utf-8 -*-
+
+import io
+
+import pytest
+
+import falcon
+from falcon.request import Request
+from falcon.response import Response
+from falcon.routing import StaticRoute
+import falcon.testing as testing
+
+
[email protected]
+def client():
+ app = falcon.API()
+ return testing.TestClient(app)
+
+
[email protected]('uri', [
+ # Root
+ '/static',
+ '/static/',
+ '/static/.',
+
+ # Attempt to jump out of the directory
+ '/static/..',
+ '/static/../.',
+ '/static/.././etc/passwd',
+ '/static/../etc/passwd',
+ '/static/css/../../secret',
+ '/static/css/../../etc/passwd',
+ '/static/./../etc/passwd',
+
+ # The file system probably won't process escapes, but better safe than sorry
+ '/static/css/../.\\056/etc/passwd',
+ '/static/./\\056./etc/passwd',
+ '/static/\\056\\056/etc/passwd',
+
+ # Double slash
+ '/static//test.css',
+ '/static//COM10',
+ '/static/path//test.css',
+ '/static/path///test.css',
+ '/static/path////test.css',
+ '/static/path/foo//test.css',
+
+ # Control characters (0x00–0x1f and 0x80–0x9f)
+ '/static/.\x00ssh/authorized_keys',
+ '/static/.\x1fssh/authorized_keys',
+ '/static/.\x80ssh/authorized_keys',
+ '/static/.\x9fssh/authorized_keys',
+
+ # Reserved characters (~, ?, <, >, :, *, |, ', and ")
+ '/static/~/.ssh/authorized_keys',
+ '/static/.ssh/authorized_key?',
+ '/static/.ssh/authorized_key>foo',
+ '/static/.ssh/authorized_key|foo',
+ '/static/.ssh/authorized_key<foo',
+ '/static/something:something',
+ '/static/thing*.sql',
+ '/static/\'thing\'.sql',
+ '/static/"thing".sql',
+
+ # Trailing periods and spaces
+ '/static/something.',
+ '/static/something..',
+ '/static/something ',
+ '/static/ something ',
+ '/static/ something ',
+ '/static/something\t',
+ '/static/\tsomething',
+
+ # Too long
+ '/static/' + ('t' * StaticRoute._MAX_NON_PREFIXED_LEN) + 'x',
+
+])
+def test_bad_path(uri, monkeypatch):
+ monkeypatch.setattr(io, 'open', lambda path, mode: path)
+
+ sr = StaticRoute('/static', '/var/www/statics')
+
+ req = Request(testing.create_environ(
+ host='test.com',
+ path=uri,
+ app='statics'
+ ))
+
+ resp = Response()
+
+ with pytest.raises(falcon.HTTPNotFound):
+ sr(req, resp)
+
+
[email protected]('prefix, directory', [
+ ('static', '/var/www/statics'),
+ ('/static', './var/www/statics'),
+ ('/static', 'statics'),
+ ('/static', '../statics'),
+])
+def test_invalid_args(prefix, directory, client):
+ with pytest.raises(ValueError):
+ StaticRoute(prefix, directory)
+
+ with pytest.raises(ValueError):
+ client.app.add_static_route(prefix, directory)
+
+
[email protected]('uri_prefix, uri_path, expected_path, mtype', [
+ ('/static/', '/css/test.css', '/css/test.css', 'text/css'),
+ ('/static', '/css/test.css', '/css/test.css', 'text/css'),
+ (
+ '/static',
+ '/' + ('t' * StaticRoute._MAX_NON_PREFIXED_LEN),
+ '/' + ('t' * StaticRoute._MAX_NON_PREFIXED_LEN),
+ 'application/octet-stream',
+ ),
+ ('/static', '/.test.css', '/.test.css', 'text/css'),
+ ('/some/download/', '/report.pdf', '/report.pdf', 'application/pdf'),
+ ('/some/download/', '/Fancy Report.pdf', '/Fancy Report.pdf', 'application/pdf'),
+ ('/some/download', '/report.zip', '/report.zip', 'application/zip'),
+ ('/some/download', '/foo/../report.zip', '/report.zip', 'application/zip'),
+ ('/some/download', '/foo/../bar/../report.zip', '/report.zip', 'application/zip'),
+ ('/some/download', '/foo/bar/../../report.zip', '/report.zip', 'application/zip'),
+])
+def test_good_path(uri_prefix, uri_path, expected_path, mtype, monkeypatch):
+ monkeypatch.setattr(io, 'open', lambda path, mode: path)
+
+ sr = StaticRoute(uri_prefix, '/var/www/statics')
+
+ req_path = uri_prefix[:-1] if uri_prefix.endswith('/') else uri_prefix
+ req_path += uri_path
+
+ req = Request(testing.create_environ(
+ host='test.com',
+ path=req_path,
+ app='statics'
+ ))
+
+ resp = Response()
+
+ sr(req, resp)
+
+ assert resp.content_type == mtype
+ assert resp.stream == '/var/www/statics' + expected_path
+
+
+def test_lifo(client, monkeypatch):
+ monkeypatch.setattr(io, 'open', lambda path, mode: [path.encode('utf-8')])
+
+ client.app.add_static_route('/downloads', '/opt/somesite/downloads')
+ client.app.add_static_route('/downloads/archive', '/opt/somesite/x')
+
+ response = client.simulate_request(path='/downloads/thing.zip')
+ assert response.status == falcon.HTTP_200
+ assert response.text == '/opt/somesite/downloads/thing.zip'
+
+ response = client.simulate_request(path='/downloads/archive/thingtoo.zip')
+ assert response.status == falcon.HTTP_200
+ assert response.text == '/opt/somesite/x/thingtoo.zip'
+
+
+def test_lifo_negative(client, monkeypatch):
+ monkeypatch.setattr(io, 'open', lambda path, mode: [path.encode('utf-8')])
+
+ client.app.add_static_route('/downloads/archive', '/opt/somesite/x')
+ client.app.add_static_route('/downloads', '/opt/somesite/downloads')
+
+ response = client.simulate_request(path='/downloads/thing.zip')
+ assert response.status == falcon.HTTP_200
+ assert response.text == '/opt/somesite/downloads/thing.zip'
+
+ response = client.simulate_request(path='/downloads/archive/thingtoo.zip')
+ assert response.status == falcon.HTTP_200
+ assert response.text == '/opt/somesite/downloads/archive/thingtoo.zip'
+
+
+def test_downloadable(client, monkeypatch):
+ monkeypatch.setattr(io, 'open', lambda path, mode: [path.encode('utf-8')])
+
+ client.app.add_static_route('/downloads', '/opt/somesite/downloads', downloadable=True)
+ client.app.add_static_route('/assets/', '/opt/somesite/assets')
+
+ response = client.simulate_request(path='/downloads/thing.zip')
+ assert response.status == falcon.HTTP_200
+ assert response.headers['Content-Disposition'] == 'attachment; filename="thing.zip"'
+
+ response = client.simulate_request(path='/downloads/Some Report.zip')
+ assert response.status == falcon.HTTP_200
+ assert response.headers['Content-Disposition'] == 'attachment; filename="Some Report.zip"'
+
+ response = client.simulate_request(path='/assets/css/main.css')
+ assert response.status == falcon.HTTP_200
+ assert 'Content-Disposition' not in response.headers
+
+
+def test_downloadable_not_found(client):
+ client.app.add_static_route('/downloads', '/opt/somesite/downloads', downloadable=True)
+
+ response = client.simulate_request(path='/downloads/thing.zip')
+ assert response.status == falcon.HTTP_404
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 4
} | 1.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements/tests"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
charset-normalizer==2.0.12
coverage==6.2
execnet==1.9.0
-e git+https://github.com/falconry/falcon.git@064232a826070c8ff528b739e560917e7da67e1e#egg=falcon
fixtures==4.0.1
idna==3.10
importlib-metadata==4.8.3
iniconfig==1.1.1
jsonschema==3.2.0
msgpack-python==0.5.6
packaging==21.3
pbr==6.1.1
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pyrsistent==0.18.0
pytest==7.0.1
pytest-asyncio==0.16.0
pytest-cov==4.0.0
pytest-mock==3.6.1
pytest-xdist==3.0.2
python-mimeparse==1.6.0
PyYAML==3.11
requests==2.27.1
six==1.17.0
testtools==2.6.0
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
zipp==3.6.0
| name: falcon
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- charset-normalizer==2.0.12
- coverage==6.2
- execnet==1.9.0
- fixtures==4.0.1
- idna==3.10
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- jsonschema==3.2.0
- msgpack-python==0.5.6
- packaging==21.3
- pbr==6.1.1
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pyrsistent==0.18.0
- pytest==7.0.1
- pytest-asyncio==0.16.0
- pytest-cov==4.0.0
- pytest-mock==3.6.1
- pytest-xdist==3.0.2
- python-mimeparse==1.6.0
- pyyaml==3.11
- requests==2.27.1
- six==1.17.0
- testtools==2.6.0
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- zipp==3.6.0
prefix: /opt/conda/envs/falcon
| [
"tests/test_after_hooks.py::test_output_validator",
"tests/test_after_hooks.py::test_serializer",
"tests/test_after_hooks.py::test_hook_as_callable_class",
"tests/test_after_hooks.py::test_resource_with_uri_fields[resource0]",
"tests/test_after_hooks.py::test_resource_with_uri_fields[resource1]",
"tests/test_after_hooks.py::test_wrapped_resource[resource0]",
"tests/test_after_hooks.py::test_wrapped_resource[resource1]",
"tests/test_after_hooks.py::test_wrapped_resource_with_hooks_aware_of_resource",
"tests/test_before_hooks.py::test_multiple_resource_hooks[resource0]",
"tests/test_before_hooks.py::test_multiple_resource_hooks[resource1]",
"tests/test_before_hooks.py::test_input_validator",
"tests/test_before_hooks.py::test_input_validator_inherited",
"tests/test_before_hooks.py::test_param_validator",
"tests/test_before_hooks.py::test_field_validator[resource0]",
"tests/test_before_hooks.py::test_field_validator[resource1]",
"tests/test_before_hooks.py::test_field_validator[resource2]",
"tests/test_before_hooks.py::test_parser",
"tests/test_before_hooks.py::test_wrapped_resource",
"tests/test_before_hooks.py::test_wrapped_resource_with_hooks_aware_of_resource",
"tests/test_boundedstream.py::test_not_writeable",
"tests/test_cmd_print_api.py::test_traverse_with_verbose",
"tests/test_cmd_print_api.py::test_traverse",
"tests/test_cookies.py::test_response_base_case",
"tests/test_cookies.py::test_response_disable_secure_globally",
"tests/test_cookies.py::test_response_complex_case",
"tests/test_cookies.py::test_cookie_expires_naive",
"tests/test_cookies.py::test_cookie_expires_aware",
"tests/test_cookies.py::test_cookies_setable",
"tests/test_cookies.py::test_cookie_max_age_float_and_string[foofloat]",
"tests/test_cookies.py::test_cookie_max_age_float_and_string[foostring]",
"tests/test_cookies.py::test_response_unset_cookie",
"tests/test_cookies.py::test_cookie_timezone",
"tests/test_cookies.py::test_request_cookie_parsing",
"tests/test_cookies.py::test_invalid_cookies_are_ignored",
"tests/test_cookies.py::test_cookie_header_is_missing",
"tests/test_cookies.py::test_unicode_inside_ascii_range",
"tests/test_cookies.py::test_non_ascii_name[Unicode_\\xc3\\xa6\\xc3\\xb8]",
"tests/test_cookies.py::test_non_ascii_name[Unicode_\\xc3\\x83\\xc2\\xa6\\xc3\\x83\\xc2\\xb8]",
"tests/test_cookies.py::test_non_ascii_name[42]",
"tests/test_cookies.py::test_non_ascii_value[Unicode_\\xc3\\xa6\\xc3\\xb8]",
"tests/test_cookies.py::test_non_ascii_value[Unicode_\\xc3\\x83\\xc2\\xa6\\xc3\\x83\\xc2\\xb8]",
"tests/test_cookies.py::test_non_ascii_value[42]",
"tests/test_custom_router.py::test_custom_router_add_route_should_be_used",
"tests/test_custom_router.py::test_custom_router_find_should_be_used",
"tests/test_custom_router.py::test_can_pass_additional_params_to_add_route",
"tests/test_custom_router.py::test_custom_router_takes_req_positional_argument",
"tests/test_custom_router.py::test_custom_router_takes_req_keyword_argument",
"tests/test_default_router.py::test_user_regression_versioned_url",
"tests/test_default_router.py::test_user_regression_recipes",
"tests/test_default_router.py::test_user_regression_special_chars[/serviceRoot/People|{field}-/serviceRoot/People|susie-expected_params0]",
"tests/test_default_router.py::test_user_regression_special_chars[/serviceRoot/People[{field}]-/serviceRoot/People['calvin']-expected_params1]",
"tests/test_default_router.py::test_user_regression_special_chars[/serviceRoot/People({field})-/serviceRoot/People('hobbes')-expected_params2]",
"tests/test_default_router.py::test_user_regression_special_chars[/serviceRoot/People({field})-/serviceRoot/People('hob)bes')-expected_params3]",
"tests/test_default_router.py::test_user_regression_special_chars[/serviceRoot/People({field})(z)-/serviceRoot/People(hobbes)(z)-expected_params4]",
"tests/test_default_router.py::test_user_regression_special_chars[/serviceRoot/People('{field}')-/serviceRoot/People('rosalyn')-expected_params5]",
"tests/test_default_router.py::test_user_regression_special_chars[/^{field}-/^42-expected_params6]",
"tests/test_default_router.py::test_user_regression_special_chars[/+{field}-/+42-expected_params7]",
"tests/test_default_router.py::test_user_regression_special_chars[/foo/{first}_{second}/bar-/foo/abc_def_ghijk/bar-expected_params8]",
"tests/test_default_router.py::test_user_regression_special_chars[/items/{x}?{y}-/items/1080?768-expected_params9]",
"tests/test_default_router.py::test_user_regression_special_chars[/items/{x}|{y}-/items/1080|768-expected_params10]",
"tests/test_default_router.py::test_user_regression_special_chars[/items/{x},{y}-/items/1080,768-expected_params11]",
"tests/test_default_router.py::test_user_regression_special_chars[/items/{x}^^{y}-/items/1080^^768-expected_params12]",
"tests/test_default_router.py::test_user_regression_special_chars[/items/{x}*{y}*-/items/1080*768*-expected_params13]",
"tests/test_default_router.py::test_user_regression_special_chars[/thing-2/something+{field}+-/thing-2/something+42+-expected_params14]",
"tests/test_default_router.py::test_user_regression_special_chars[/thing-2/something*{field}/notes-/thing-2/something*42/notes-expected_params15]",
"tests/test_default_router.py::test_user_regression_special_chars[/thing-2/something+{field}|{q}/notes-/thing-2/something+else|z/notes-expected_params16]",
"tests/test_default_router.py::test_user_regression_special_chars[serviceRoot/$metadata#Airports('{field}')/Name-serviceRoot/$metadata#Airports('KSFO')/Name-expected_params17]",
"tests/test_default_router.py::test_not_str[uri_template0]",
"tests/test_default_router.py::test_not_str[uri_template1]",
"tests/test_default_router.py::test_not_str[uri_template2]",
"tests/test_default_router.py::test_root_path",
"tests/test_default_router.py::test_duplicate_field_names[/{field}{field}]",
"tests/test_default_router.py::test_duplicate_field_names[/{field}...{field}]",
"tests/test_default_router.py::test_duplicate_field_names[/{field}/{another}/{field}]",
"tests/test_default_router.py::test_duplicate_field_names[/{field}/something/something/{field}/something]",
"tests/test_default_router.py::test_match_entire_path[/items/thing-/items/t]",
"tests/test_default_router.py::test_match_entire_path[/items/{x}|{y}|-/items/1080|768]",
"tests/test_default_router.py::test_match_entire_path[/items/{x}*{y}foo-/items/1080*768foobar]",
"tests/test_default_router.py::test_match_entire_path[/items/{x}*768*-/items/1080*768***]",
"tests/test_default_router.py::test_conflict[/teams/{conflict}]",
"tests/test_default_router.py::test_conflict[/emojis/signs/{id_too}]",
"tests/test_default_router.py::test_conflict[/repos/{org}/{repo}/compare/{complex}:{vs}...{complex2}:{conflict}]",
"tests/test_default_router.py::test_conflict[/teams/{id:int}/settings]",
"tests/test_default_router.py::test_non_conflict[/repos/{org}/{repo}/compare/{simple_vs_complex}]",
"tests/test_default_router.py::test_non_conflict[/repos/{complex}.{vs}.{simple}]",
"tests/test_default_router.py::test_non_conflict[/repos/{org}/{repo}/compare/{complex}:{vs}...{complex2}/full]",
"tests/test_default_router.py::test_invalid_field_name[/{}]",
"tests/test_default_router.py::test_invalid_field_name[/repos/{org}/{repo}/compare/{}]",
"tests/test_default_router.py::test_invalid_field_name[/repos/{complex}.{}.{thing}]",
"tests/test_default_router.py::test_invalid_field_name[/{9v}]",
"tests/test_default_router.py::test_invalid_field_name[/{524hello}/world]",
"tests/test_default_router.py::test_invalid_field_name[/hello/{1world}]",
"tests/test_default_router.py::test_invalid_field_name[/repos/{complex}.{9v}.{thing}/etc]",
"tests/test_default_router.py::test_invalid_field_name[/{*kgriffs}]",
"tests/test_default_router.py::test_invalid_field_name[/{@kgriffs}]",
"tests/test_default_router.py::test_invalid_field_name[/repos/{complex}.{v}.{@thing}/etc]",
"tests/test_default_router.py::test_invalid_field_name[/{-kgriffs}]",
"tests/test_default_router.py::test_invalid_field_name[/repos/{complex}.{-v}.{thing}/etc]",
"tests/test_default_router.py::test_invalid_field_name[/repos/{simple-thing}/etc]",
"tests/test_default_router.py::test_invalid_field_name[/this",
"tests/test_default_router.py::test_invalid_field_name[/this\\tand\\tthat/this\\nand\\nthat/{thing",
"tests/test_default_router.py::test_invalid_field_name[/{thing\\t}/world]",
"tests/test_default_router.py::test_invalid_field_name[/{\\nthing}/world]",
"tests/test_default_router.py::test_invalid_field_name[/{th\\x0bing}/world]",
"tests/test_default_router.py::test_invalid_field_name[/{",
"tests/test_default_router.py::test_invalid_field_name[/{thing}/wo",
"tests/test_default_router.py::test_invalid_field_name[/{thing}",
"tests/test_default_router.py::test_invalid_field_name[/repos/{or",
"tests/test_default_router.py::test_invalid_field_name[/repos/{org}/{repo}/compare/{th\\ting}]",
"tests/test_default_router.py::test_print_src",
"tests/test_default_router.py::test_override",
"tests/test_default_router.py::test_literal_segment",
"tests/test_default_router.py::test_dead_segment[/teams]",
"tests/test_default_router.py::test_dead_segment[/emojis/signs]",
"tests/test_default_router.py::test_dead_segment[/gists]",
"tests/test_default_router.py::test_dead_segment[/gists/42]",
"tests/test_default_router.py::test_malformed_pattern[/repos/racker/falcon/compare/foo]",
"tests/test_default_router.py::test_malformed_pattern[/repos/racker/falcon/compare/foo/full]",
"tests/test_default_router.py::test_literal",
"tests/test_default_router.py::test_converters[/cvt/teams/007-expected_params0]",
"tests/test_default_router.py::test_converters[/cvt/teams/1234/members-expected_params1]",
"tests/test_default_router.py::test_converters[/cvt/teams/default/members/700-5-expected_params2]",
"tests/test_default_router.py::test_converters[/cvt/repos/org/repo/compare/xkcd:353-expected_params3]",
"tests/test_default_router.py::test_converters[/cvt/repos/org/repo/compare/gunmachan:1234...kumamon:5678/part-expected_params4]",
"tests/test_default_router.py::test_converters[/cvt/repos/xkcd/353/compare/susan:0001/full-expected_params5]",
"tests/test_default_router.py::test_converters_with_invalid_options[/foo/{bar:int(0)}]",
"tests/test_default_router.py::test_converters_with_invalid_options[/foo/{bar:int(num_digits=0)}]",
"tests/test_default_router.py::test_converters_with_invalid_options[/foo/{bar:int(-1)}/baz]",
"tests/test_default_router.py::test_converters_with_invalid_options[/foo/{bar:int(num_digits=-1)}/baz]",
"tests/test_default_router.py::test_converters_malformed_specification[/foo/{bar:}]",
"tests/test_default_router.py::test_converters_malformed_specification[/foo/{bar:unknown}/baz]",
"tests/test_default_router.py::test_variable",
"tests/test_default_router.py::test_single_character_field_name",
"tests/test_default_router.py::test_literal_vs_variable[/teams/default-19]",
"tests/test_default_router.py::test_literal_vs_variable[/teams/default/members-7]",
"tests/test_default_router.py::test_literal_vs_variable[/cvt/teams/default-31]",
"tests/test_default_router.py::test_literal_vs_variable[/cvt/teams/default/members/1234-10-32]",
"tests/test_default_router.py::test_literal_vs_variable[/teams/1234-6]",
"tests/test_default_router.py::test_literal_vs_variable[/teams/1234/members-7]",
"tests/test_default_router.py::test_literal_vs_variable[/gists/first-20]",
"tests/test_default_router.py::test_literal_vs_variable[/gists/first/raw-18]",
"tests/test_default_router.py::test_literal_vs_variable[/gists/first/pdf-21]",
"tests/test_default_router.py::test_literal_vs_variable[/gists/1776/pdf-21]",
"tests/test_default_router.py::test_literal_vs_variable[/emojis/signs/78-13]",
"tests/test_default_router.py::test_literal_vs_variable[/emojis/signs/78/small.png-24]",
"tests/test_default_router.py::test_literal_vs_variable[/emojis/signs/78/small(png)-25]",
"tests/test_default_router.py::test_literal_vs_variable[/emojis/signs/78/small_png-26]",
"tests/test_default_router.py::test_not_found[/this/does/not/exist]",
"tests/test_default_router.py::test_not_found[/user/bogus]",
"tests/test_default_router.py::test_not_found[/repos/racker/falcon/compare/johndoe:master...janedoe:dev/bogus]",
"tests/test_default_router.py::test_not_found[/teams]",
"tests/test_default_router.py::test_not_found[/teams/42/members/undefined]",
"tests/test_default_router.py::test_not_found[/teams/42/undefined]",
"tests/test_default_router.py::test_not_found[/teams/42/undefined/segments]",
"tests/test_default_router.py::test_not_found[/teams/default/members/undefined]",
"tests/test_default_router.py::test_not_found[/teams/default/members/thing/undefined]",
"tests/test_default_router.py::test_not_found[/teams/default/members/thing/undefined/segments]",
"tests/test_default_router.py::test_not_found[/teams/default/undefined]",
"tests/test_default_router.py::test_not_found[/teams/default/undefined/segments]",
"tests/test_default_router.py::test_not_found[/cvt/teams/default/members]",
"tests/test_default_router.py::test_not_found[/cvt/teams/NaN]",
"tests/test_default_router.py::test_not_found[/cvt/teams/default/members/NaN]",
"tests/test_default_router.py::test_not_found[/emojis/signs]",
"tests/test_default_router.py::test_not_found[/emojis/signs/0/small]",
"tests/test_default_router.py::test_not_found[/emojis/signs/0/undefined]",
"tests/test_default_router.py::test_not_found[/emojis/signs/0/undefined/segments]",
"tests/test_default_router.py::test_not_found[/emojis/signs/20/small]",
"tests/test_default_router.py::test_not_found[/emojis/signs/20/undefined]",
"tests/test_default_router.py::test_not_found[/emojis/signs/42/undefined]",
"tests/test_default_router.py::test_not_found[/emojis/signs/78/undefined]",
"tests/test_default_router.py::test_subsegment_not_found",
"tests/test_default_router.py::test_multivar",
"tests/test_default_router.py::test_complex[-5]",
"tests/test_default_router.py::test_complex[/full-10]",
"tests/test_default_router.py::test_complex[/part-15]",
"tests/test_default_router.py::test_complex_alt[-16-/repos/{org}/{repo}/compare/{usr0}:{branch0}]",
"tests/test_default_router.py::test_complex_alt[/full-17-/repos/{org}/{repo}/compare/{usr0}:{branch0}/full]",
"tests/test_default_router.py::test_options_converters_set",
"tests/test_default_router.py::test_options_converters_update[spam]",
"tests/test_default_router.py::test_options_converters_update[spam_2]",
"tests/test_default_router.py::test_options_converters_invalid_name[has",
"tests/test_default_router.py::test_options_converters_invalid_name[whitespace",
"tests/test_default_router.py::test_options_converters_invalid_name[",
"tests/test_default_router.py::test_options_converters_invalid_name[funky$character]",
"tests/test_default_router.py::test_options_converters_invalid_name[42istheanswer]",
"tests/test_default_router.py::test_options_converters_invalid_name[with-hyphen]",
"tests/test_default_router.py::test_options_converters_invalid_name_on_update",
"tests/test_deps.py::test_deps_mimeparse_correct_package",
"tests/test_error.py::test_with_default_title_and_desc[HTTPBadRequest-400",
"tests/test_error.py::test_with_default_title_and_desc[HTTPForbidden-403",
"tests/test_error.py::test_with_default_title_and_desc[HTTPConflict-409",
"tests/test_error.py::test_with_default_title_and_desc[HTTPLengthRequired-411",
"tests/test_error.py::test_with_default_title_and_desc[HTTPPreconditionFailed-412",
"tests/test_error.py::test_with_default_title_and_desc[HTTPRequestEntityTooLarge-413",
"tests/test_error.py::test_with_default_title_and_desc[HTTPUriTooLong-414",
"tests/test_error.py::test_with_default_title_and_desc[HTTPUnprocessableEntity-422",
"tests/test_error.py::test_with_default_title_and_desc[HTTPLocked-423",
"tests/test_error.py::test_with_default_title_and_desc[HTTPFailedDependency-424",
"tests/test_error.py::test_with_default_title_and_desc[HTTPPreconditionRequired-428",
"tests/test_error.py::test_with_default_title_and_desc[HTTPTooManyRequests-429",
"tests/test_error.py::test_with_default_title_and_desc[HTTPRequestHeaderFieldsTooLarge-431",
"tests/test_error.py::test_with_default_title_and_desc[HTTPUnavailableForLegalReasons-451",
"tests/test_error.py::test_with_default_title_and_desc[HTTPInternalServerError-500",
"tests/test_error.py::test_with_default_title_and_desc[HTTPNotImplemented-501",
"tests/test_error.py::test_with_default_title_and_desc[HTTPBadGateway-502",
"tests/test_error.py::test_with_default_title_and_desc[HTTPServiceUnavailable-503",
"tests/test_error.py::test_with_default_title_and_desc[HTTPGatewayTimeout-504",
"tests/test_error.py::test_with_default_title_and_desc[HTTPVersionNotSupported-505",
"tests/test_error.py::test_with_default_title_and_desc[HTTPInsufficientStorage-507",
"tests/test_error.py::test_with_default_title_and_desc[HTTPLoopDetected-508",
"tests/test_error.py::test_with_default_title_and_desc[HTTPNetworkAuthenticationRequired-511",
"tests/test_error.py::test_with_title_and_desc[HTTPBadRequest]",
"tests/test_error.py::test_with_title_and_desc[HTTPForbidden]",
"tests/test_error.py::test_with_title_and_desc[HTTPConflict]",
"tests/test_error.py::test_with_title_and_desc[HTTPLengthRequired]",
"tests/test_error.py::test_with_title_and_desc[HTTPPreconditionFailed]",
"tests/test_error.py::test_with_title_and_desc[HTTPPreconditionRequired]",
"tests/test_error.py::test_with_title_and_desc[HTTPUriTooLong]",
"tests/test_error.py::test_with_title_and_desc[HTTPUnprocessableEntity]",
"tests/test_error.py::test_with_title_and_desc[HTTPLocked]",
"tests/test_error.py::test_with_title_and_desc[HTTPFailedDependency]",
"tests/test_error.py::test_with_title_and_desc[HTTPRequestHeaderFieldsTooLarge]",
"tests/test_error.py::test_with_title_and_desc[HTTPUnavailableForLegalReasons]",
"tests/test_error.py::test_with_title_and_desc[HTTPInternalServerError]",
"tests/test_error.py::test_with_title_and_desc[HTTPNotImplemented]",
"tests/test_error.py::test_with_title_and_desc[HTTPBadGateway]",
"tests/test_error.py::test_with_title_and_desc[HTTPServiceUnavailable]",
"tests/test_error.py::test_with_title_and_desc[HTTPGatewayTimeout]",
"tests/test_error.py::test_with_title_and_desc[HTTPVersionNotSupported]",
"tests/test_error.py::test_with_title_and_desc[HTTPInsufficientStorage]",
"tests/test_error.py::test_with_title_and_desc[HTTPLoopDetected]",
"tests/test_error.py::test_with_title_and_desc[HTTPNetworkAuthenticationRequired]",
"tests/test_error.py::test_with_retry_after[HTTPServiceUnavailable]",
"tests/test_error.py::test_with_retry_after[HTTPTooManyRequests]",
"tests/test_error.py::test_with_retry_after[HTTPRequestEntityTooLarge]",
"tests/test_error.py::test_http_unauthorized_no_title_and_desc_and_challenges",
"tests/test_error.py::test_http_unauthorized_with_title_and_desc_and_challenges",
"tests/test_error.py::test_http_not_acceptable_no_title_and_desc_and_challenges",
"tests/test_error.py::test_http_not_acceptable_with_title_and_desc_and_challenges",
"tests/test_error.py::test_http_unsupported_media_type_no_title_and_desc_and_challenges",
"tests/test_error.py::test_http_unsupported_media_type_with_title_and_desc_and_challenges",
"tests/test_error.py::test_http_error_repr",
"tests/test_error_handlers.py::TestErrorHandler::test_caught_error",
"tests/test_error_handlers.py::TestErrorHandler::test_uncaught_error",
"tests/test_error_handlers.py::TestErrorHandler::test_uncaught_error_else",
"tests/test_error_handlers.py::TestErrorHandler::test_converted_error",
"tests/test_error_handlers.py::TestErrorHandler::test_handle_not_defined",
"tests/test_error_handlers.py::TestErrorHandler::test_subclass_error",
"tests/test_error_handlers.py::TestErrorHandler::test_error_order_duplicate",
"tests/test_error_handlers.py::TestErrorHandler::test_error_order_subclass",
"tests/test_error_handlers.py::TestErrorHandler::test_error_order_subclass_masked",
"tests/test_headers.py::TestHeaders::test_content_length",
"tests/test_headers.py::TestHeaders::test_default_value",
"tests/test_headers.py::TestHeaders::test_required_header",
"tests/test_headers.py::TestHeaders::test_no_content_length[204",
"tests/test_headers.py::TestHeaders::test_no_content_length[304",
"tests/test_headers.py::TestHeaders::test_content_header_missing",
"tests/test_headers.py::TestHeaders::test_passthrough_request_headers",
"tests/test_headers.py::TestHeaders::test_headers_as_list",
"tests/test_headers.py::TestHeaders::test_default_media_type",
"tests/test_headers.py::TestHeaders::test_override_default_media_type[text/plain;",
"tests/test_headers.py::TestHeaders::test_override_default_media_type[text/plain-Hello",
"tests/test_headers.py::TestHeaders::test_override_default_media_type_missing_encoding",
"tests/test_headers.py::TestHeaders::test_response_header_helpers_on_get",
"tests/test_headers.py::TestHeaders::test_unicode_location_headers",
"tests/test_headers.py::TestHeaders::test_unicode_headers_convertable",
"tests/test_headers.py::TestHeaders::test_response_set_and_get_header",
"tests/test_headers.py::TestHeaders::test_response_append_header",
"tests/test_headers.py::TestHeaders::test_vary_star",
"tests/test_headers.py::TestHeaders::test_vary_header[vary0-accept-encoding]",
"tests/test_headers.py::TestHeaders::test_vary_header[vary1-accept-encoding,",
"tests/test_headers.py::TestHeaders::test_vary_header[vary2-accept-encoding,",
"tests/test_headers.py::TestHeaders::test_content_type_no_body",
"tests/test_headers.py::TestHeaders::test_no_content_type[204",
"tests/test_headers.py::TestHeaders::test_no_content_type[304",
"tests/test_headers.py::TestHeaders::test_custom_content_type",
"tests/test_headers.py::TestHeaders::test_add_link_single",
"tests/test_headers.py::TestHeaders::test_add_link_multiple",
"tests/test_headers.py::TestHeaders::test_add_link_with_title",
"tests/test_headers.py::TestHeaders::test_add_link_with_title_star",
"tests/test_headers.py::TestHeaders::test_add_link_with_anchor",
"tests/test_headers.py::TestHeaders::test_add_link_with_hreflang",
"tests/test_headers.py::TestHeaders::test_add_link_with_hreflang_multi",
"tests/test_headers.py::TestHeaders::test_add_link_with_type_hint",
"tests/test_headers.py::TestHeaders::test_add_link_complex",
"tests/test_headers.py::TestHeaders::test_content_length_options",
"tests/test_hello.py::TestHelloWorld::test_env_headers_list_of_tuples",
"tests/test_hello.py::TestHelloWorld::test_root_route",
"tests/test_hello.py::TestHelloWorld::test_no_route",
"tests/test_hello.py::TestHelloWorld::test_body[/body-resource0-<lambda>]",
"tests/test_hello.py::TestHelloWorld::test_body[/bytes-resource1-<lambda>]",
"tests/test_hello.py::TestHelloWorld::test_body[/data-resource2-<lambda>]",
"tests/test_hello.py::TestHelloWorld::test_no_body_on_head",
"tests/test_hello.py::TestHelloWorld::test_stream_chunked",
"tests/test_hello.py::TestHelloWorld::test_stream_known_len",
"tests/test_hello.py::TestHelloWorld::test_filelike",
"tests/test_hello.py::TestHelloWorld::test_filelike_closing[ClosingBytesIO-True]",
"tests/test_hello.py::TestHelloWorld::test_filelike_closing[NonClosingBytesIO-False]",
"tests/test_hello.py::TestHelloWorld::test_filelike_using_helper",
"tests/test_hello.py::TestHelloWorld::test_status_not_set",
"tests/test_http_method_routing.py::TestHttpMethodRouting::test_get",
"tests/test_http_method_routing.py::TestHttpMethodRouting::test_put",
"tests/test_http_method_routing.py::TestHttpMethodRouting::test_post_not_allowed",
"tests/test_http_method_routing.py::TestHttpMethodRouting::test_report",
"tests/test_http_method_routing.py::TestHttpMethodRouting::test_misc",
"tests/test_http_method_routing.py::TestHttpMethodRouting::test_methods_not_allowed_simple",
"tests/test_http_method_routing.py::TestHttpMethodRouting::test_methods_not_allowed_complex",
"tests/test_http_method_routing.py::TestHttpMethodRouting::test_method_not_allowed_with_param",
"tests/test_http_method_routing.py::TestHttpMethodRouting::test_default_on_options",
"tests/test_http_method_routing.py::TestHttpMethodRouting::test_on_options",
"tests/test_http_method_routing.py::TestHttpMethodRouting::test_bogus_method",
"tests/test_httperror.py::TestHTTPError::test_base_class",
"tests/test_httperror.py::TestHTTPError::test_no_description_json",
"tests/test_httperror.py::TestHTTPError::test_no_description_xml",
"tests/test_httperror.py::TestHTTPError::test_client_does_not_accept_json_or_xml",
"tests/test_httperror.py::TestHTTPError::test_custom_old_error_serializer",
"tests/test_httperror.py::TestHTTPError::test_custom_old_error_serializer_no_body",
"tests/test_httperror.py::TestHTTPError::test_custom_new_error_serializer",
"tests/test_httperror.py::TestHTTPError::test_client_does_not_accept_anything",
"tests/test_httperror.py::TestHTTPError::test_forbidden[application/json]",
"tests/test_httperror.py::TestHTTPError::test_forbidden[application/vnd.company.system.project.resource+json;v=1.1]",
"tests/test_httperror.py::TestHTTPError::test_forbidden[application/json-patch+json]",
"tests/test_httperror.py::TestHTTPError::test_epic_fail_json",
"tests/test_httperror.py::TestHTTPError::test_epic_fail_xml[text/xml]",
"tests/test_httperror.py::TestHTTPError::test_epic_fail_xml[application/xml]",
"tests/test_httperror.py::TestHTTPError::test_epic_fail_xml[application/vnd.company.system.project.resource+xml;v=1.1]",
"tests/test_httperror.py::TestHTTPError::test_epic_fail_xml[application/atom+xml]",
"tests/test_httperror.py::TestHTTPError::test_unicode_json",
"tests/test_httperror.py::TestHTTPError::test_unicode_xml",
"tests/test_httperror.py::TestHTTPError::test_401",
"tests/test_httperror.py::TestHTTPError::test_404_without_body",
"tests/test_httperror.py::TestHTTPError::test_404_with_body",
"tests/test_httperror.py::TestHTTPError::test_405_without_body",
"tests/test_httperror.py::TestHTTPError::test_405_without_body_with_extra_headers",
"tests/test_httperror.py::TestHTTPError::test_405_without_body_with_extra_headers_double_check",
"tests/test_httperror.py::TestHTTPError::test_405_with_body",
"tests/test_httperror.py::TestHTTPError::test_410_without_body",
"tests/test_httperror.py::TestHTTPError::test_410_with_body",
"tests/test_httperror.py::TestHTTPError::test_411",
"tests/test_httperror.py::TestHTTPError::test_413",
"tests/test_httperror.py::TestHTTPError::test_temporary_413_integer_retry_after",
"tests/test_httperror.py::TestHTTPError::test_temporary_413_datetime_retry_after",
"tests/test_httperror.py::TestHTTPError::test_414",
"tests/test_httperror.py::TestHTTPError::test_414_with_title",
"tests/test_httperror.py::TestHTTPError::test_414_with_description",
"tests/test_httperror.py::TestHTTPError::test_414_with_custom_kwargs",
"tests/test_httperror.py::TestHTTPError::test_416",
"tests/test_httperror.py::TestHTTPError::test_429_no_retry_after",
"tests/test_httperror.py::TestHTTPError::test_429",
"tests/test_httperror.py::TestHTTPError::test_429_datetime",
"tests/test_httperror.py::TestHTTPError::test_503_integer_retry_after",
"tests/test_httperror.py::TestHTTPError::test_503_datetime_retry_after",
"tests/test_httperror.py::TestHTTPError::test_invalid_header",
"tests/test_httperror.py::TestHTTPError::test_missing_header",
"tests/test_httperror.py::TestHTTPError::test_invalid_param",
"tests/test_httperror.py::TestHTTPError::test_missing_param",
"tests/test_httperror.py::TestHTTPError::test_misc",
"tests/test_httperror.py::TestHTTPError::test_title_default_message_if_none",
"tests/test_httpstatus.py::TestHTTPStatus::test_raise_status_in_before_hook",
"tests/test_httpstatus.py::TestHTTPStatus::test_raise_status_in_responder",
"tests/test_httpstatus.py::TestHTTPStatus::test_raise_status_runs_after_hooks",
"tests/test_httpstatus.py::TestHTTPStatus::test_raise_status_survives_after_hooks",
"tests/test_httpstatus.py::TestHTTPStatus::test_raise_status_empty_body",
"tests/test_httpstatus.py::TestHTTPStatusWithMiddleware::test_raise_status_in_process_request",
"tests/test_httpstatus.py::TestHTTPStatusWithMiddleware::test_raise_status_in_process_resource",
"tests/test_httpstatus.py::TestHTTPStatusWithMiddleware::test_raise_status_runs_process_response",
"tests/test_media_handlers.py::test_base_handler_contract",
"tests/test_middleware.py::TestRequestTimeMiddleware::test_skip_process_resource",
"tests/test_middleware.py::TestRequestTimeMiddleware::test_add_invalid_middleware",
"tests/test_middleware.py::TestRequestTimeMiddleware::test_response_middleware_raises_exception",
"tests/test_middleware.py::TestRequestTimeMiddleware::test_log_get_request",
"tests/test_middleware.py::TestTransactionIdMiddleware::test_generate_trans_id_with_request",
"tests/test_middleware.py::TestSeveralMiddlewares::test_generate_trans_id_and_time_with_request",
"tests/test_middleware.py::TestSeveralMiddlewares::test_legacy_middleware_called_with_correct_args",
"tests/test_middleware.py::TestSeveralMiddlewares::test_middleware_execution_order",
"tests/test_middleware.py::TestSeveralMiddlewares::test_independent_middleware_execution_order",
"tests/test_middleware.py::TestSeveralMiddlewares::test_multiple_reponse_mw_throw_exception",
"tests/test_middleware.py::TestSeveralMiddlewares::test_inner_mw_throw_exception",
"tests/test_middleware.py::TestSeveralMiddlewares::test_inner_mw_with_ex_handler_throw_exception",
"tests/test_middleware.py::TestSeveralMiddlewares::test_outer_mw_with_ex_handler_throw_exception",
"tests/test_middleware.py::TestSeveralMiddlewares::test_order_mw_executed_when_exception_in_resp",
"tests/test_middleware.py::TestSeveralMiddlewares::test_order_independent_mw_executed_when_exception_in_resp",
"tests/test_middleware.py::TestSeveralMiddlewares::test_order_mw_executed_when_exception_in_req",
"tests/test_middleware.py::TestSeveralMiddlewares::test_order_independent_mw_executed_when_exception_in_req",
"tests/test_middleware.py::TestSeveralMiddlewares::test_order_mw_executed_when_exception_in_rsrc",
"tests/test_middleware.py::TestSeveralMiddlewares::test_order_independent_mw_executed_when_exception_in_rsrc",
"tests/test_middleware.py::TestRemoveBasePathMiddleware::test_base_path_is_removed_before_routing",
"tests/test_middleware.py::TestResourceMiddleware::test_can_access_resource_params",
"tests/test_middleware.py::TestErrorHandling::test_error_composed_before_resp_middleware_called",
"tests/test_middleware.py::TestErrorHandling::test_http_status_raised_from_error_handler",
"tests/test_options.py::TestRequestOptions::test_option_defaults",
"tests/test_options.py::TestRequestOptions::test_options_toggle[keep_blank_qs_values]",
"tests/test_options.py::TestRequestOptions::test_options_toggle[auto_parse_form_urlencoded]",
"tests/test_options.py::TestRequestOptions::test_options_toggle[auto_parse_qs_csv]",
"tests/test_options.py::TestRequestOptions::test_options_toggle[strip_url_path_trailing_slash]",
"tests/test_options.py::TestRequestOptions::test_incorrect_options",
"tests/test_query_params.py::TestQueryParams::test_none[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_none[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_blank[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_blank[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_simple[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_simple[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_percent_encoded[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_percent_encoded[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_option_auto_parse_qs_csv_simple_false[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_option_auto_parse_qs_csv_simple_false[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_option_auto_parse_qs_csv_simple_true[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_option_auto_parse_qs_csv_simple_true[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_option_auto_parse_qs_csv_complex_false[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_option_auto_parse_qs_csv_complex_false[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_bad_percentage[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_bad_percentage[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_allowed_names[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_allowed_names[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_required[get_param-simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_required[get_param-simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_required[get_param_as_int-simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_required[get_param_as_int-simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_required[get_param_as_bool-simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_required[get_param_as_bool-simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_required[get_param_as_list-simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_required[get_param_as_list-simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_int[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_int[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_int_neg[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_int_neg[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_boolean[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_boolean[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_boolean_blank[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_boolean_blank[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_list_type[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_list_type[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_list_type_blank[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_list_type_blank[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_list_transformer[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_list_transformer[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_param_property[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_param_property[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_multiple_form_keys[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_multiple_form_keys[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_multiple_keys_as_bool[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_multiple_keys_as_bool[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_multiple_keys_as_int[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_multiple_keys_as_int[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_multiple_form_keys_as_list[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_multiple_form_keys_as_list[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_date_valid[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_date_valid[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_date_missing_param[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_date_missing_param[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_date_valid_with_format[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_date_valid_with_format[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_date_store[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_date_store[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_date_invalid[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_date_invalid[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_datetime_valid[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_datetime_valid[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_datetime_missing_param[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_datetime_missing_param[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_datetime_valid_with_format[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_datetime_valid_with_format[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_datetime_store[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_datetime_store[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_datetime_invalid[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_datetime_invalid[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_dict_valid[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_dict_valid[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_dict_missing_param[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_dict_missing_param[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_dict_store[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_dict_store[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_dict_invalid[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_dict_invalid[simulate_request_post_query_params]",
"tests/test_query_params.py::TestPostQueryParams::test_http_methods_body_expected[POST]",
"tests/test_query_params.py::TestPostQueryParams::test_http_methods_body_expected[PUT]",
"tests/test_query_params.py::TestPostQueryParams::test_http_methods_body_expected[PATCH]",
"tests/test_query_params.py::TestPostQueryParams::test_http_methods_body_expected[DELETE]",
"tests/test_query_params.py::TestPostQueryParams::test_http_methods_body_expected[OPTIONS]",
"tests/test_query_params.py::TestPostQueryParams::test_http_methods_body_not_expected[GET]",
"tests/test_query_params.py::TestPostQueryParams::test_http_methods_body_not_expected[HEAD]",
"tests/test_query_params.py::TestPostQueryParams::test_non_ascii",
"tests/test_query_params.py::TestPostQueryParams::test_empty_body",
"tests/test_query_params.py::TestPostQueryParams::test_empty_body_no_content_length",
"tests/test_query_params.py::TestPostQueryParams::test_explicitly_disable_auto_parse",
"tests/test_query_params.py::TestPostQueryParamsDefaultBehavior::test_dont_auto_parse_by_default",
"tests/test_redirects.py::TestRedirects::test_redirect[GET-301",
"tests/test_redirects.py::TestRedirects::test_redirect[POST-302",
"tests/test_redirects.py::TestRedirects::test_redirect[PUT-303",
"tests/test_redirects.py::TestRedirects::test_redirect[DELETE-307",
"tests/test_redirects.py::TestRedirects::test_redirect[HEAD-308",
"tests/test_request_access_route.py::test_remote_addr_only",
"tests/test_request_access_route.py::test_rfc_forwarded",
"tests/test_request_access_route.py::test_malformed_rfc_forwarded",
"tests/test_request_access_route.py::test_x_forwarded_for",
"tests/test_request_access_route.py::test_x_real_ip",
"tests/test_request_access_route.py::test_remote_addr",
"tests/test_request_access_route.py::test_remote_addr_missing",
"tests/test_request_attrs.py::TestRequestAttributes::test_missing_qs",
"tests/test_request_attrs.py::TestRequestAttributes::test_empty",
"tests/test_request_attrs.py::TestRequestAttributes::test_host",
"tests/test_request_attrs.py::TestRequestAttributes::test_subdomain",
"tests/test_request_attrs.py::TestRequestAttributes::test_reconstruct_url",
"tests/test_request_attrs.py::TestRequestAttributes::test_nonlatin_path[/hello_\\u043f\\u0440\\u0438\\u0432\\u0435\\u0442]",
"tests/test_request_attrs.py::TestRequestAttributes::test_nonlatin_path[/test/%E5%BB%B6%E5%AE%89]",
"tests/test_request_attrs.py::TestRequestAttributes::test_nonlatin_path[/test/%C3%A4%C3%B6%C3%BC%C3%9F%E2%82%AC]",
"tests/test_request_attrs.py::TestRequestAttributes::test_uri",
"tests/test_request_attrs.py::TestRequestAttributes::test_uri_https",
"tests/test_request_attrs.py::TestRequestAttributes::test_uri_http_1_0",
"tests/test_request_attrs.py::TestRequestAttributes::test_relative_uri",
"tests/test_request_attrs.py::TestRequestAttributes::test_client_accepts",
"tests/test_request_attrs.py::TestRequestAttributes::test_client_accepts_bogus",
"tests/test_request_attrs.py::TestRequestAttributes::test_client_accepts_props",
"tests/test_request_attrs.py::TestRequestAttributes::test_client_prefers",
"tests/test_request_attrs.py::TestRequestAttributes::test_range",
"tests/test_request_attrs.py::TestRequestAttributes::test_range_unit",
"tests/test_request_attrs.py::TestRequestAttributes::test_range_invalid",
"tests/test_request_attrs.py::TestRequestAttributes::test_missing_attribute_header",
"tests/test_request_attrs.py::TestRequestAttributes::test_content_length",
"tests/test_request_attrs.py::TestRequestAttributes::test_bogus_content_length_nan",
"tests/test_request_attrs.py::TestRequestAttributes::test_bogus_content_length_neg",
"tests/test_request_attrs.py::TestRequestAttributes::test_date[Date-date]",
"tests/test_request_attrs.py::TestRequestAttributes::test_date[If-Modified-Since-if_modified_since]",
"tests/test_request_attrs.py::TestRequestAttributes::test_date[If-Unmodified-Since-if_unmodified_since]",
"tests/test_request_attrs.py::TestRequestAttributes::test_date_invalid[Date-date]",
"tests/test_request_attrs.py::TestRequestAttributes::test_date_invalid[If-Modified-Since-if_modified_since]",
"tests/test_request_attrs.py::TestRequestAttributes::test_date_invalid[If-Unmodified-Since-if_unmodified_since]",
"tests/test_request_attrs.py::TestRequestAttributes::test_date_missing[date]",
"tests/test_request_attrs.py::TestRequestAttributes::test_date_missing[if_modified_since]",
"tests/test_request_attrs.py::TestRequestAttributes::test_date_missing[if_unmodified_since]",
"tests/test_request_attrs.py::TestRequestAttributes::test_attribute_headers",
"tests/test_request_attrs.py::TestRequestAttributes::test_method",
"tests/test_request_attrs.py::TestRequestAttributes::test_empty_path",
"tests/test_request_attrs.py::TestRequestAttributes::test_content_type_method",
"tests/test_request_attrs.py::TestRequestAttributes::test_content_length_method",
"tests/test_request_attrs.py::TestRequestAttributes::test_port_explicit[HTTP/1.0]",
"tests/test_request_attrs.py::TestRequestAttributes::test_port_explicit[HTTP/1.1]",
"tests/test_request_attrs.py::TestRequestAttributes::test_scheme_https[HTTP/1.0]",
"tests/test_request_attrs.py::TestRequestAttributes::test_scheme_https[HTTP/1.1]",
"tests/test_request_attrs.py::TestRequestAttributes::test_scheme_http[HTTP/1.0-True]",
"tests/test_request_attrs.py::TestRequestAttributes::test_scheme_http[HTTP/1.0-False]",
"tests/test_request_attrs.py::TestRequestAttributes::test_scheme_http[HTTP/1.1-True]",
"tests/test_request_attrs.py::TestRequestAttributes::test_scheme_http[HTTP/1.1-False]",
"tests/test_request_attrs.py::TestRequestAttributes::test_netloc_default_port[HTTP/1.0]",
"tests/test_request_attrs.py::TestRequestAttributes::test_netloc_default_port[HTTP/1.1]",
"tests/test_request_attrs.py::TestRequestAttributes::test_netloc_nondefault_port[HTTP/1.0]",
"tests/test_request_attrs.py::TestRequestAttributes::test_netloc_nondefault_port[HTTP/1.1]",
"tests/test_request_attrs.py::TestRequestAttributes::test_netloc_from_env[HTTP/1.0]",
"tests/test_request_attrs.py::TestRequestAttributes::test_netloc_from_env[HTTP/1.1]",
"tests/test_request_attrs.py::TestRequestAttributes::test_app_present",
"tests/test_request_attrs.py::TestRequestAttributes::test_app_blank",
"tests/test_request_attrs.py::TestRequestAttributes::test_app_missing",
"tests/test_request_body.py::TestRequestBody::test_empty_body",
"tests/test_request_body.py::TestRequestBody::test_tiny_body",
"tests/test_request_body.py::TestRequestBody::test_tiny_body_overflow",
"tests/test_request_body.py::TestRequestBody::test_read_body",
"tests/test_request_body.py::TestRequestBody::test_bounded_stream_property_empty_body",
"tests/test_request_body.py::TestRequestBody::test_body_stream_wrapper",
"tests/test_request_body.py::TestRequestBody::test_request_repr",
"tests/test_request_context.py::TestRequestContext::test_default_request_context",
"tests/test_request_context.py::TestRequestContext::test_custom_request_context",
"tests/test_request_context.py::TestRequestContext::test_custom_request_context_failure",
"tests/test_request_context.py::TestRequestContext::test_custom_request_context_request_access",
"tests/test_request_forwarded.py::test_no_forwarded_headers",
"tests/test_request_forwarded.py::test_x_forwarded_host",
"tests/test_request_forwarded.py::test_x_forwarded_proto",
"tests/test_request_forwarded.py::test_forwarded_host",
"tests/test_request_forwarded.py::test_forwarded_multiple_params",
"tests/test_request_forwarded.py::test_forwarded_missing_first_hop_host",
"tests/test_request_media.py::test_json[None]",
"tests/test_request_media.py::test_json[*/*]",
"tests/test_request_media.py::test_json[application/json]",
"tests/test_request_media.py::test_json[application/json;",
"tests/test_request_media.py::test_msgpack[application/msgpack]",
"tests/test_request_media.py::test_msgpack[application/msgpack;",
"tests/test_request_media.py::test_msgpack[application/x-msgpack]",
"tests/test_request_media.py::test_unknown_media_type[nope/json]",
"tests/test_request_media.py::test_invalid_json",
"tests/test_request_media.py::test_invalid_msgpack",
"tests/test_request_media.py::test_invalid_stream_fails_gracefully",
"tests/test_request_media.py::test_use_cached_media",
"tests/test_response.py::test_response_set_content_type_set",
"tests/test_response.py::test_response_set_content_type_not_set",
"tests/test_response_body.py::TestResponseBody::test_append_body",
"tests/test_response_body.py::TestResponseBody::test_response_repr",
"tests/test_response_context.py::TestRequestContext::test_default_response_context",
"tests/test_response_context.py::TestRequestContext::test_custom_response_context",
"tests/test_response_context.py::TestRequestContext::test_custom_response_context_failure",
"tests/test_response_context.py::TestRequestContext::test_custom_response_context_factory",
"tests/test_response_media.py::test_json[*/*]",
"tests/test_response_media.py::test_json[application/json;",
"tests/test_response_media.py::test_msgpack[application/msgpack]",
"tests/test_response_media.py::test_msgpack[application/msgpack;",
"tests/test_response_media.py::test_msgpack[application/x-msgpack]",
"tests/test_response_media.py::test_unknown_media_type",
"tests/test_response_media.py::test_use_cached_media",
"tests/test_response_media.py::test_default_media_type[]",
"tests/test_sinks.py::TestDefaultRouting::test_single_default_pattern",
"tests/test_sinks.py::TestDefaultRouting::test_single_simple_pattern",
"tests/test_sinks.py::TestDefaultRouting::test_single_compiled_pattern",
"tests/test_sinks.py::TestDefaultRouting::test_named_groups",
"tests/test_sinks.py::TestDefaultRouting::test_multiple_patterns",
"tests/test_sinks.py::TestDefaultRouting::test_with_route",
"tests/test_sinks.py::TestDefaultRouting::test_route_precedence",
"tests/test_sinks.py::TestDefaultRouting::test_route_precedence_with_id",
"tests/test_sinks.py::TestDefaultRouting::test_route_precedence_with_both_id",
"tests/test_slots.py::TestSlots::test_slots_request",
"tests/test_slots.py::TestSlots::test_slots_response",
"tests/test_static.py::test_bad_path[/static]",
"tests/test_static.py::test_bad_path[/static/]",
"tests/test_static.py::test_bad_path[/static/.]",
"tests/test_static.py::test_bad_path[/static/..]",
"tests/test_static.py::test_bad_path[/static/../.]",
"tests/test_static.py::test_bad_path[/static/.././etc/passwd]",
"tests/test_static.py::test_bad_path[/static/../etc/passwd]",
"tests/test_static.py::test_bad_path[/static/css/../../secret]",
"tests/test_static.py::test_bad_path[/static/css/../../etc/passwd]",
"tests/test_static.py::test_bad_path[/static/./../etc/passwd]",
"tests/test_static.py::test_bad_path[/static/css/../.\\\\056/etc/passwd]",
"tests/test_static.py::test_bad_path[/static/./\\\\056./etc/passwd]",
"tests/test_static.py::test_bad_path[/static/\\\\056\\\\056/etc/passwd]",
"tests/test_static.py::test_bad_path[/static//test.css]",
"tests/test_static.py::test_bad_path[/static//COM10]",
"tests/test_static.py::test_bad_path[/static/path//test.css]",
"tests/test_static.py::test_bad_path[/static/path///test.css]",
"tests/test_static.py::test_bad_path[/static/path////test.css]",
"tests/test_static.py::test_bad_path[/static/path/foo//test.css]",
"tests/test_static.py::test_bad_path[/static/.\\x00ssh/authorized_keys]",
"tests/test_static.py::test_bad_path[/static/.\\x1fssh/authorized_keys]",
"tests/test_static.py::test_bad_path[/static/.\\x80ssh/authorized_keys]",
"tests/test_static.py::test_bad_path[/static/.\\x9fssh/authorized_keys]",
"tests/test_static.py::test_bad_path[/static/~/.ssh/authorized_keys]",
"tests/test_static.py::test_bad_path[/static/.ssh/authorized_key?]",
"tests/test_static.py::test_bad_path[/static/.ssh/authorized_key>foo]",
"tests/test_static.py::test_bad_path[/static/.ssh/authorized_key|foo]",
"tests/test_static.py::test_bad_path[/static/.ssh/authorized_key<foo]",
"tests/test_static.py::test_bad_path[/static/something:something]",
"tests/test_static.py::test_bad_path[/static/thing*.sql]",
"tests/test_static.py::test_bad_path[/static/'thing'.sql]",
"tests/test_static.py::test_bad_path[/static/\"thing\".sql]",
"tests/test_static.py::test_bad_path[/static/something.]",
"tests/test_static.py::test_bad_path[/static/something..]",
"tests/test_static.py::test_bad_path[/static/something",
"tests/test_static.py::test_bad_path[/static/",
"tests/test_static.py::test_bad_path[/static/something\\t]",
"tests/test_static.py::test_bad_path[/static/\\tsomething]",
"tests/test_static.py::test_bad_path[/static/ttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttx]",
"tests/test_static.py::test_invalid_args[static-/var/www/statics]",
"tests/test_static.py::test_invalid_args[/static-./var/www/statics]",
"tests/test_static.py::test_invalid_args[/static-statics]",
"tests/test_static.py::test_invalid_args[/static-../statics]",
"tests/test_static.py::test_good_path[/static/-/css/test.css-/css/test.css-text/css]",
"tests/test_static.py::test_good_path[/static-/css/test.css-/css/test.css-text/css]",
"tests/test_static.py::test_good_path[/static-/tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt-/tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt-application/octet-stream]",
"tests/test_static.py::test_good_path[/static-/.test.css-/.test.css-text/css]",
"tests/test_static.py::test_good_path[/some/download/-/report.pdf-/report.pdf-application/pdf]",
"tests/test_static.py::test_good_path[/some/download/-/Fancy",
"tests/test_static.py::test_good_path[/some/download-/report.zip-/report.zip-application/zip]",
"tests/test_static.py::test_good_path[/some/download-/foo/../report.zip-/report.zip-application/zip]",
"tests/test_static.py::test_good_path[/some/download-/foo/../bar/../report.zip-/report.zip-application/zip]",
"tests/test_static.py::test_good_path[/some/download-/foo/bar/../../report.zip-/report.zip-application/zip]",
"tests/test_static.py::test_lifo",
"tests/test_static.py::test_lifo_negative",
"tests/test_static.py::test_downloadable",
"tests/test_static.py::test_downloadable_not_found",
"tests/test_uri_converters.py::test_int_converter[123-None-None-None-123]",
"tests/test_uri_converters.py::test_int_converter[01-None-None-None-1]",
"tests/test_uri_converters.py::test_int_converter[001-None-None-None-1]",
"tests/test_uri_converters.py::test_int_converter[0-None-None-None-0]",
"tests/test_uri_converters.py::test_int_converter[00-None-None-None-0]",
"tests/test_uri_converters.py::test_int_converter[1-1-None-None-1]",
"tests/test_uri_converters.py::test_int_converter[12-1-None-None-None0]",
"tests/test_uri_converters.py::test_int_converter[12-2-None-None-120]",
"tests/test_uri_converters.py::test_int_converter[1-1-1-1-1]",
"tests/test_uri_converters.py::test_int_converter[1-1-1-None-1]",
"tests/test_uri_converters.py::test_int_converter[1-1-1-2-1]",
"tests/test_uri_converters.py::test_int_converter[1-1-2-None-None]",
"tests/test_uri_converters.py::test_int_converter[1-1-2-1-None]",
"tests/test_uri_converters.py::test_int_converter[2-1-1-2-2]",
"tests/test_uri_converters.py::test_int_converter[2-1-2-2-2]",
"tests/test_uri_converters.py::test_int_converter[3-1-1-2-None]",
"tests/test_uri_converters.py::test_int_converter[12-1-None-None-None1]",
"tests/test_uri_converters.py::test_int_converter[12-1-1-12-None]",
"tests/test_uri_converters.py::test_int_converter[12-2-None-None-121]",
"tests/test_uri_converters.py::test_int_converter[12-2-1-12-12]",
"tests/test_uri_converters.py::test_int_converter[12-2-12-12-12]",
"tests/test_uri_converters.py::test_int_converter[12-2-13-12-None]",
"tests/test_uri_converters.py::test_int_converter[12-2-13-13-None]",
"tests/test_uri_converters.py::test_int_converter_malformed[0x0F]",
"tests/test_uri_converters.py::test_int_converter_malformed[something]",
"tests/test_uri_converters.py::test_int_converter_malformed[]",
"tests/test_uri_converters.py::test_int_converter_malformed[",
"tests/test_uri_converters.py::test_int_converter_malformed[123",
"tests/test_uri_converters.py::test_int_converter_malformed[123\\t]",
"tests/test_uri_converters.py::test_int_converter_malformed[123\\n]",
"tests/test_uri_converters.py::test_int_converter_malformed[123\\r]",
"tests/test_uri_converters.py::test_int_converter_malformed[123\\x0b]",
"tests/test_uri_converters.py::test_int_converter_malformed[123\\x0c]",
"tests/test_uri_converters.py::test_int_converter_malformed[\\t123]",
"tests/test_uri_converters.py::test_int_converter_malformed[\\n123]",
"tests/test_uri_converters.py::test_int_converter_malformed[\\r123]",
"tests/test_uri_converters.py::test_int_converter_malformed[\\x0b123]",
"tests/test_uri_converters.py::test_int_converter_malformed[\\x0c123]",
"tests/test_uri_converters.py::test_int_converter_invalid_config[0]",
"tests/test_uri_converters.py::test_int_converter_invalid_config[-1]",
"tests/test_uri_converters.py::test_int_converter_invalid_config[-10]",
"tests/test_uri_converters.py::test_datetime_converter[07-03-17-%m-%d-%y-expected0]",
"tests/test_uri_converters.py::test_datetime_converter[07-03-17",
"tests/test_uri_converters.py::test_datetime_converter[2017-07-03T14:30:01Z-%Y-%m-%dT%H:%M:%SZ-expected2]",
"tests/test_uri_converters.py::test_datetime_converter[2017-07-03T14:30:01-%Y-%m-%dT%H:%M:%S-expected3]",
"tests/test_uri_converters.py::test_datetime_converter[2017_19-%Y_%H-expected4]",
"tests/test_uri_converters.py::test_datetime_converter[2017-07-03T14:30:01-%Y-%m-%dT%H:%M:%SZ-None]",
"tests/test_uri_converters.py::test_datetime_converter[",
"tests/test_uri_converters.py::test_datetime_converter[07",
"tests/test_uri_converters.py::test_datetime_converter_default_format",
"tests/test_uri_converters.py::test_uuid_converter[378360d3-4190-4f9f-a1ed-d1346d56fafe-expected0]",
"tests/test_uri_converters.py::test_uuid_converter[378360d34190-4f9f-a1ed-d1346d56fafe-expected1]",
"tests/test_uri_converters.py::test_uuid_converter[378360d341904f9fa1edd1346d56fafe-expected2]",
"tests/test_uri_converters.py::test_uuid_converter[urn:uuid:378360d3-4190-4f9f-a1ed-d1346d56fafe-expected3]",
"tests/test_uri_converters.py::test_uuid_converter[urn:uuid:378360d341904f9fa1edd1346d56fafe-expected4]",
"tests/test_uri_converters.py::test_uuid_converter[",
"tests/test_uri_converters.py::test_uuid_converter[378360d3-4190-4f9f-a1ed-d1346d56fafe",
"tests/test_uri_converters.py::test_uuid_converter[378360d3-4190-4f9f-a1ed-d1346d56faf-None]",
"tests/test_uri_converters.py::test_uuid_converter[3-None]",
"tests/test_uri_converters.py::test_uuid_converter[378360d3-4190-4f9f-a1ed-d1346d56fafg-None]",
"tests/test_uri_converters.py::test_uuid_converter[378360d3_4190_4f9f_a1ed_d1346d56fafe-None]",
"tests/test_uri_templates.py::test_root_path",
"tests/test_uri_templates.py::test_no_vars",
"tests/test_uri_templates.py::test_special_chars",
"tests/test_uri_templates.py::test_single[id]",
"tests/test_uri_templates.py::test_single[id123]",
"tests/test_uri_templates.py::test_single[widget_id]",
"tests/test_uri_templates.py::test_int_converter[/{id:int}]",
"tests/test_uri_templates.py::test_int_converter[/{id:int(3)}]",
"tests/test_uri_templates.py::test_int_converter[/{id:int(min=123)}]",
"tests/test_uri_templates.py::test_int_converter[/{id:int(min=123,",
"tests/test_uri_templates.py::test_int_converter_rejections[/{id:int(2)}]",
"tests/test_uri_templates.py::test_int_converter_rejections[/{id:int(min=124)}]",
"tests/test_uri_templates.py::test_int_converter_rejections[/{id:int(num_digits=3,",
"tests/test_uri_templates.py::test_datetime_converter[/{start_year:int}-to-{timestamp:dt}-/1961-to-1969-07-21T02:56:00Z-dt_expected0]",
"tests/test_uri_templates.py::test_datetime_converter[/{start_year:int}-to-{timestamp:dt(\"%Y-%m-%d\")}-/1961-to-1969-07-21-dt_expected1]",
"tests/test_uri_templates.py::test_datetime_converter[/{start_year:int}/{timestamp:dt(\"%Y-%m-%d",
"tests/test_uri_templates.py::test_datetime_converter[/{start_year:int}-to-{timestamp:dt(\"%Y-%m\")}-/1961-to-1969-07-21-None]",
"tests/test_uri_templates.py::test_uuid_converter[/widgets/{widget_id:uuid}-/widgets/53ec7369-0d5b-47de-a5c4-6172472dfeb2-expected0]",
"tests/test_uri_templates.py::test_uuid_converter[/widgets/{widget_id:uuid}/orders-/widgets/53ec73690d5b47dea5c46172472dfeb2/orders-expected1]",
"tests/test_uri_templates.py::test_uuid_converter[/versions/diff/{left:uuid()}...{right:uuid()}-/versions/diff/53ec7369-0d5b-47de-a5c4-6172472dfeb2...807f7894-16b9-4db2-8ca3-c8c28e6b6706-expected2]",
"tests/test_uri_templates.py::test_uuid_converter[/versions/diff/{left:uuid}...{right:uuid()}-/versions/diff/53ec7369-0d5b-47de-a5c4-6172472dfeb2...807f7894-16b9-4db2-8ca3-c8c28e6b6706-expected3]",
"tests/test_uri_templates.py::test_uuid_converter[/versions/diff/{left:uuid()}...{right:uuid}-/versions/diff/53ec7369-0d5b-47de-a5c4-6172472dfeb2...807f7894-16b9-4db2-8ca3-c8c28e6b6706-expected4]",
"tests/test_uri_templates.py::test_uuid_converter[/widgets/{widget_id:uuid}/orders-/widgets/53ec73690d5b47dea5c46172472dfeb/orders-None]",
"tests/test_uri_templates.py::test_uuid_converter_complex_segment",
"tests/test_uri_templates.py::test_converter_custom[/{food:spam}-/something-expected0]",
"tests/test_uri_templates.py::test_converter_custom[/{food:spam(\")\")}:{food_too:spam(\"()\")}-/bacon:eggs-expected1]",
"tests/test_uri_templates.py::test_converter_custom[/({food:spam()}){food_too:spam(\"()\")}-/(bacon)eggs-expected2]",
"tests/test_uri_templates.py::test_single_trailing_slash",
"tests/test_uri_templates.py::test_multiple",
"tests/test_uri_templates.py::test_empty_path_component[//]",
"tests/test_uri_templates.py::test_empty_path_component[//begin]",
"tests/test_uri_templates.py::test_empty_path_component[/end//]",
"tests/test_uri_templates.py::test_empty_path_component[/in//side]",
"tests/test_uri_templates.py::test_relative_path[]",
"tests/test_uri_templates.py::test_relative_path[no]",
"tests/test_uri_templates.py::test_relative_path[no/leading_slash]",
"tests/test_uri_templates.py::test_same_level_complex_var[True]",
"tests/test_uri_templates.py::test_same_level_complex_var[False]",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_string_type_required[42]",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_string_type_required[API]",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_template_must_start_with_slash[this]",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_template_must_start_with_slash[this/that]",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_template_may_not_contain_double_slash[//]",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_template_may_not_contain_double_slash[a//]",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_template_may_not_contain_double_slash[//b]",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_template_may_not_contain_double_slash[a//b]",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_template_may_not_contain_double_slash[a/b//]",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_template_may_not_contain_double_slash[a/b//c]",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_root",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_no_fields[/hello]",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_no_fields[/hello/world]",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_no_fields[/hi/there/how/are/you]",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_one_field",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_one_field_with_digits",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_one_field_with_prefixed_digits",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_two_fields[]",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_two_fields[/]",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_three_fields",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_malformed_field",
"tests/test_utils.py::TestFalconUtils::test_deprecated_decorator",
"tests/test_utils.py::TestFalconUtils::test_http_now",
"tests/test_utils.py::TestFalconUtils::test_dt_to_http",
"tests/test_utils.py::TestFalconUtils::test_http_date_to_dt",
"tests/test_utils.py::TestFalconUtils::test_pack_query_params_none",
"tests/test_utils.py::TestFalconUtils::test_pack_query_params_one",
"tests/test_utils.py::TestFalconUtils::test_pack_query_params_several",
"tests/test_utils.py::TestFalconUtils::test_uri_encode",
"tests/test_utils.py::TestFalconUtils::test_uri_encode_double",
"tests/test_utils.py::TestFalconUtils::test_uri_encode_value",
"tests/test_utils.py::TestFalconUtils::test_uri_decode",
"tests/test_utils.py::TestFalconUtils::test_prop_uri_encode_models_stdlib_quote",
"tests/test_utils.py::TestFalconUtils::test_prop_uri_encode_value_models_stdlib_quote_safe_tilde",
"tests/test_utils.py::TestFalconUtils::test_prop_uri_decode_models_stdlib_unquote_plus",
"tests/test_utils.py::TestFalconUtils::test_parse_query_string",
"tests/test_utils.py::TestFalconUtils::test_parse_host",
"tests/test_utils.py::TestFalconUtils::test_get_http_status",
"tests/test_utils.py::TestFalconTesting::test_path_escape_chars_in_create_environ",
"tests/test_utils.py::TestFalconTesting::test_no_prefix_allowed_for_query_strings_in_create_environ",
"tests/test_utils.py::TestFalconTesting::test_none_header_value_in_create_environ",
"tests/test_utils.py::TestFalconTesting::test_decode_empty_result",
"tests/test_utils.py::TestFalconTesting::test_httpnow_alias_for_backwards_compat",
"tests/test_utils.py::test_simulate_request_protocol[https-CONNECT]",
"tests/test_utils.py::test_simulate_request_protocol[https-DELETE]",
"tests/test_utils.py::test_simulate_request_protocol[https-GET]",
"tests/test_utils.py::test_simulate_request_protocol[https-HEAD]",
"tests/test_utils.py::test_simulate_request_protocol[https-OPTIONS]",
"tests/test_utils.py::test_simulate_request_protocol[https-PATCH]",
"tests/test_utils.py::test_simulate_request_protocol[https-POST]",
"tests/test_utils.py::test_simulate_request_protocol[https-PUT]",
"tests/test_utils.py::test_simulate_request_protocol[https-TRACE]",
"tests/test_utils.py::test_simulate_request_protocol[http-CONNECT]",
"tests/test_utils.py::test_simulate_request_protocol[http-DELETE]",
"tests/test_utils.py::test_simulate_request_protocol[http-GET]",
"tests/test_utils.py::test_simulate_request_protocol[http-HEAD]",
"tests/test_utils.py::test_simulate_request_protocol[http-OPTIONS]",
"tests/test_utils.py::test_simulate_request_protocol[http-PATCH]",
"tests/test_utils.py::test_simulate_request_protocol[http-POST]",
"tests/test_utils.py::test_simulate_request_protocol[http-PUT]",
"tests/test_utils.py::test_simulate_request_protocol[http-TRACE]",
"tests/test_utils.py::test_simulate_free_functions[simulate_get]",
"tests/test_utils.py::test_simulate_free_functions[simulate_head]",
"tests/test_utils.py::test_simulate_free_functions[simulate_post]",
"tests/test_utils.py::test_simulate_free_functions[simulate_put]",
"tests/test_utils.py::test_simulate_free_functions[simulate_options]",
"tests/test_utils.py::test_simulate_free_functions[simulate_patch]",
"tests/test_utils.py::test_simulate_free_functions[simulate_delete]",
"tests/test_utils.py::TestFalconTestCase::test_status",
"tests/test_utils.py::TestFalconTestCase::test_wsgi_iterable_not_closeable",
"tests/test_utils.py::TestFalconTestCase::test_path_must_start_with_slash",
"tests/test_utils.py::TestFalconTestCase::test_cached_text_in_result",
"tests/test_utils.py::TestFalconTestCase::test_simple_resource_body_json_xor",
"tests/test_utils.py::TestFalconTestCase::test_query_string",
"tests/test_utils.py::TestFalconTestCase::test_query_string_no_question",
"tests/test_utils.py::TestFalconTestCase::test_query_string_in_path",
"tests/test_utils.py::TestCaseFancyAPI::test_something",
"tests/test_utils.py::TestNoApiClass::test_something",
"tests/test_utils.py::TestSetupApi::test_something",
"tests/test_validators.py::test_jsonschema_validation_success",
"tests/test_validators.py::test_jsonschema_validation_failure",
"tests/test_wsgi.py::TestWSGIServer::test_get",
"tests/test_wsgi.py::TestWSGIServer::test_put",
"tests/test_wsgi.py::TestWSGIServer::test_head_405",
"tests/test_wsgi.py::TestWSGIServer::test_post",
"tests/test_wsgi.py::TestWSGIServer::test_post_invalid_content_length",
"tests/test_wsgi.py::TestWSGIServer::test_post_read_bounded_stream",
"tests/test_wsgi.py::TestWSGIServer::test_post_read_bounded_stream_no_body",
"tests/test_wsgi_errors.py::TestWSGIError::test_responder_logged_bytestring",
"tests/test_wsgi_interface.py::TestWSGIInterface::test_srmock",
"tests/test_wsgi_interface.py::TestWSGIInterface::test_pep3333",
"tests/test_wsgiref_inputwrapper_with_size.py::TestWsgiRefInputWrapper::test_resources_can_read_request_stream_during_tests"
] | [
"tests/test_response_media.py::test_default_media_type[media_type1]"
] | [] | [] | Apache License 2.0 | 1,938 | 2,849 | [
"falcon/api.py",
"falcon/response.py",
"falcon/response_helpers.py",
"falcon/routing/__init__.py"
] |
CartoDB__cartoframes-319 | 8dcdd8361140d6e087c7301042bd5ba9dc475001 | 2017-12-07 17:46:33 | 088d019b9e95f68def26afa1efe6b9a73ff632fd | diff --git a/cartoframes/context.py b/cartoframes/context.py
index acac1b3e..f05c9f3b 100644
--- a/cartoframes/context.py
+++ b/cartoframes/context.py
@@ -1013,6 +1013,9 @@ class CartoContext(object):
query=layer.orig_query,
geoms=','.join(g['geom_type'] for g in resp['rows']),
common_geom=resp['rows'][0]['geom_type']))
+ elif len(resp['rows']) == 0:
+ raise ValueError('No geometry for layer. Check all layer tables '
+ 'and queries to ensure there are geometries.')
return resp['rows'][0]['geom_type']
def data_boundaries(self, df=None, table_name=None):
@@ -1303,8 +1306,8 @@ class CartoContext(object):
median_income = cc.data_discovery('transaction_events',
regex='.*median income.*',
time='2011 - 2015')
- df = cc.data(median_income,
- 'transaction_event')
+ df = cc.data('transaction_events',
+ median_income)
Pass in cherry-picked measures from the Data Observatory catalog.
The rest of the metadata will be filled in, but it's important to
diff --git a/cartoframes/layer.py b/cartoframes/layer.py
index a10acf78..789344c5 100644
--- a/cartoframes/layer.py
+++ b/cartoframes/layer.py
@@ -7,7 +7,7 @@ basemap layers.
import pandas as pd
import webcolors
-from cartoframes.utils import cssify, join_url
+from cartoframes.utils import cssify, join_url, minify_sql
from cartoframes.styling import BinMethod, mint, antique, get_scheme_cartocss
# colors map data layers without color specified
@@ -388,7 +388,7 @@ class QueryLayer(AbstractLayer):
duration = self.time['duration']
if (self.color in self.style_cols and
self.style_cols[self.color] in ('string', 'boolean', )):
- self.query = ' '.join([s.strip() for s in [
+ self.query = minify_sql([
'SELECT',
' orig.*, __wrap.cf_value_{col}',
'FROM ({query}) AS orig, (',
@@ -404,7 +404,7 @@ class QueryLayer(AbstractLayer):
' ) AS _wrap',
') AS __wrap',
'WHERE __wrap.{col} = orig.{col}',
- ]]).format(col=self.color, query=self.orig_query)
+ ]).format(col=self.color, query=self.orig_query)
agg_func = '\'CDB_Math_Mode(cf_value_{})\''.format(self.color)
self.scheme = {
'bins': ','.join(str(i) for i in range(1, 11)),
@@ -476,6 +476,11 @@ class QueryLayer(AbstractLayer):
'comp-op': 'source-over',
}
})
+ if self.color in self.style_cols:
+ css += cssify({
+ '#layer[{} = null]'.format(self.color): {
+ 'marker-fill': '#666'}
+ })
for t in range(1, self.time['trails'] + 1):
# Trails decay as 1/2^n, and grow 30% at each step
trail_temp = cssify({
@@ -487,33 +492,56 @@ class QueryLayer(AbstractLayer):
css += trail_temp
return css
else:
- return cssify({
- # Point CSS
- "#layer['mapnik::geometry_type'=1]": {
- 'marker-width': size_style,
- 'marker-fill': color_style,
- 'marker-fill-opacity': '1',
- 'marker-allow-overlap': 'true',
- 'marker-line-width': '0.5',
- 'marker-line-color': line_color,
- 'marker-line-opacity': '1',
- },
- # Line CSS
- "#layer['mapnik::geometry_type'=2]": {
- 'line-width': '1.5',
- 'line-color': color_style,
- },
- # Polygon CSS
- "#layer['mapnik::geometry_type'=3]": {
- 'polygon-fill': color_style,
- 'polygon-opacity': '0.9',
- 'polygon-gamma': '0.5',
- 'line-color': '#FFF',
- 'line-width': '0.5',
- 'line-opacity': '0.25',
- 'line-comp-op': 'hard-light',
- }
- })
+ if self.geom_type == 'point':
+ css = cssify({
+ # Point CSS
+ "#layer": {
+ 'marker-width': size_style,
+ 'marker-fill': color_style,
+ 'marker-fill-opacity': '1',
+ 'marker-allow-overlap': 'true',
+ 'marker-line-width': '0.5',
+ 'marker-line-color': line_color,
+ 'marker-line-opacity': '1',
+ }})
+ if self.color in self.style_cols:
+ css += cssify({
+ '#layer[{} = null]'.format(self.color): {
+ 'marker-fill': '#ccc'}
+ })
+ return css
+ elif self.geom_type == 'line':
+ css = cssify({
+ "#layer": {
+ 'line-width': '1.5',
+ 'line-color': color_style,
+ }})
+ if self.color in self.style_cols:
+ css += cssify({
+ '#layer[{} = null]'.format(self.color): {
+ 'line-color': '#ccc'}
+ })
+ return css
+ elif self.geom_type == 'polygon':
+ css = cssify({
+ "#layer": {
+ 'polygon-fill': color_style,
+ 'polygon-opacity': '0.9',
+ 'polygon-gamma': '0.5',
+ 'line-color': '#FFF',
+ 'line-width': '0.5',
+ 'line-opacity': '0.25',
+ 'line-comp-op': 'hard-light',
+ }})
+ if self.color in self.style_cols:
+ css += cssify({
+ '#layer[{} = null]'.format(self.color): {
+ 'polygon-fill': '#ccc'}
+ })
+ return css
+ else:
+ raise ValueError('Unsupported geometry type: {}'.format(
+ self.geom_type))
class Layer(QueryLayer):
diff --git a/cartoframes/utils.py b/cartoframes/utils.py
index 820398bc..36365850 100644
--- a/cartoframes/utils.py
+++ b/cartoframes/utils.py
@@ -17,7 +17,7 @@ def cssify(css_dict):
css += ' {field}: {field_value};'.format(field=field,
field_value=field_value)
css += '} '
- return css
+ return css.strip()
def normalize_colnames(columns):
| null column values in styling not handled correctly
Dataset is earthquakes and styling for both color and size is ramp of magnitude (5 bins, equal interval). Null values in the column used for styling are being colored as though they are in the highest color bin using CARTOframes.

Equivalent styling in CARTO Builder shows that the null values default to the lowest color in the ramp.

We should change this so the null values correspond to the lowest value of ramp, the same as in Builder.
| CartoDB/cartoframes | diff --git a/test/test_context.py b/test/test_context.py
index cdc1d0c6..87fb4383 100644
--- a/test/test_context.py
+++ b/test/test_context.py
@@ -566,6 +566,16 @@ class TestCartoContext(unittest.TestCase):
cc.map(layers=[Layer(self.test_read_table, time='cartodb_id'),
Layer(self.test_read_table, time='cartodb_id')])
+ # no geometry
+ with self.assertRaises(ValueError):
+ cc.map(layers=QueryLayer('''
+ SELECT
+ null::geometry as the_geom,
+ null::geometry as the_geom_webmercator,
+ row_number() OVER () as cartodb_id
+ FROM generate_series(1, 10) as m(i)
+ '''))
+
@unittest.skipIf(WILL_SKIP, 'no cartocredentials, skipping')
def test_cartocontext_map_time(self):
"""CartoContext.map time options"""
diff --git a/test/test_layer.py b/test/test_layer.py
index 8e1699e5..806afe2d 100644
--- a/test/test_layer.py
+++ b/test/test_layer.py
@@ -145,6 +145,7 @@ class TestQueryLayer(unittest.TestCase):
for idx, color in enumerate(str_colors):
qlayer = QueryLayer(self.query, color=color)
+ qlayer.geom_type = 'point'
if color == 'cookie_monster':
qlayer.style_cols[color] = 'number'
qlayer._setup([BaseMap(), qlayer], 1)
@@ -159,6 +160,7 @@ class TestQueryLayer(unittest.TestCase):
qlayer = QueryLayer(self.query, color='datetime_column')
qlayer.style_cols['datetime_column'] = 'date'
qlayer._setup([BaseMap(), qlayer], 1)
+
# Exception testing
# color column cannot be a geometry column
with self.assertRaises(ValueError,
@@ -192,10 +194,12 @@ class TestQueryLayer(unittest.TestCase):
dict(name='Antique', bin_method='',
bins=','.join(str(i) for i in range(1, 11))))
# expect category maps query
+ with open('qlayerquery.txt', 'w') as f:
+ f.write(ql.query)
self.assertRegexpMatches(ql.query,
- '^SELECT orig\.\*, '
- '__wrap.cf_value_colorcol.* '
- 'GROUP BY.*orig\.colorcol$')
+ '(?s)^SELECT\norig\.\*,\s__wrap\.'
+ 'cf_value_colorcol\n.*GROUP\sBY.*orig\.'
+ 'colorcol$')
# cartocss should have cdb math mode
self.assertRegexpMatches(ql.cartocss,
'.*CDB_Math_Mode\(cf_value_colorcol\).*')
@@ -346,8 +350,31 @@ class TestQueryLayer(unittest.TestCase):
"""layer.QueryLayer._get_cartocss"""
qlayer = QueryLayer(self.query, size=dict(column='cold_brew', min=10,
max=20))
+ qlayer.geom_type = 'point'
self.assertRegexpMatches(
qlayer._get_cartocss(BaseMap()),
('.*marker-width:\sramp\(\[cold_brew\],\srange\(10,20\),\s'
'quantiles\(5\)\).*')
)
+
+ # test line cartocss
+ qlayer = QueryLayer(self.query)
+ qlayer.geom_type = 'line'
+ self.assertRegexpMatches(qlayer._get_cartocss(BaseMap()),
+ '^\#layer.*line\-width.*$')
+ # test point, line, polygon
+ for g in ('point', 'line', 'polygon', ):
+ styles = {'point': 'marker\-fill',
+ 'line': 'line\-color',
+ 'polygon': 'polygon\-fill'}
+ qlayer = QueryLayer(self.query, color='colname')
+ qlayer.geom_type = g
+ self.assertRegexpMatches(qlayer._get_cartocss(BaseMap()),
+ '^\#layer.*{}.*\}}$'.format(styles[g]))
+
+ # geometry type should be defined
+ with self.assertRaises(ValueError,
+ msg='invalid geometry type'):
+ ql = QueryLayer(self.query, color='red')
+ ql.geom_type = 'notvalid'
+ ql._get_cartocss(BaseMap())
diff --git a/test/test_utils.py b/test/test_utils.py
index 4be5fb25..af4db384 100644
--- a/test/test_utils.py
+++ b/test/test_utils.py
@@ -85,7 +85,7 @@ class TestUtils(unittest.TestCase):
"marker-width: 6; marker-fill: yellow; "
"marker-fill-opacity: 1; marker-allow-overlap: "
"true; marker-line-width: 0.5; marker-line-color: "
- "black; marker-line-opacity: 1;} "),
+ "black; marker-line-opacity: 1;}"),
msg="point style")
# polygon style
@@ -96,7 +96,7 @@ class TestUtils(unittest.TestCase):
"#cc607d, #9e3963, #672044), quantiles); "
"polygon-opacity: 0.9; polygon-gamma: 0.5; "
"line-color: #FFF; line-width: 0.5; line-opacity: "
- "0.25; line-comp-op: hard-light;} "),
+ "0.25; line-comp-op: hard-light;}"),
msg="polygon style")
# complex style
@@ -113,7 +113,7 @@ class TestUtils(unittest.TestCase):
"polygon-fill: blue; polygon-opacity: 0.9; "
"polygon-gamma: 0.5; line-color: #FFF; line-width: "
"0.5; line-opacity: 0.25; "
- "line-comp-op: hard-light;} "),
+ "line-comp-op: hard-light;}"),
msg="multi-layer styling")
def test_norm_colname(self):
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_media",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 3
} | 0.4 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
appdirs==1.4.4
attrs==22.2.0
Babel==2.11.0
backcall==0.2.0
carto==1.11.3
-e git+https://github.com/CartoDB/cartoframes.git@8dcdd8361140d6e087c7301042bd5ba9dc475001#egg=cartoframes
certifi==2021.5.30
charset-normalizer==2.0.12
decorator==5.1.1
docutils==0.18.1
future==1.0.0
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig==1.1.1
ipython==7.16.3
ipython-genutils==0.2.0
jedi==0.17.2
Jinja2==3.0.3
MarkupSafe==2.0.1
numpy==1.19.5
packaging==21.3
pandas==1.1.5
parso==0.7.1
pexpect==4.9.0
pickleshare==0.7.5
pluggy==1.0.0
pockets==0.9.1
prompt-toolkit==3.0.36
ptyprocess==0.7.0
py==1.11.0
Pygments==2.14.0
pyparsing==3.1.4
pyrestcli==0.6.11
pytest==7.0.1
python-dateutil==2.9.0.post0
pytz==2025.2
requests==2.27.1
Shapely==1.8.5.post1
six==1.17.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-napoleon==0.7
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
tomli==1.2.3
tqdm==4.64.1
traitlets==4.3.3
typing_extensions==4.1.1
urllib3==1.26.20
wcwidth==0.2.13
webcolors==1.7
zipp==3.6.0
| name: cartoframes
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- appdirs==1.4.4
- attrs==22.2.0
- babel==2.11.0
- backcall==0.2.0
- carto==1.11.3
- charset-normalizer==2.0.12
- decorator==5.1.1
- docutils==0.18.1
- future==1.0.0
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- iniconfig==1.1.1
- ipython==7.16.3
- ipython-genutils==0.2.0
- jedi==0.17.2
- jinja2==3.0.3
- markupsafe==2.0.1
- numpy==1.19.5
- packaging==21.3
- pandas==1.1.5
- parso==0.7.1
- pexpect==4.9.0
- pickleshare==0.7.5
- pluggy==1.0.0
- pockets==0.9.1
- prompt-toolkit==3.0.36
- ptyprocess==0.7.0
- py==1.11.0
- pygments==2.14.0
- pyparsing==3.1.4
- pyrestcli==0.6.11
- pytest==7.0.1
- python-dateutil==2.9.0.post0
- pytz==2025.2
- requests==2.27.1
- shapely==1.8.5.post1
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-napoleon==0.7
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- tomli==1.2.3
- tqdm==4.64.1
- traitlets==4.3.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- wcwidth==0.2.13
- webcolors==1.7
- zipp==3.6.0
prefix: /opt/conda/envs/cartoframes
| [
"test/test_layer.py::TestQueryLayer::test_querylayer_get_cartocss",
"test/test_layer.py::TestQueryLayer::test_querylayer_time_category",
"test/test_utils.py::TestUtils::test_cssify"
] | [
"test/test_context.py::TestCartoContext::test_add_encoded_geom",
"test/test_context.py::TestCartoContext::test_cartocontext",
"test/test_context.py::TestCartoContext::test_cartocontext_check_query",
"test/test_context.py::TestCartoContext::test_cartocontext_credentials",
"test/test_context.py::TestCartoContext::test_cartocontext_delete",
"test/test_context.py::TestCartoContext::test_cartocontext_handle_import",
"test/test_context.py::TestCartoContext::test_cartocontext_isorguser",
"test/test_context.py::TestCartoContext::test_cartocontext_map",
"test/test_context.py::TestCartoContext::test_cartocontext_map_geom_type",
"test/test_context.py::TestCartoContext::test_cartocontext_map_time",
"test/test_context.py::TestCartoContext::test_cartocontext_mixed_case",
"test/test_context.py::TestCartoContext::test_cartocontext_read",
"test/test_context.py::TestCartoContext::test_cartocontext_table_exists",
"test/test_context.py::TestCartoContext::test_cartocontext_write",
"test/test_context.py::TestCartoContext::test_cartocontext_write_index",
"test/test_context.py::TestCartoContext::test_cartoframes_query",
"test/test_context.py::TestCartoContext::test_cartoframes_sync",
"test/test_context.py::TestCartoContext::test_data",
"test/test_context.py::TestCartoContext::test_data_discovery",
"test/test_context.py::TestCartoContext::test_debug_print",
"test/test_context.py::TestCartoContext::test_get_bounds",
"test/test_context.py::TestBatchJobStatus::test_batchjobstatus",
"test/test_context.py::TestBatchJobStatus::test_batchjobstatus_methods",
"test/test_context.py::TestBatchJobStatus::test_batchjobstatus_repr"
] | [
"test/test_context.py::TestCartoContext::test_cartocontext_send_dataframe",
"test/test_context.py::TestCartoContext::test_decode_geom",
"test/test_context.py::TestCartoContext::test_df2pg_schema",
"test/test_context.py::TestCartoContext::test_dtypes2pg",
"test/test_context.py::TestCartoContext::test_encode_geom",
"test/test_context.py::TestCartoContext::test_pg2dtypes",
"test/test_layer.py::TestAbstractLayer::test_class",
"test/test_layer.py::TestLayer::test_layer_setup_dataframe",
"test/test_layer.py::TestBaseMap::test_basemap_invalid",
"test/test_layer.py::TestBaseMap::test_basemap_source",
"test/test_layer.py::TestQueryLayer::test_querylayer_colors",
"test/test_layer.py::TestQueryLayer::test_querylayer_size_and_time",
"test/test_layer.py::TestQueryLayer::test_querylayer_size_column_key",
"test/test_layer.py::TestQueryLayer::test_querylayer_size_default",
"test/test_layer.py::TestQueryLayer::test_querylayer_size_defaults",
"test/test_layer.py::TestQueryLayer::test_querylayer_time_default",
"test/test_layer.py::TestQueryLayer::test_querylayer_time_errors",
"test/test_layer.py::TestQueryLayer::test_querylayer_time_numeric",
"test/test_utils.py::TestUtils::test_dict_items",
"test/test_utils.py::TestUtils::test_importify_params",
"test/test_utils.py::TestUtils::test_norm_colname",
"test/test_utils.py::TestUtils::test_normalize_colnames"
] | [] | BSD 3-Clause "New" or "Revised" License | 1,940 | 1,627 | [
"cartoframes/context.py",
"cartoframes/layer.py",
"cartoframes/utils.py"
] |
|
dpkp__kafka-python-1320 | 009290ddd5d4616d70bff93f841e773af8b22750 | 2017-12-08 02:16:09 | 618c5051493693c1305aa9f08e8a0583d5fcf0e3 | tvoinarovskyi: Looks good. | diff --git a/kafka/conn.py b/kafka/conn.py
index e20210a..2926e2f 100644
--- a/kafka/conn.py
+++ b/kafka/conn.py
@@ -251,67 +251,42 @@ class BrokerConnection(object):
self._sasl_auth_future = None
self.last_attempt = 0
self._gai = None
- self._gai_index = 0
self._sensors = None
if self.config['metrics']:
self._sensors = BrokerConnectionMetrics(self.config['metrics'],
self.config['metric_group_prefix'],
self.node_id)
+ def _next_afi_host_port(self):
+ if not self._gai:
+ self._gai = dns_lookup(self._init_host, self._init_port, self._init_afi)
+ if not self._gai:
+ log.error('DNS lookup failed for %s:%i (%s)',
+ self._init_host, self._init_port, self._init_afi)
+ return
+
+ afi, _, __, ___, sockaddr = self._gai.pop(0)
+ host, port = sockaddr[:2]
+ return (afi, host, port)
+
def connect(self):
"""Attempt to connect and return ConnectionState"""
if self.state is ConnectionStates.DISCONNECTED:
- log.debug('%s: creating new socket', self)
- # if self.afi is set to AF_UNSPEC, then we need to do a name
- # resolution and try all available address families
- if self._init_afi == socket.AF_UNSPEC:
- if self._gai is None:
- # XXX: all DNS functions in Python are blocking. If we really
- # want to be non-blocking here, we need to use a 3rd-party
- # library like python-adns, or move resolution onto its
- # own thread. This will be subject to the default libc
- # name resolution timeout (5s on most Linux boxes)
- try:
- self._gai = socket.getaddrinfo(self._init_host,
- self._init_port,
- socket.AF_UNSPEC,
- socket.SOCK_STREAM)
- except socket.gaierror as ex:
- log.warning('DNS lookup failed for %s:%d,'
- ' exception was %s. Is your'
- ' advertised.listeners (called'
- ' advertised.host.name before Kafka 9)'
- ' correct and resolvable?',
- self._init_host, self._init_port, ex)
- self._gai = []
- self._gai_index = 0
- else:
- # if self._gai already exists, then we should try the next
- # name
- self._gai_index += 1
- while True:
- if self._gai_index >= len(self._gai):
- error = 'Unable to connect to any of the names for {0}:{1}'.format(
- self._init_host, self._init_port)
- log.error(error)
- self.close(Errors.ConnectionError(error))
- return
- afi, _, __, ___, sockaddr = self._gai[self._gai_index]
- if afi not in (socket.AF_INET, socket.AF_INET6):
- self._gai_index += 1
- continue
- break
- self.host, self.port = sockaddr[:2]
- self._sock = socket.socket(afi, socket.SOCK_STREAM)
+ self.last_attempt = time.time()
+ next_lookup = self._next_afi_host_port()
+ if not next_lookup:
+ self.close(Errors.ConnectionError('DNS failure'))
+ return
else:
- self._sock = socket.socket(self._init_afi, socket.SOCK_STREAM)
+ log.debug('%s: creating new socket', self)
+ self.afi, self.host, self.port = next_lookup
+ self._sock = socket.socket(self.afi, socket.SOCK_STREAM)
for option in self.config['socket_options']:
log.debug('%s: setting socket option %s', self, option)
self._sock.setsockopt(*option)
self._sock.setblocking(False)
- self.last_attempt = time.time()
self.state = ConnectionStates.CONNECTING
if self.config['security_protocol'] in ('SSL', 'SASL_SSL'):
self._wrap_ssl()
@@ -328,11 +303,6 @@ class BrokerConnection(object):
ret = None
try:
ret = self._sock.connect_ex((self.host, self.port))
- # if we got here through a host lookup, we've found a host,port,af tuple
- # that works save it so we don't do a GAI lookup again
- if self._gai is not None:
- self.afi = self._sock.family
- self._gai = None
except socket.error as err:
ret = err.errno
@@ -645,23 +615,15 @@ class BrokerConnection(object):
will be failed with this exception.
Default: kafka.errors.ConnectionError.
"""
- if self.state is ConnectionStates.DISCONNECTED:
- if error is not None:
- if sys.version_info >= (3, 2):
- log.warning('%s: close() called on disconnected connection with error: %s', self, error, stack_info=True)
- else:
- log.warning('%s: close() called on disconnected connection with error: %s', self, error)
- return
-
log.info('%s: Closing connection. %s', self, error or '')
- self.state = ConnectionStates.DISCONNECTING
- self.config['state_change_callback'](self)
+ if self.state is not ConnectionStates.DISCONNECTED:
+ self.state = ConnectionStates.DISCONNECTING
+ self.config['state_change_callback'](self)
self._update_reconnect_backoff()
if self._sock:
self._sock.close()
self._sock = None
self.state = ConnectionStates.DISCONNECTED
- self.last_attempt = time.time()
self._sasl_auth_future = None
self._protocol = KafkaProtocol(
client_id=self.config['client_id'],
@@ -1170,3 +1132,29 @@ def collect_hosts(hosts, randomize=True):
shuffle(result)
return result
+
+
+def is_inet_4_or_6(gai):
+ """Given a getaddrinfo struct, return True iff ipv4 or ipv6"""
+ return gai[0] in (socket.AF_INET, socket.AF_INET6)
+
+
+def dns_lookup(host, port, afi=socket.AF_UNSPEC):
+ """Returns a list of getaddrinfo structs, optionally filtered to an afi (ipv4 / ipv6)"""
+ # XXX: all DNS functions in Python are blocking. If we really
+ # want to be non-blocking here, we need to use a 3rd-party
+ # library like python-adns, or move resolution onto its
+ # own thread. This will be subject to the default libc
+ # name resolution timeout (5s on most Linux boxes)
+ try:
+ return list(filter(is_inet_4_or_6,
+ socket.getaddrinfo(host, port, afi,
+ socket.SOCK_STREAM)))
+ except socket.gaierror as ex:
+ log.warning('DNS lookup failed for %s:%d,'
+ ' exception was %s. Is your'
+ ' advertised.listeners (called'
+ ' advertised.host.name before Kafka 9)'
+ ' correct and resolvable?',
+ host, port, ex)
+ return []
diff --git a/kafka/protocol/types.py b/kafka/protocol/types.py
index 22b49a4..516b957 100644
--- a/kafka/protocol/types.py
+++ b/kafka/protocol/types.py
@@ -8,16 +8,20 @@ from .abstract import AbstractType
def _pack(f, value):
try:
return pack(f, value)
- except error:
- raise ValueError(error)
+ except error as e:
+ raise ValueError("Error encountered when attempting to convert value: "
+ "{} to struct format: '{}', hit error: {}"
+ .format(value, f, e))
def _unpack(f, data):
try:
(value,) = unpack(f, data)
return value
- except error:
- raise ValueError(error)
+ except error as e:
+ raise ValueError("Error encountered when attempting to convert value: "
+ "{} to struct format: '{}', hit error: {}"
+ .format(value, f, e))
class Int8(AbstractType):
diff --git a/kafka/util.py b/kafka/util.py
index de8f228..181f67f 100644
--- a/kafka/util.py
+++ b/kafka/util.py
@@ -12,14 +12,20 @@ from kafka.vendor import six
from kafka.errors import BufferUnderflowError
-def crc32(data):
- crc = binascii.crc32(data)
- # py2 and py3 behave a little differently
- # CRC is encoded as a signed int in kafka protocol
- # so we'll convert the py3 unsigned result to signed
- if six.PY3 and crc >= 2**31:
- crc -= 2**32
- return crc
+if six.PY3:
+ MAX_INT = 2 ** 31
+ TO_SIGNED = 2 ** 32
+
+ def crc32(data):
+ crc = binascii.crc32(data)
+ # py2 and py3 behave a little differently
+ # CRC is encoded as a signed int in kafka protocol
+ # so we'll convert the py3 unsigned result to signed
+ if crc >= MAX_INT:
+ crc -= TO_SIGNED
+ return crc
+else:
+ from binascii import crc32
def write_int_string(s):
| Handling of struct errors doesn't print the specific error message
When a `struct.error` is thrown during `_pack()` or `_unpack()`, we catch and re-raise as a `ValueError`: https://github.com/dpkp/kafka-python/blob/master/kafka/protocol/types.py#L11-L12
However, we're shadowing the word `error` so we lose a handle on the specific exception and cannot print the specific error message. | dpkp/kafka-python | diff --git a/test/test_conn.py b/test/test_conn.py
index 1621e60..ef7925a 100644
--- a/test/test_conn.py
+++ b/test/test_conn.py
@@ -267,3 +267,28 @@ def test_lookup_on_connect():
m.assert_called_once_with(hostname, port, 0, 1)
conn.close()
assert conn.host == ip2
+
+
+def test_relookup_on_failure():
+ hostname = 'example.org'
+ port = 9092
+ conn = BrokerConnection(hostname, port, socket.AF_UNSPEC)
+ assert conn.host == conn.hostname == hostname
+ mock_return1 = []
+ with mock.patch("socket.getaddrinfo", return_value=mock_return1) as m:
+ last_attempt = conn.last_attempt
+ conn.connect()
+ m.assert_called_once_with(hostname, port, 0, 1)
+ assert conn.disconnected()
+ assert conn.last_attempt > last_attempt
+
+ ip2 = '127.0.0.2'
+ mock_return2 = [
+ (2, 2, 17, '', (ip2, 9092)),
+ ]
+
+ with mock.patch("socket.getaddrinfo", return_value=mock_return2) as m:
+ conn.connect()
+ m.assert_called_once_with(hostname, port, 0, 1)
+ conn.close()
+ assert conn.host == ip2
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 3
} | 1.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-catchlog",
"pytest-sugar",
"pytest-mock",
"mock",
"python-snappy",
"lz4tools",
"xxhash"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc libsnappy-dev"
],
"python": "3.6",
"reqs_path": [
"docs/requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
attrs==22.2.0
Babel==2.11.0
certifi==2021.5.30
charset-normalizer==2.0.12
coverage==6.2
cramjam==2.5.0
docutils==0.18.1
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
iniconfig==1.1.1
Jinja2==3.0.3
-e git+https://github.com/dpkp/kafka-python.git@009290ddd5d4616d70bff93f841e773af8b22750#egg=kafka_python
lz4tools==1.3.1.2
MarkupSafe==2.0.1
mock==5.2.0
packaging==21.3
pluggy==1.0.0
pockets==0.9.1
py==1.11.0
Pygments==2.14.0
pyparsing==3.1.4
pytest==7.0.1
pytest-catchlog==1.2.2
pytest-cov==4.0.0
pytest-mock==3.6.1
pytest-sugar==0.9.6
python-snappy==0.7.3
pytz==2025.2
requests==2.27.1
six==1.17.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinx-rtd-theme==2.0.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jquery==4.1
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-napoleon==0.7
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
termcolor==1.1.0
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
xxhash==3.2.0
zipp==3.6.0
| name: kafka-python
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- attrs==22.2.0
- babel==2.11.0
- charset-normalizer==2.0.12
- coverage==6.2
- cramjam==2.5.0
- docutils==0.18.1
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- jinja2==3.0.3
- lz4tools==1.3.1.2
- markupsafe==2.0.1
- mock==5.2.0
- packaging==21.3
- pluggy==1.0.0
- pockets==0.9.1
- py==1.11.0
- pygments==2.14.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-catchlog==1.2.2
- pytest-cov==4.0.0
- pytest-mock==3.6.1
- pytest-sugar==0.9.6
- python-snappy==0.7.3
- pytz==2025.2
- requests==2.27.1
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinx-rtd-theme==2.0.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jquery==4.1
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-napoleon==0.7
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- termcolor==1.1.0
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- xxhash==3.2.0
- zipp==3.6.0
prefix: /opt/conda/envs/kafka-python
| [
"test/test_conn.py::test_relookup_on_failure"
] | [] | [
"test/test_conn.py::test_connect[states0]",
"test/test_conn.py::test_connect[states1]",
"test/test_conn.py::test_connect[states2]",
"test/test_conn.py::test_connect[states3]",
"test/test_conn.py::test_connect[states4]",
"test/test_conn.py::test_connect_timeout",
"test/test_conn.py::test_blacked_out",
"test/test_conn.py::test_connected",
"test/test_conn.py::test_connecting",
"test/test_conn.py::test_send_disconnected",
"test/test_conn.py::test_send_connecting",
"test/test_conn.py::test_send_max_ifr",
"test/test_conn.py::test_send_no_response",
"test/test_conn.py::test_send_response",
"test/test_conn.py::test_send_error",
"test/test_conn.py::test_can_send_more",
"test/test_conn.py::test_recv_disconnected",
"test/test_conn.py::test_recv",
"test/test_conn.py::test_close",
"test/test_conn.py::test_collect_hosts__happy_path",
"test/test_conn.py::test_collect_hosts__ipv6",
"test/test_conn.py::test_collect_hosts__string_list",
"test/test_conn.py::test_collect_hosts__with_spaces",
"test/test_conn.py::test_lookup_on_connect"
] | [] | Apache License 2.0 | 1,941 | 2,278 | [
"kafka/conn.py",
"kafka/protocol/types.py",
"kafka/util.py"
] |
ofek__bit-24 | 37182647309b54934fb49078d4c7a2fb21d7eb47 | 2017-12-18 20:34:04 | 37182647309b54934fb49078d4c7a2fb21d7eb47 | teran-mckinney: @bjarnemagnussen, could you also review this since it impacts your pull request?
Unfortunately, I know you'll have to rebase undo most of this with your change.
Thank you!
teran-mckinney: @ofek, could you consider merging this if it looks good to you? Would also appreciate if you can bump the version as this can prevent accidental Bitcoin loss and the tests still pass.
I will merge it into bitcash shortly.
teran-mckinney: @ofek do you have the time to review this? Thank you!
ofek: @teran-mckinney Sorry! I recently started a new job which has decreased my OSS time. Can you please ping me again in a few days? Also, would you like write access/become a maintainer? Do you have time? I don't want this project to die :smile:
teran-mckinney: I completely understand. Congratulations on the new job!
Wow, most certainly. I'm happy to take on bit, especially for smaller patches like this. I have the time for it and I'm already using bit in a couple projects.
I'll ping you in a few days. Thanks for getting back to me.
teran-mckinney: Hey @ofek. Just pinging you. | diff --git a/bit/format.py b/bit/format.py
index 5ff8679..088d990 100644
--- a/bit/format.py
+++ b/bit/format.py
@@ -35,6 +35,8 @@ def verify_sig(signature, data, public_key):
def address_to_public_key_hash(address):
+ # Raise ValueError if we cannot identify the address.
+ get_version(address)
return b58decode_check(address)[1:]
| Pay to scripthash
I have no experience with paying to scripthashes except when it's worked automagically for me before.
In #12 it looks like the initial workins of P2SH are being added. I'm not sure how straight forward P2SH is.
But... for the time being shouldn't we throw an exception if we try to `send()` to an address that does not begin with 1? Are there any sideeffects to doing so?
I think I just lost a fair bit of coin not reviewing this properly. Hopefully if we add in an exception we can keep others from doing the same. | ofek/bit | diff --git a/tests/samples.py b/tests/samples.py
index 8a9ad70..82b64ae 100644
--- a/tests/samples.py
+++ b/tests/samples.py
@@ -1,8 +1,10 @@
BINARY_ADDRESS = b'\x00\x92F\x1b\xdeb\x83\xb4a\xec\xe7\xdd\xf4\xdb\xf1\xe0\xa4\x8b\xd1\x13\xd8&E\xb4\xbf'
BITCOIN_ADDRESS = '1ELReFsTCUY2mfaDTy32qxYiT49z786eFg'
BITCOIN_ADDRESS_COMPRESSED = '1ExJJsNLQDNVVM1s1sdyt1o5P3GC5r32UG'
+BITCOIN_ADDRESS_PAY2SH = '39SrGQEfFXcTYJhBvjZeQja66Cpz82EEUn'
BITCOIN_ADDRESS_TEST = 'mtrNwJxS1VyHYn3qBY1Qfsm3K3kh1mGRMS'
BITCOIN_ADDRESS_TEST_COMPRESSED = 'muUFbvTKDEokGTVUjScMhw1QF2rtv5hxCz'
+BITCOIN_ADDRESS_TEST_PAY2SH = '2NFKbBHzzh32q5DcZJNgZE9sF7gYmtPbawk'
PRIVATE_KEY_BYTES = b'\xc2\x8a\x9f\x80s\x8fw\rRx\x03\xa5f\xcfo\xc3\xed\xf6\xce\xa5\x86\xc4\xfcJR#\xa5\xady~\x1a\xc3'
PRIVATE_KEY_DER = (b"0\x81\x84\x02\x01\x000\x10\x06\x07*\x86H\xce=\x02\x01\x06"
b"\x05+\x81\x04\x00\n\x04m0k\x02\x01\x01\x04 \xc2\x8a\x9f"
diff --git a/tests/test_format.py b/tests/test_format.py
index b338b03..86dcace 100644
--- a/tests/test_format.py
+++ b/tests/test_format.py
@@ -6,9 +6,11 @@ from bit.format import (
public_key_to_address, verify_sig, wif_checksum_check, wif_to_bytes
)
from .samples import (
- BITCOIN_ADDRESS, BITCOIN_ADDRESS_COMPRESSED, BITCOIN_ADDRESS_TEST_COMPRESSED,
- BITCOIN_ADDRESS_TEST, PRIVATE_KEY_BYTES, PUBKEY_HASH, PUBKEY_HASH_COMPRESSED,
- PUBLIC_KEY_COMPRESSED, PUBLIC_KEY_UNCOMPRESSED, PUBLIC_KEY_X, PUBLIC_KEY_Y,
+ BITCOIN_ADDRESS, BITCOIN_ADDRESS_COMPRESSED, BITCOIN_ADDRESS_PAY2SH,
+ BITCOIN_ADDRESS_TEST_COMPRESSED, BITCOIN_ADDRESS_TEST,
+ BITCOIN_ADDRESS_TEST_PAY2SH, PRIVATE_KEY_BYTES, PUBKEY_HASH,
+ PUBKEY_HASH_COMPRESSED, PUBLIC_KEY_COMPRESSED, PUBLIC_KEY_UNCOMPRESSED,
+ PUBLIC_KEY_X, PUBLIC_KEY_Y,
WALLET_FORMAT_COMPRESSED_MAIN, WALLET_FORMAT_COMPRESSED_TEST,
WALLET_FORMAT_MAIN, WALLET_FORMAT_TEST
)
@@ -41,6 +43,14 @@ class TestGetVersion:
with pytest.raises(ValueError):
get_version('dg2dNAjuezub6iJVPNML5pW5ZQvtA9ocL')
+ def test_mainnet_pay2sh(self):
+ with pytest.raises(ValueError):
+ get_version(BITCOIN_ADDRESS_PAY2SH)
+
+ def test_testnet_pay2sh(self):
+ with pytest.raises(ValueError):
+ get_version(BITCOIN_ADDRESS_TEST_PAY2SH)
+
class TestVerifySig:
def test_valid(self):
@@ -146,3 +156,7 @@ def test_point_to_public_key():
def test_address_to_public_key_hash():
assert address_to_public_key_hash(BITCOIN_ADDRESS) == PUBKEY_HASH
assert address_to_public_key_hash(BITCOIN_ADDRESS_COMPRESSED) == PUBKEY_HASH_COMPRESSED
+ with pytest.raises(ValueError):
+ address_to_public_key_hash(BITCOIN_ADDRESS_PAY2SH)
+ with pytest.raises(ValueError):
+ address_to_public_key_hash(BITCOIN_ADDRESS_TEST_PAY2SH)
diff --git a/tests/test_wallet.py b/tests/test_wallet.py
index 44d882d..6fbbfdd 100644
--- a/tests/test_wallet.py
+++ b/tests/test_wallet.py
@@ -238,6 +238,21 @@ class TestPrivateKeyTestnet:
assert current > initial
+ def test_send_pay2sh(self):
+ """
+ We don't yet support pay2sh, so we must throw an exception if we get one.
+ Otherwise, we could send coins into an unrecoverable blackhole, needlessly.
+ pay2sh addresses begin with 2 in testnet and 3 on mainnet.
+ """
+ if TRAVIS and sys.version_info[:2] != (3, 6):
+ return
+
+ private_key = PrivateKeyTestnet(WALLET_FORMAT_COMPRESSED_TEST)
+ private_key.get_unspents()
+
+ with pytest.raises(ValueError):
+ private_key.send([('2NFKbBHzzh32q5DcZJNgZE9sF7gYmtPbawk', 1, 'jpy')])
+
def test_cold_storage(self):
if TRAVIS and sys.version_info[:2] != (3, 6):
return
| {
"commit_name": "merge_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 1
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[cli,cache]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"codecov",
"coverage"
],
"pre_install": [
"pip install -U setuptools pip"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | appdirs==1.4.4
argon2-cffi==21.3.0
argon2-cffi-bindings==21.2.0
asn1crypto==1.5.1
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
-e git+https://github.com/ofek/bit.git@37182647309b54934fb49078d4c7a2fb21d7eb47#egg=bit
certifi==2021.5.30
cffi==1.15.1
charset-normalizer==2.0.12
click==8.0.4
codecov==2.1.13
coincurve==16.0.0
coverage==6.2
cryptography==40.0.2
dataclasses==0.8
idna==3.10
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
lmdb==1.6.2
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
privy==6.0.0
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pycparser==2.21
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
requests==2.27.1
tinydb==4.7.0
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
urllib3==1.26.20
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: bit
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- appdirs==1.4.4
- argon2-cffi==21.3.0
- argon2-cffi-bindings==21.2.0
- asn1crypto==1.5.1
- cffi==1.15.1
- charset-normalizer==2.0.12
- click==8.0.4
- codecov==2.1.13
- coincurve==16.0.0
- coverage==6.2
- cryptography==40.0.2
- dataclasses==0.8
- idna==3.10
- lmdb==1.6.2
- pip==21.3.1
- privy==6.0.0
- pycparser==2.21
- requests==2.27.1
- setuptools==59.6.0
- tinydb==4.7.0
- urllib3==1.26.20
prefix: /opt/conda/envs/bit
| [
"tests/test_format.py::test_address_to_public_key_hash"
] | [
"tests/test_wallet.py::TestPrivateKey::test_get_balance",
"tests/test_wallet.py::TestPrivateKey::test_get_unspent",
"tests/test_wallet.py::TestPrivateKey::test_get_transactions",
"tests/test_wallet.py::TestPrivateKeyTestnet::test_get_balance",
"tests/test_wallet.py::TestPrivateKeyTestnet::test_get_unspent",
"tests/test_wallet.py::TestPrivateKeyTestnet::test_get_transactions",
"tests/test_wallet.py::TestPrivateKeyTestnet::test_send",
"tests/test_wallet.py::TestPrivateKeyTestnet::test_send_pay2sh",
"tests/test_wallet.py::TestPrivateKeyTestnet::test_cold_storage"
] | [
"tests/test_format.py::TestGetVersion::test_mainnet",
"tests/test_format.py::TestGetVersion::test_testnet",
"tests/test_format.py::TestGetVersion::test_invalid",
"tests/test_format.py::TestGetVersion::test_mainnet_pay2sh",
"tests/test_format.py::TestGetVersion::test_testnet_pay2sh",
"tests/test_format.py::TestVerifySig::test_valid",
"tests/test_format.py::TestVerifySig::test_invalid",
"tests/test_format.py::TestBytesToWIF::test_mainnet",
"tests/test_format.py::TestBytesToWIF::test_testnet",
"tests/test_format.py::TestBytesToWIF::test_compressed",
"tests/test_format.py::TestBytesToWIF::test_compressed_testnet",
"tests/test_format.py::TestWIFToBytes::test_mainnet",
"tests/test_format.py::TestWIFToBytes::test_testnet",
"tests/test_format.py::TestWIFToBytes::test_compressed",
"tests/test_format.py::TestWIFToBytes::test_invalid_network",
"tests/test_format.py::TestWIFChecksumCheck::test_wif_checksum_check_main_success",
"tests/test_format.py::TestWIFChecksumCheck::test_wif_checksum_check_test_success",
"tests/test_format.py::TestWIFChecksumCheck::test_wif_checksum_check_compressed_success",
"tests/test_format.py::TestWIFChecksumCheck::test_wif_checksum_check_decode_failure",
"tests/test_format.py::TestWIFChecksumCheck::test_wif_checksum_check_other_failure",
"tests/test_format.py::TestPublicKeyToCoords::test_public_key_to_coords_compressed",
"tests/test_format.py::TestPublicKeyToCoords::test_public_key_to_coords_uncompressed",
"tests/test_format.py::TestPublicKeyToCoords::test_public_key_to_coords_incorrect_length",
"tests/test_format.py::TestPublicKeyToAddress::test_public_key_to_address_compressed",
"tests/test_format.py::TestPublicKeyToAddress::test_public_key_to_address_uncompressed",
"tests/test_format.py::TestPublicKeyToAddress::test_public_key_to_address_incorrect_length",
"tests/test_format.py::TestPublicKeyToAddress::test_public_key_to_address_test_compressed",
"tests/test_format.py::TestPublicKeyToAddress::test_public_key_to_address_test_uncompressed",
"tests/test_format.py::TestCoordsToPublicKey::test_coords_to_public_key_compressed",
"tests/test_format.py::TestCoordsToPublicKey::test_coords_to_public_key_uncompressed",
"tests/test_format.py::test_point_to_public_key",
"tests/test_wallet.py::TestWIFToKey::test_compressed_main",
"tests/test_wallet.py::TestWIFToKey::test_uncompressed_main",
"tests/test_wallet.py::TestWIFToKey::test_compressed_test",
"tests/test_wallet.py::TestWIFToKey::test_uncompressed_test",
"tests/test_wallet.py::TestBaseKey::test_init_default",
"tests/test_wallet.py::TestBaseKey::test_init_from_key",
"tests/test_wallet.py::TestBaseKey::test_init_wif_error",
"tests/test_wallet.py::TestBaseKey::test_public_key_compressed",
"tests/test_wallet.py::TestBaseKey::test_public_key_uncompressed",
"tests/test_wallet.py::TestBaseKey::test_public_point",
"tests/test_wallet.py::TestBaseKey::test_sign",
"tests/test_wallet.py::TestBaseKey::test_verify_success",
"tests/test_wallet.py::TestBaseKey::test_verify_failure",
"tests/test_wallet.py::TestBaseKey::test_to_hex",
"tests/test_wallet.py::TestBaseKey::test_to_bytes",
"tests/test_wallet.py::TestBaseKey::test_to_der",
"tests/test_wallet.py::TestBaseKey::test_to_pem",
"tests/test_wallet.py::TestBaseKey::test_to_int",
"tests/test_wallet.py::TestBaseKey::test_is_compressed",
"tests/test_wallet.py::TestBaseKey::test_equal",
"tests/test_wallet.py::TestPrivateKey::test_alias",
"tests/test_wallet.py::TestPrivateKey::test_init_default",
"tests/test_wallet.py::TestPrivateKey::test_address",
"tests/test_wallet.py::TestPrivateKey::test_to_wif",
"tests/test_wallet.py::TestPrivateKey::test_from_hex",
"tests/test_wallet.py::TestPrivateKey::test_from_der",
"tests/test_wallet.py::TestPrivateKey::test_from_pem",
"tests/test_wallet.py::TestPrivateKey::test_from_int",
"tests/test_wallet.py::TestPrivateKey::test_repr",
"tests/test_wallet.py::TestPrivateKeyTestnet::test_init_default",
"tests/test_wallet.py::TestPrivateKeyTestnet::test_address",
"tests/test_wallet.py::TestPrivateKeyTestnet::test_to_wif",
"tests/test_wallet.py::TestPrivateKeyTestnet::test_from_hex",
"tests/test_wallet.py::TestPrivateKeyTestnet::test_from_der",
"tests/test_wallet.py::TestPrivateKeyTestnet::test_from_pem",
"tests/test_wallet.py::TestPrivateKeyTestnet::test_from_int",
"tests/test_wallet.py::TestPrivateKeyTestnet::test_repr"
] | [] | MIT License | 1,962 | 109 | [
"bit/format.py"
] |
mpdavis__python-jose-76 | 28cc6719eceb89129eed59c25f7bdac015665bdd | 2017-12-19 12:53:56 | 28cc6719eceb89129eed59c25f7bdac015665bdd | mpdavis: It looks like there are builds failing on Python 2.6 and 3.3.
Python 2.6 is failing because pytest dropped support. It looks like we will need to pin pytest in `tox.ini` for 2.6 builds (or possibly just all builds if easier).
I am still looking into the 3.3 failure. I can take a look later if you don't want to worry about it.
leplatrem: I also saw this:
```
$ tox
Matching undeclared envs is deprecated. Be sure all the envs that Tox should run are declared in the tox config.
``` | diff --git a/jose/jwt.py b/jose/jwt.py
index 2da511f..3ba3250 100644
--- a/jose/jwt.py
+++ b/jose/jwt.py
@@ -408,24 +408,28 @@ def _validate_jti(claims):
def _validate_at_hash(claims, access_token, algorithm):
"""
- Validates that the 'at_hash' parameter included in the claims matches
- with the access_token returned alongside the id token as part of
- the authorization_code flow.
+ Validates that the 'at_hash' is valid.
+
+ Its value is the base64url encoding of the left-most half of the hash
+ of the octets of the ASCII representation of the access_token value,
+ where the hash algorithm used is the hash algorithm used in the alg
+ Header Parameter of the ID Token's JOSE Header. For instance, if the
+ alg is RS256, hash the access_token value with SHA-256, then take the
+ left-most 128 bits and base64url encode them. The at_hash value is a
+ case sensitive string. Use of this claim is OPTIONAL.
Args:
- claims (dict): The claims dictionary to validate.
- access_token (str): The access token returned by the OpenID Provider.
- algorithm (str): The algorithm used to sign the JWT, as specified by
- the token headers.
+ claims (dict): The claims dictionary to validate.
+ access_token (str): The access token returned by the OpenID Provider.
+ algorithm (str): The algorithm used to sign the JWT, as specified by
+ the token headers.
"""
- if 'at_hash' not in claims and not access_token:
+ if 'at_hash' not in claims:
return
- elif 'at_hash' in claims and not access_token:
+
+ if not access_token:
msg = 'No access_token provided to compare against at_hash claim.'
raise JWTClaimsError(msg)
- elif access_token and 'at_hash' not in claims:
- msg = 'at_hash claim missing from token.'
- raise JWTClaimsError(msg)
try:
expected_hash = calculate_at_hash(access_token,
@@ -433,7 +437,7 @@ def _validate_at_hash(claims, access_token, algorithm):
except (TypeError, ValueError):
msg = 'Unable to calculate at_hash to verify against token claims.'
raise JWTClaimsError(msg)
-
+
if claims['at_hash'] != expected_hash:
raise JWTClaimsError('at_hash claim does not match access_token.')
| Should at_hash claim verification fail when missing from JWT?
It looks like `at_hash` in JWT payload is optional (see http://openid.net/specs/openid-connect-core-1_0.html#CodeIDToken).
However, in python-jose, when both `id_token` and `access_token` parameters are specified, decoding a JWT that has no `at_hash` claim raises an error (*at_hash claim missing from token*)
https://github.com/mpdavis/python-jose/pull/30/files#diff-b106d01229785c64375df96ca4b3f58cR422
Shouldn't it be acceptable since the spec says it's optional?
Obviously we can disable at_hash verification with the appropriate decode option, but we find it useful to perform claims verification on JWT that have it or not with the same code. Maybe with a `allow_missing_at_hash` option or something?
Huge thanks for this lib 😻 | mpdavis/python-jose | diff --git a/tests/test_jwt.py b/tests/test_jwt.py
index 485fff5..beb6789 100644
--- a/tests/test_jwt.py
+++ b/tests/test_jwt.py
@@ -468,8 +468,8 @@ class TestJWT:
def test_at_hash_missing_claim(self, claims, key):
token = jwt.encode(claims, key)
- with pytest.raises(JWTError):
- jwt.decode(token, key, access_token='<ACCESS_TOKEN>')
+ payload = jwt.decode(token, key, access_token='<ACCESS_TOKEN>')
+ assert 'at_hash' not in payload
def test_at_hash_unable_to_calculate(self, claims, key):
token = jwt.encode(claims, key, access_token='<ACCESS_TOKEN>')
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 1
},
"num_modified_files": 1
} | 2.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
coverage==6.2
ecdsa==0.19.1
future==0.18.3
importlib-metadata==4.8.3
iniconfig==1.1.1
packaging==21.3
pluggy==1.0.0
py==1.11.0
pycrypto==2.6.1
pycryptodome==3.21.0
pyparsing==3.1.4
pytest==7.0.1
pytest-cov==4.0.0
-e git+https://github.com/mpdavis/python-jose.git@28cc6719eceb89129eed59c25f7bdac015665bdd#egg=python_jose
six==1.17.0
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: python-jose
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- coverage==6.2
- ecdsa==0.19.1
- future==0.18.3
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pycrypto==2.6.1
- pycryptodome==3.21.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-cov==4.0.0
- six==1.17.0
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/python-jose
| [
"tests/test_jwt.py::TestJWT::test_at_hash_missing_claim"
] | [] | [
"tests/test_jwt.py::TestJWT::test_non_default_alg",
"tests/test_jwt.py::TestJWT::test_non_default_alg_positional_bwcompat",
"tests/test_jwt.py::TestJWT::test_non_default_headers",
"tests/test_jwt.py::TestJWT::test_encode",
"tests/test_jwt.py::TestJWT::test_decode",
"tests/test_jwt.py::TestJWT::test_leeway_is_int",
"tests/test_jwt.py::TestJWT::test_leeway_is_timedelta",
"tests/test_jwt.py::TestJWT::test_iat_not_int",
"tests/test_jwt.py::TestJWT::test_nbf_not_int",
"tests/test_jwt.py::TestJWT::test_nbf_datetime",
"tests/test_jwt.py::TestJWT::test_nbf_with_leeway",
"tests/test_jwt.py::TestJWT::test_nbf_in_future",
"tests/test_jwt.py::TestJWT::test_nbf_skip",
"tests/test_jwt.py::TestJWT::test_exp_not_int",
"tests/test_jwt.py::TestJWT::test_exp_datetime",
"tests/test_jwt.py::TestJWT::test_exp_with_leeway",
"tests/test_jwt.py::TestJWT::test_exp_in_past",
"tests/test_jwt.py::TestJWT::test_exp_skip",
"tests/test_jwt.py::TestJWT::test_aud_string",
"tests/test_jwt.py::TestJWT::test_aud_list",
"tests/test_jwt.py::TestJWT::test_aud_list_multiple",
"tests/test_jwt.py::TestJWT::test_aud_list_is_strings",
"tests/test_jwt.py::TestJWT::test_aud_case_sensitive",
"tests/test_jwt.py::TestJWT::test_aud_empty_claim",
"tests/test_jwt.py::TestJWT::test_aud_not_string_or_list",
"tests/test_jwt.py::TestJWT::test_aud_given_number",
"tests/test_jwt.py::TestJWT::test_iss_string",
"tests/test_jwt.py::TestJWT::test_iss_list",
"tests/test_jwt.py::TestJWT::test_iss_tuple",
"tests/test_jwt.py::TestJWT::test_iss_invalid",
"tests/test_jwt.py::TestJWT::test_sub_string",
"tests/test_jwt.py::TestJWT::test_sub_invalid",
"tests/test_jwt.py::TestJWT::test_sub_correct",
"tests/test_jwt.py::TestJWT::test_sub_incorrect",
"tests/test_jwt.py::TestJWT::test_jti_string",
"tests/test_jwt.py::TestJWT::test_jti_invalid",
"tests/test_jwt.py::TestJWT::test_at_hash",
"tests/test_jwt.py::TestJWT::test_at_hash_invalid",
"tests/test_jwt.py::TestJWT::test_at_hash_missing_access_token",
"tests/test_jwt.py::TestJWT::test_at_hash_unable_to_calculate",
"tests/test_jwt.py::TestJWT::test_unverified_claims_string",
"tests/test_jwt.py::TestJWT::test_unverified_claims_list",
"tests/test_jwt.py::TestJWT::test_unverified_claims_object"
] | [] | MIT License | 1,967 | 599 | [
"jose/jwt.py"
] |
ipython__ipython-10959 | 38e0033a6cf59136208b998c394ac5472b9c1849 | 2017-12-21 13:42:48 | cc353b25b0fff58e4ed13899df9b3c8153df01d9 | diff --git a/IPython/lib/pretty.py b/IPython/lib/pretty.py
index cbbb72600..9181113e3 100644
--- a/IPython/lib/pretty.py
+++ b/IPython/lib/pretty.py
@@ -392,6 +392,10 @@ def pretty(self, obj):
meth = cls._repr_pretty_
if callable(meth):
return meth(obj, self, cycle)
+ if cls is not object \
+ and callable(cls.__dict__.get('__repr__')):
+ return _repr_pprint(obj, self, cycle)
+
return _default_pprint(obj, self, cycle)
finally:
self.end_group()
@@ -537,17 +541,12 @@ def _default_pprint(obj, p, cycle):
p.end_group(1, '>')
-def _seq_pprinter_factory(start, end, basetype):
+def _seq_pprinter_factory(start, end):
"""
Factory that returns a pprint function useful for sequences. Used by
the default pprint for tuples, dicts, and lists.
"""
def inner(obj, p, cycle):
- typ = type(obj)
- if basetype is not None and typ is not basetype and typ.__repr__ != basetype.__repr__:
- # If the subclass provides its own repr, use it instead.
- return p.text(typ.__repr__(obj))
-
if cycle:
return p.text(start + '...' + end)
step = len(start)
@@ -564,21 +563,16 @@ def inner(obj, p, cycle):
return inner
-def _set_pprinter_factory(start, end, basetype):
+def _set_pprinter_factory(start, end):
"""
Factory that returns a pprint function useful for sets and frozensets.
"""
def inner(obj, p, cycle):
- typ = type(obj)
- if basetype is not None and typ is not basetype and typ.__repr__ != basetype.__repr__:
- # If the subclass provides its own repr, use it instead.
- return p.text(typ.__repr__(obj))
-
if cycle:
return p.text(start + '...' + end)
if len(obj) == 0:
# Special case.
- p.text(basetype.__name__ + '()')
+ p.text(type(obj).__name__ + '()')
else:
step = len(start)
p.begin_group(step, start)
@@ -596,17 +590,12 @@ def inner(obj, p, cycle):
return inner
-def _dict_pprinter_factory(start, end, basetype=None):
+def _dict_pprinter_factory(start, end):
"""
Factory that returns a pprint function used by the default pprint of
dicts and dict proxies.
"""
def inner(obj, p, cycle):
- typ = type(obj)
- if basetype is not None and typ is not basetype and typ.__repr__ != basetype.__repr__:
- # If the subclass provides its own repr, use it instead.
- return p.text(typ.__repr__(obj))
-
if cycle:
return p.text('{...}')
step = len(start)
@@ -745,12 +734,12 @@ def _exception_pprint(obj, p, cycle):
int: _repr_pprint,
float: _repr_pprint,
str: _repr_pprint,
- tuple: _seq_pprinter_factory('(', ')', tuple),
- list: _seq_pprinter_factory('[', ']', list),
- dict: _dict_pprinter_factory('{', '}', dict),
+ tuple: _seq_pprinter_factory('(', ')'),
+ list: _seq_pprinter_factory('[', ']'),
+ dict: _dict_pprinter_factory('{', '}'),
- set: _set_pprinter_factory('{', '}', set),
- frozenset: _set_pprinter_factory('frozenset({', '})', frozenset),
+ set: _set_pprinter_factory('{', '}'),
+ frozenset: _set_pprinter_factory('frozenset({', '})'),
super: _super_pprint,
_re_pattern_type: _re_pattern_pprint,
type: _type_pprint,
| OrderedDict output differs in ipython from python (and official documentation)
[collections — Container datatypes](https://docs.python.org/3/library/collections.html#collections.OrderedDict)
An ordered dictionary can be combined with the Counter class so that the counter remembers the order elements are first encountered:
```
from collections import Counter, OrderedDict
class OrderedCounter(Counter, OrderedDict):
'Counter that remembers the order elements are first encountered'
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, OrderedDict(self))
def __reduce__(self):
return self.__class__, (OrderedDict(self),)
```
This is based off a Raymond Hettinger - Super considered super! - PyCon 2015, and his blog post [Python’s super() considered super!](https://rhettinger.wordpress.com/2011/05/26/super-considered-super/).
My confusion is IPython returns a result that differs from the Python it's built on. The ordering of the output is important and the recipe in Hettinger's blog and the official documentation preserves the insertion order but IPython 6.1.0 built on Python 3.6.3 does not. However if I execute directly in Python 3.6.3 I receive the expected desired result.
`oc = OrderedCounter('abracadabra')`
output in Python 3.6.3 (and Python 2.7.14)
`OrderedCounter(OrderedDict([('a', 5), ('b', 2), ('r', 2), ('c', 1), ('d', 1)]))`
output in IPython 6.1.0 on Python 3.6.3 Anaconda
`OrderedCounter({'a': 5, 'b': 2, 'c': 1, 'd': 1, 'r': 2})` //ordering is not preserved, seems sorted by key
Would someone kindly explain why the output is different in IPython 6.1.0 | ipython/ipython | diff --git a/IPython/core/tests/test_formatters.py b/IPython/core/tests/test_formatters.py
index 35edc75d1..cde43c94a 100644
--- a/IPython/core/tests/test_formatters.py
+++ b/IPython/core/tests/test_formatters.py
@@ -49,7 +49,7 @@ def test_pretty():
f = PlainTextFormatter()
f.for_type(A, foo_printer)
nt.assert_equal(f(A()), 'foo')
- nt.assert_equal(f(B()), 'foo')
+ nt.assert_equal(f(B()), 'B()')
nt.assert_equal(f(GoodPretty()), 'foo')
# Just don't raise an exception for the following:
f(BadPretty())
diff --git a/IPython/lib/tests/test_pretty.py b/IPython/lib/tests/test_pretty.py
index 6d6574345..68e90ecae 100644
--- a/IPython/lib/tests/test_pretty.py
+++ b/IPython/lib/tests/test_pretty.py
@@ -420,4 +420,24 @@ def meaning_of_life(question=None):
return "Don't panic"
nt.assert_in('meaning_of_life(question=None)', pretty.pretty(meaning_of_life))
-
+
+
+class OrderedCounter(Counter, OrderedDict):
+ 'Counter that remembers the order elements are first encountered'
+
+ def __repr__(self):
+ return '%s(%r)' % (self.__class__.__name__, OrderedDict(self))
+
+ def __reduce__(self):
+ return self.__class__, (OrderedDict(self),)
+
+class MySet(set): # Override repr of a basic type
+ def __repr__(self):
+ return 'mine'
+
+def test_custom_repr():
+ """A custom repr should override a pretty printer for a parent type"""
+ oc = OrderedCounter("abracadabra")
+ nt.assert_in("OrderedCounter(OrderedDict", pretty.pretty(oc))
+
+ nt.assert_equal(pretty.pretty(MySet()), 'mine')
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 6.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[test]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==25.3.0
backcall==0.2.0
certifi==2025.1.31
charset-normalizer==3.4.1
decorator==5.2.1
exceptiongroup==1.2.2
fastjsonschema==2.21.1
idna==3.10
importlib_metadata==8.6.1
iniconfig==2.1.0
ipykernel==5.5.6
-e git+https://github.com/ipython/ipython.git@38e0033a6cf59136208b998c394ac5472b9c1849#egg=ipython
ipython-genutils==0.2.0
jedi==0.19.2
jsonschema==4.23.0
jsonschema-specifications==2024.10.1
jupyter_client==8.6.3
jupyter_core==5.7.2
nbformat==5.10.4
nose==1.3.7
numpy==2.0.2
packaging==24.2
parso==0.8.4
pexpect==4.9.0
pickleshare==0.7.5
platformdirs==4.3.7
pluggy==1.5.0
prompt-toolkit==1.0.18
ptyprocess==0.7.0
Pygments==2.19.1
pytest==8.3.5
python-dateutil==2.9.0.post0
pyzmq==26.3.0
referencing==0.36.2
requests==2.32.3
rpds-py==0.24.0
simplegeneric==0.8.1
six==1.17.0
testpath==0.6.0
tomli==2.2.1
tornado==6.4.2
traitlets==5.14.3
typing_extensions==4.13.0
urllib3==2.3.0
wcwidth==0.2.13
zipp==3.21.0
| name: ipython
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==25.3.0
- backcall==0.2.0
- certifi==2025.1.31
- charset-normalizer==3.4.1
- decorator==5.2.1
- exceptiongroup==1.2.2
- fastjsonschema==2.21.1
- idna==3.10
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- ipykernel==5.5.6
- ipython-genutils==0.2.0
- jedi==0.19.2
- jsonschema==4.23.0
- jsonschema-specifications==2024.10.1
- jupyter-client==8.6.3
- jupyter-core==5.7.2
- nbformat==5.10.4
- nose==1.3.7
- numpy==2.0.2
- packaging==24.2
- parso==0.8.4
- pexpect==4.9.0
- pickleshare==0.7.5
- platformdirs==4.3.7
- pluggy==1.5.0
- prompt-toolkit==1.0.18
- ptyprocess==0.7.0
- pygments==2.19.1
- pytest==8.3.5
- python-dateutil==2.9.0.post0
- pyzmq==26.3.0
- referencing==0.36.2
- requests==2.32.3
- rpds-py==0.24.0
- simplegeneric==0.8.1
- six==1.17.0
- testpath==0.6.0
- tomli==2.2.1
- tornado==6.4.2
- traitlets==5.14.3
- typing-extensions==4.13.0
- urllib3==2.3.0
- wcwidth==0.2.13
- zipp==3.21.0
prefix: /opt/conda/envs/ipython
| [
"IPython/core/tests/test_formatters.py::test_pretty",
"IPython/lib/tests/test_pretty.py::test_custom_repr"
] | [
"IPython/core/tests/test_formatters.py::test_error_method",
"IPython/core/tests/test_formatters.py::test_warn_error_for_type",
"IPython/core/tests/test_formatters.py::test_error_pretty_method",
"IPython/core/tests/test_formatters.py::test_bad_repr_traceback",
"IPython/core/tests/test_formatters.py::test_ipython_display_formatter",
"IPython/core/tests/test_formatters.py::test_repr_mime",
"IPython/core/tests/test_formatters.py::test_pass_correct_include_exclude",
"IPython/core/tests/test_formatters.py::test_repr_mime_meta",
"IPython/core/tests/test_formatters.py::test_repr_mime_failure"
] | [
"IPython/core/tests/test_formatters.py::test_deferred",
"IPython/core/tests/test_formatters.py::test_precision",
"IPython/core/tests/test_formatters.py::test_bad_precision",
"IPython/core/tests/test_formatters.py::test_for_type",
"IPython/core/tests/test_formatters.py::test_for_type_string",
"IPython/core/tests/test_formatters.py::test_for_type_by_name",
"IPython/core/tests/test_formatters.py::test_lookup",
"IPython/core/tests/test_formatters.py::test_lookup_string",
"IPython/core/tests/test_formatters.py::test_lookup_by_type",
"IPython/core/tests/test_formatters.py::test_lookup_by_type_string",
"IPython/core/tests/test_formatters.py::test_in_formatter",
"IPython/core/tests/test_formatters.py::test_string_in_formatter",
"IPython/core/tests/test_formatters.py::test_pop",
"IPython/core/tests/test_formatters.py::test_pop_string",
"IPython/core/tests/test_formatters.py::test_nowarn_notimplemented",
"IPython/core/tests/test_formatters.py::test_pdf_formatter",
"IPython/core/tests/test_formatters.py::test_print_method_bound",
"IPython/core/tests/test_formatters.py::test_print_method_weird",
"IPython/core/tests/test_formatters.py::test_format_config",
"IPython/core/tests/test_formatters.py::test_pretty_max_seq_length",
"IPython/core/tests/test_formatters.py::test_json_as_string_deprecated",
"IPython/lib/tests/test_pretty.py::test_indentation",
"IPython/lib/tests/test_pretty.py::test_dispatch",
"IPython/lib/tests/test_pretty.py::test_callability_checking",
"IPython/lib/tests/test_pretty.py::test_pprint_heap_allocated_type",
"IPython/lib/tests/test_pretty.py::test_pprint_nomod",
"IPython/lib/tests/test_pretty.py::test_pprint_break",
"IPython/lib/tests/test_pretty.py::test_pprint_break_repr",
"IPython/lib/tests/test_pretty.py::test_bad_repr",
"IPython/lib/tests/test_pretty.py::test_really_bad_repr",
"IPython/lib/tests/test_pretty.py::TestsPretty::test_long_dict",
"IPython/lib/tests/test_pretty.py::TestsPretty::test_long_list",
"IPython/lib/tests/test_pretty.py::TestsPretty::test_long_set",
"IPython/lib/tests/test_pretty.py::TestsPretty::test_long_tuple",
"IPython/lib/tests/test_pretty.py::TestsPretty::test_super_repr",
"IPython/lib/tests/test_pretty.py::TestsPretty::test_unbound_method",
"IPython/lib/tests/test_pretty.py::test_metaclass_repr",
"IPython/lib/tests/test_pretty.py::test_unicode_repr",
"IPython/lib/tests/test_pretty.py::test_basic_class",
"IPython/lib/tests/test_pretty.py::test_collections_defaultdict",
"IPython/lib/tests/test_pretty.py::test_collections_ordereddict",
"IPython/lib/tests/test_pretty.py::test_collections_deque",
"IPython/lib/tests/test_pretty.py::test_collections_counter",
"IPython/lib/tests/test_pretty.py::test_mappingproxy",
"IPython/lib/tests/test_pretty.py::test_function_pretty"
] | [] | BSD 3-Clause "New" or "Revised" License | 1,975 | 992 | [
"IPython/lib/pretty.py"
] |
|
palantir__python-language-server-211 | 897980b7e2bd71811311cb49b18cf89ed3aa9cbe | 2017-12-26 01:15:40 | 12b93fe83b9c01a8cdf5a6fe902af60c59742b99 | evandrocoan: @lgeiger, This should have been fixed by https://github.com/palantir/python-language-server/pull/234
lgeiger: > This should have been fixed by #234
👍I rebased. Let's see what CI thinks.
lgeiger: Thanks @evandrocoan for pointing this out.
I had to revert #220 to fix #239. Now the tests should pass. | diff --git a/pyls/plugins/pycodestyle_lint.py b/pyls/plugins/pycodestyle_lint.py
index 8441281..96efafd 100644
--- a/pyls/plugins/pycodestyle_lint.py
+++ b/pyls/plugins/pycodestyle_lint.py
@@ -12,12 +12,12 @@ def pyls_lint(config, document):
log.debug("Got pycodestyle settings: %s", settings)
opts = {
- 'exclude': ','.join(settings.get('exclude') or []),
- 'filename': ','.join(settings.get('filename') or []),
+ 'exclude': settings.get('exclude'),
+ 'filename': settings.get('filename'),
'hang_closing': settings.get('hangClosing'),
- 'ignore': ','.join(settings.get('ignore') or []),
+ 'ignore': settings.get('ignore'),
'max_line_length': settings.get('maxLineLength'),
- 'select': ','.join(settings.get('select') or []),
+ 'select': settings.get('select'),
}
kwargs = {k: v for k, v in opts.items() if v}
styleguide = pycodestyle.StyleGuide(kwargs)
| Fix ignored and select settings interface with pycodestyle
On https://github.com/PyCQA/pycodestyle/pull/722 they refused to fix their interface. When passing the list arguments as `ignore` and `select` settings to `pycodestyle`, it is required to pass a python list as `["E201", "E501"]`, instead of a string `"E201,E501"`, otherwise they will cause the issue pointed on: https://github.com/tomv564/LSP/issues/244#issuecomment-358753274 | palantir/python-language-server | diff --git a/test/plugins/test_pycodestyle_lint.py b/test/plugins/test_pycodestyle_lint.py
index 028997f..583da79 100644
--- a/test/plugins/test_pycodestyle_lint.py
+++ b/test/plugins/test_pycodestyle_lint.py
@@ -8,7 +8,7 @@ from pyls.plugins import pycodestyle_lint
DOC_URI = uris.from_fs_path(__file__)
DOC = """import sys
-def hello():
+def hello( ):
\tpass
import json
@@ -40,6 +40,14 @@ def test_pycodestyle(config):
assert mod_import['range']['start'] == {'line': 7, 'character': 0}
assert mod_import['range']['end'] == {'line': 7, 'character': 1}
+ msg = "E201 whitespace after '('"
+ mod_import = [d for d in diags if d['message'] == msg][0]
+
+ assert mod_import['code'] == 'E201'
+ assert mod_import['severity'] == lsp.DiagnosticSeverity.Warning
+ assert mod_import['range']['start'] == {'line': 2, 'character': 10}
+ assert mod_import['range']['end'] == {'line': 2, 'character': 14}
+
def test_pycodestyle_config(workspace):
""" Test that we load config files properly.
@@ -66,7 +74,7 @@ def test_pycodestyle_config(workspace):
assert [d for d in diags if d['code'] == 'W191']
content = {
- 'setup.cfg': ('[pycodestyle]\nignore = W191', True),
+ 'setup.cfg': ('[pycodestyle]\nignore = W191, E201', True),
'tox.ini': ('', False)
}
@@ -77,18 +85,16 @@ def test_pycodestyle_config(workspace):
# And make sure we don't get any warnings
diags = pycodestyle_lint.pyls_lint(config, doc)
- assert len([d for d in diags if d['code'] == 'W191']) == 0 if working else 1
+ assert len([d for d in diags if d['code'] == 'W191']) == (0 if working else 1)
+ assert len([d for d in diags if d['code'] == 'E201']) == (0 if working else 1)
+ assert [d for d in diags if d['code'] == 'W391']
os.unlink(os.path.join(workspace.root_path, conf_file))
# Make sure we can ignore via the PYLS config as well
- config.update({'plugins': {'pycodestyle': {'ignore': ['W191']}}})
+ config.update({'plugins': {'pycodestyle': {'ignore': ['W191', 'E201']}}})
# And make sure we only get one warning
diags = pycodestyle_lint.pyls_lint(config, doc)
assert not [d for d in diags if d['code'] == 'W191']
-
- # Ignore both warnings
- config.update({'plugins': {'pycodestyle': {'ignore': ['W191', 'W391']}}})
- # And make sure we get neither
- assert not [d for d in diags if d['code'] == 'W191']
- assert not [d for d in diags if d['code'] == 'W391']
+ assert not [d for d in diags if d['code'] == 'E201']
+ assert [d for d in diags if d['code'] == 'W391']
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | 0.13 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[test]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"coverage"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | cachetools==5.5.2
chardet==5.2.0
colorama==0.4.6
configparser==7.2.0
coverage==7.8.0
distlib==0.3.9
exceptiongroup==1.2.2
filelock==3.18.0
future==1.0.0
iniconfig==2.1.0
jedi==0.19.2
json-rpc==1.15.0
mccabe==0.7.0
packaging==24.2
parso==0.8.4
platformdirs==4.3.7
pluggy==1.5.0
pycodestyle==2.13.0
pydocstyle==6.3.0
pyflakes==3.3.1
pyproject-api==1.9.0
pytest==8.3.5
pytest-cov==6.0.0
-e git+https://github.com/palantir/python-language-server.git@897980b7e2bd71811311cb49b18cf89ed3aa9cbe#egg=python_language_server
pytoolconfig==1.3.1
rope==1.13.0
snowballstemmer==2.2.0
tomli==2.2.1
tox==4.25.0
typing_extensions==4.13.0
versioneer==0.29
virtualenv==20.29.3
yapf==0.43.0
| name: python-language-server
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- cachetools==5.5.2
- chardet==5.2.0
- colorama==0.4.6
- configparser==7.2.0
- coverage==7.8.0
- distlib==0.3.9
- exceptiongroup==1.2.2
- filelock==3.18.0
- future==1.0.0
- iniconfig==2.1.0
- jedi==0.19.2
- json-rpc==1.15.0
- mccabe==0.7.0
- packaging==24.2
- parso==0.8.4
- platformdirs==4.3.7
- pluggy==1.5.0
- pycodestyle==2.13.0
- pydocstyle==6.3.0
- pyflakes==3.3.1
- pyproject-api==1.9.0
- pytest==8.3.5
- pytest-cov==6.0.0
- pytoolconfig==1.3.1
- rope==1.13.0
- snowballstemmer==2.2.0
- tomli==2.2.1
- tox==4.25.0
- typing-extensions==4.13.0
- versioneer==0.29
- virtualenv==20.29.3
- yapf==0.43.0
prefix: /opt/conda/envs/python-language-server
| [
"test/plugins/test_pycodestyle_lint.py::test_pycodestyle_config"
] | [] | [
"test/plugins/test_pycodestyle_lint.py::test_pycodestyle"
] | [] | MIT License | 1,979 | 267 | [
"pyls/plugins/pycodestyle_lint.py"
] |
missionpinball__mpf-1062 | 6ba39ef0491365f87da0a9bc28ab541a600bc7f4 | 2017-12-26 19:49:15 | 2c1bb3aa1e25674916bc4e0d17ccb6c3c87bd01b | diff --git a/mpf/_version.py b/mpf/_version.py
index 11d3ee171..d79f94544 100644
--- a/mpf/_version.py
+++ b/mpf/_version.py
@@ -10,7 +10,7 @@ PyPI.
"""
-__version__ = '0.50.0-dev.44'
+__version__ = '0.50.0-dev.43'
'''The full version of MPF.'''
__short_version__ = '0.50'
diff --git a/mpf/core/config_processor.py b/mpf/core/config_processor.py
index 95868ca8b..56b8ad0fc 100755
--- a/mpf/core/config_processor.py
+++ b/mpf/core/config_processor.py
@@ -26,6 +26,9 @@ class ConfigProcessor(object):
if not ConfigValidator.config_spec:
ConfigValidator.load_config_spec()
+ if not config:
+ return dict()
+
for k in config.keys():
try:
if config_type not in ConfigValidator.config_spec[k][
diff --git a/mpf/core/data_manager.py b/mpf/core/data_manager.py
index b06683d07..42f5a4f39 100644
--- a/mpf/core/data_manager.py
+++ b/mpf/core/data_manager.py
@@ -73,6 +73,9 @@ class DataManager(MpfController):
self.debug_log("Didn't find the %s file. No prob. We'll create "
"it when we save.", self.name)
+ if not self.data:
+ self.data = {}
+
def get_data(self, section=None):
"""Return the value of this DataManager's data.
diff --git a/mpf/core/machine.py b/mpf/core/machine.py
index ea4067514..b6e542940 100644
--- a/mpf/core/machine.py
+++ b/mpf/core/machine.py
@@ -698,8 +698,7 @@ class MachineController(LogMixin):
self.info_log("Starting the main run loop.")
try:
init = Util.ensure_future(self.initialise(), loop=self.clock.loop)
- self.clock.loop.run_until_complete(Util.first([init, self.stop_future], cancel_others=False,
- loop=self.clock.loop))
+ self.clock.loop.run_until_complete(Util.first([init, self.stop_future], loop=self.clock.loop))
except RuntimeError:
# do not show a runtime useless runtime error
self.error_log("Failed to initialise MPF")
diff --git a/mpf/file_interfaces/yaml_interface.py b/mpf/file_interfaces/yaml_interface.py
index 4be0ba59c..e68a4b31d 100644
--- a/mpf/file_interfaces/yaml_interface.py
+++ b/mpf/file_interfaces/yaml_interface.py
@@ -275,7 +275,7 @@ class YamlInterface(FileInterface):
@staticmethod
def process(data_string: Iterable[str]) -> dict:
"""Parse yaml from a string."""
- return Util.keys_to_lower(yaml.load(data_string, Loader=MpfLoader))
+ return yaml.load(data_string, Loader=MpfLoader)
def save(self, filename: str, data: dict) -> None: # pragma: no cover
"""Save config to yaml file."""
| String placeholders are lower cased. Support uppercase strings
Fix and test | missionpinball/mpf | diff --git a/mpf/tests/machine_files/config_interface/config/test_config_interface.yaml b/mpf/tests/machine_files/config_interface/config/test_config_interface.yaml
index f5a729241..20966a09a 100644
--- a/mpf/tests/machine_files/config_interface/config/test_config_interface.yaml
+++ b/mpf/tests/machine_files/config_interface/config/test_config_interface.yaml
@@ -26,6 +26,3 @@ test_section:
case_sensitive_1: test
Case_sensitive_2: test
case_sensitive_3: Test
-
-Test_section_1:
- test: test
diff --git a/mpf/tests/machine_files/p_roc/config/config.yaml b/mpf/tests/machine_files/p_roc/config/config.yaml
index 3e718b0f0..c35357873 100644
--- a/mpf/tests/machine_files/p_roc/config/config.yaml
+++ b/mpf/tests/machine_files/p_roc/config/config.yaml
@@ -4,7 +4,7 @@ hardware:
driverboards: pdb
platform: p_roc
-P_ROC:
+p_roc:
dmd_timing_cycles: 1, 2, 3, 4
switches:
diff --git a/mpf/tests/machine_files/shows/config/test_shows.yaml b/mpf/tests/machine_files/shows/config/test_shows.yaml
index f6235c992..c17e8c680 100644
--- a/mpf/tests/machine_files/shows/config/test_shows.yaml
+++ b/mpf/tests/machine_files/shows/config/test_shows.yaml
@@ -171,8 +171,8 @@ show_player:
show_assoc_tokens:
speed: 1
show_tokens:
- line1Num: tag1
- line1Color: red
+ line1num: tag1
+ line1color: red
stop_show_assoc_tokens:
show_assoc_tokens:
action: stop
diff --git a/mpf/tests/test_Config.py b/mpf/tests/test_Config.py
index 97014c7b1..9246105be 100644
--- a/mpf/tests/test_Config.py
+++ b/mpf/tests/test_Config.py
@@ -14,8 +14,6 @@ class TestConfig(MpfTestCase):
self.add_to_config_validator('test_section',
dict(__valid_in__='machine'))
- self.add_to_config_validator('test_section_1',
- dict(__valid_in__='machine'))
super().setUp()
@@ -49,19 +47,16 @@ class TestConfig(MpfTestCase):
self.assertEqual('+5', self.machine.config['test_section']['str_plus5'])
self.assertEqual('+0.5', self.machine.config['test_section']['str_plus0point5'])
- # keys should be all lowercase
+ # keys should keep case
self.assertIn('case_sensitive_1', self.machine.config['test_section'])
- self.assertIn('case_sensitive_2', self.machine.config['test_section'])
+ self.assertIn('Case_sensitive_2', self.machine.config['test_section'])
self.assertIn('case_sensitive_3', self.machine.config['test_section'])
# values should be case sensitive
self.assertEqual(self.machine.config['test_section']['case_sensitive_1'], 'test')
- self.assertEqual(self.machine.config['test_section']['case_sensitive_2'], 'test')
+ self.assertEqual(self.machine.config['test_section']['Case_sensitive_2'], 'test')
self.assertEqual(self.machine.config['test_section']['case_sensitive_3'], 'Test')
- # key should be lowercase even though it's uppercase in the config
- self.assertIn('test_section_1', self.machine.config)
-
def test_config_validator(self):
validation_failure_info = (("key", "entry"), "subkey")
# test config spec syntax error
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 5
} | 0.33 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc",
"pip install -U pip setuptools",
"pip install Cython==0.24.1"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | asciimatics==1.14.0
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
Cython==0.24.1
future==1.0.0
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
-e git+https://github.com/missionpinball/mpf.git@6ba39ef0491365f87da0a9bc28ab541a600bc7f4#egg=mpf
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
Pillow==8.4.0
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
psutil==7.0.0
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyfiglet==0.8.post1
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pyserial==3.5
pyserial-asyncio==0.6
pytest==6.2.4
ruamel.base==1.0.0
ruamel.yaml==0.10.23
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
typing==3.7.4.3
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
wcwidth==0.2.13
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: mpf
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- asciimatics==1.14.0
- cython==0.24.1
- future==1.0.0
- pillow==8.4.0
- pip==21.3.1
- psutil==7.0.0
- pyfiglet==0.8.post1
- pyserial==3.5
- pyserial-asyncio==0.6
- ruamel-base==1.0.0
- ruamel-yaml==0.10.23
- setuptools==59.6.0
- typing==3.7.4.3
- wcwidth==0.2.13
prefix: /opt/conda/envs/mpf
| [
"mpf/tests/test_Config.py::TestConfig::test_config_file"
] | [] | [
"mpf/tests/test_Config.py::TestConfig::test_config_merge",
"mpf/tests/test_Config.py::TestConfig::test_config_validator"
] | [] | MIT License | 1,981 | 778 | [
"mpf/_version.py",
"mpf/core/config_processor.py",
"mpf/core/data_manager.py",
"mpf/core/machine.py",
"mpf/file_interfaces/yaml_interface.py"
] |
|
asottile__yesqa-4 | 52da14636029e7e8cc70c6a61912c14fa27ca50e | 2017-12-31 16:32:27 | ad85b55968036d30088048335194733ecaf06c13 | diff --git a/yesqa.py b/yesqa.py
index 008dc57..40cf153 100644
--- a/yesqa.py
+++ b/yesqa.py
@@ -43,14 +43,28 @@ def _remove_comments(tokens):
def _rewrite_noqa_comment(tokens, i, flake8_results):
+ # find logical lines that this noqa comment may affect
+ lines = set()
+ j = i
+ while j >= 0 and tokens[j].name not in {'NL', 'NEWLINE'}:
+ t = tokens[j]
+ if t.line is not None:
+ lines.update(range(t.line, t.line + t.src.count('\n') + 1))
+ j -= 1
+
+ lints = set()
+ for line in lines:
+ lints.update(flake8_results[line])
+
token = tokens[i]
match = NOQA_RE.match(token.src)
+
# exclude all lints on the line but no lints
- if token.line not in flake8_results:
+ if not lints:
_remove_comment(tokens, i)
elif match.group().lower() != '# noqa':
codes = set(SEP_RE.split(match.group(1)[2:]))
- expected_codes = codes & flake8_results[token.line]
+ expected_codes = codes & lints
if expected_codes != codes:
comment = '# noqa: {}'.format(','.join(sorted(expected_codes)))
tokens[i] = token._replace(src=NOQA_RE.sub(comment, token.src))
| False positive: removes `noqa` on multi-line string
```python
"""
aaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
""" # noqa
```
```console
$ flake8 test.py
$ yesqa test.py
Rewriting test.py
$ flake8 test.py
test.py:2:80: E501 line too long (82 > 79 characters)
``` | asottile/yesqa | diff --git a/tests/yesqa_test.py b/tests/yesqa_test.py
index e0225c8..9043a86 100644
--- a/tests/yesqa_test.py
+++ b/tests/yesqa_test.py
@@ -32,9 +32,11 @@ def test_non_utf8_bytes(tmpdir, capsys):
(
'', # noop
'# hello\n', # comment at beginning of file
- 'import os # noqa\n', # still needed
- 'import os # NOQA\n', # still needed
- 'import os # noqa: F401\n', # still needed
+ # still needed
+ 'import os # noqa\n',
+ 'import os # NOQA\n',
+ 'import os # noqa: F401\n',
+ '"""\n' + 'a' * 40 + ' ' + 'b' * 60 + '\n""" # noqa\n',
),
)
def test_ok(assert_rewrite, src):
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 1
} | 0.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup==1.2.2
flake8==7.2.0
iniconfig==2.1.0
mccabe==0.7.0
packaging==24.2
pluggy==1.5.0
pycodestyle==2.13.0
pyflakes==3.3.2
pytest==8.3.5
tokenize_rt==6.1.0
tomli==2.2.1
-e git+https://github.com/asottile/yesqa.git@52da14636029e7e8cc70c6a61912c14fa27ca50e#egg=yesqa
| name: yesqa
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- exceptiongroup==1.2.2
- flake8==7.2.0
- iniconfig==2.1.0
- mccabe==0.7.0
- packaging==24.2
- pluggy==1.5.0
- pycodestyle==2.13.0
- pyflakes==3.3.2
- pytest==8.3.5
- tokenize-rt==6.1.0
- tomli==2.2.1
prefix: /opt/conda/envs/yesqa
| [
"tests/yesqa_test.py::test_ok[\"\"\"\\naaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
] | [] | [
"tests/yesqa_test.py::test_non_utf8_bytes",
"tests/yesqa_test.py::test_ok[]",
"tests/yesqa_test.py::test_ok[#",
"tests/yesqa_test.py::test_ok[import",
"tests/yesqa_test.py::test_rewrite[x",
"tests/yesqa_test.py::test_rewrite[import",
"tests/yesqa_test.py::test_rewrite[#",
"tests/yesqa_test.py::test_main"
] | [] | MIT License | 1,995 | 355 | [
"yesqa.py"
] |
|
Azure__WALinuxAgent-987 | 30e638ddab04bd4ec473fe8369a86f64e717776e | 2018-01-03 23:27:34 | 6e9b985c1d7d564253a1c344bab01b45093103cd | diff --git a/azurelinuxagent/common/protocol/restapi.py b/azurelinuxagent/common/protocol/restapi.py
index 275cedb0..540ec5d9 100644
--- a/azurelinuxagent/common/protocol/restapi.py
+++ b/azurelinuxagent/common/protocol/restapi.py
@@ -321,9 +321,9 @@ class Protocol(DataContract):
def get_artifacts_profile(self):
raise NotImplementedError()
- def download_ext_handler_pkg(self, uri, headers=None):
+ def download_ext_handler_pkg(self, uri, headers=None, use_proxy=True):
try:
- resp = restutil.http_get(uri, use_proxy=True, headers=headers)
+ resp = restutil.http_get(uri, headers=headers, use_proxy=use_proxy)
if restutil.request_succeeded(resp):
return resp.read()
except Exception as e:
diff --git a/azurelinuxagent/common/protocol/wire.py b/azurelinuxagent/common/protocol/wire.py
index a92e0b89..2dc5297c 100644
--- a/azurelinuxagent/common/protocol/wire.py
+++ b/azurelinuxagent/common/protocol/wire.py
@@ -172,7 +172,7 @@ class WireProtocol(Protocol):
logger.warn("Download did not succeed, falling back to host plugin")
host = self.client.get_host_plugin()
uri, headers = host.get_artifact_request(uri, host.manifest_uri)
- package = super(WireProtocol, self).download_ext_handler_pkg(uri, headers=headers)
+ package = super(WireProtocol, self).download_ext_handler_pkg(uri, headers=headers, use_proxy=False)
return package
def report_provision_status(self, provision_status):
diff --git a/azurelinuxagent/ga/env.py b/azurelinuxagent/ga/env.py
index 0456cb06..26487818 100644
--- a/azurelinuxagent/ga/env.py
+++ b/azurelinuxagent/ga/env.py
@@ -86,7 +86,7 @@ class EnvHandler(object):
version=CURRENT_VERSION,
op=WALAEventOperation.Firewall,
is_success=success,
- log_event=True)
+ log_event=False)
timeout = conf.get_root_device_scsi_timeout()
if timeout is not None:
diff --git a/azurelinuxagent/ga/monitor.py b/azurelinuxagent/ga/monitor.py
index e3ef292f..9b90aa7b 100644
--- a/azurelinuxagent/ga/monitor.py
+++ b/azurelinuxagent/ga/monitor.py
@@ -203,7 +203,8 @@ class MonitorHandler(object):
version=CURRENT_VERSION,
op=WALAEventOperation.HeartBeat,
is_success=True,
- message=msg)
+ message=msg,
+ log_event=False)
counter += 1
@@ -222,7 +223,7 @@ class MonitorHandler(object):
version=CURRENT_VERSION,
op=WALAEventOperation.HttpErrors,
is_success=False,
- msg=msg)
+ message=msg)
try:
self.collect_and_send_events()
diff --git a/azurelinuxagent/ga/update.py b/azurelinuxagent/ga/update.py
index 26009299..1b6f913e 100644
--- a/azurelinuxagent/ga/update.py
+++ b/azurelinuxagent/ga/update.py
@@ -295,7 +295,7 @@ class UpdateHandler(object):
duration=elapsed_milliseconds(utc_start),
message="Incarnation {0}".format(
exthandlers_handler.last_etag),
- log_event=True)
+ log_event=False)
time.sleep(GOAL_STATE_INTERVAL)
diff --git a/azurelinuxagent/pa/provision/cloudinit.py b/azurelinuxagent/pa/provision/cloudinit.py
index 22c3f9ca..fa47799a 100644
--- a/azurelinuxagent/pa/provision/cloudinit.py
+++ b/azurelinuxagent/pa/provision/cloudinit.py
@@ -64,7 +64,7 @@ class CloudInitProvisionHandler(ProvisionHandler):
logger.info("Finished provisioning")
self.report_ready(thumbprint)
- self.report_event("Provision succeed",
+ self.report_event("Provisioning with cloud-init succeeded",
is_success=True,
duration=elapsed_milliseconds(utc_start))
self.report_event(self.create_guest_state_telemetry_messsage(),
diff --git a/azurelinuxagent/pa/provision/default.py b/azurelinuxagent/pa/provision/default.py
index 5d6f1565..44e171b4 100644
--- a/azurelinuxagent/pa/provision/default.py
+++ b/azurelinuxagent/pa/provision/default.py
@@ -89,7 +89,7 @@ class ProvisionHandler(object):
self.write_provisioned()
- self.report_event("Provision succeeded",
+ self.report_event("Provisioning succeeded",
is_success=True,
duration=elapsed_milliseconds(utc_start))
diff --git a/azurelinuxagent/pa/provision/factory.py b/azurelinuxagent/pa/provision/factory.py
index d87765f3..9e88618f 100644
--- a/azurelinuxagent/pa/provision/factory.py
+++ b/azurelinuxagent/pa/provision/factory.py
@@ -16,9 +16,7 @@
#
import azurelinuxagent.common.conf as conf
-import azurelinuxagent.common.logger as logger
-from azurelinuxagent.common.utils.textutil import Version
from azurelinuxagent.common.version import DISTRO_NAME, DISTRO_VERSION, \
DISTRO_FULL_NAME
| [2.2.14] HostGAPlugin still use proxy while auto-updating
Hi there,
I saw there was an issue #769 "HostGAPlugin requests should never go through proxy". But currently it seems that only the blob status upload traffic can ignore the proxy when proxy is misconfigured. For the auto-update feature, only the manifest file can be downloaded without proxy, the WALA update packages still use proxy and fail to be downloaded. The extension downloading process is the same as auto-update. Is it by design? Thanks!
My steps:
1. Configure /etc/waagent.conf to set a wrong proxy, enable auto-update, and enable verbose log:
HttpProxy.Host=172.16.0.1
HttpProxy.Port=3128
Logs.Verbose=y
AutoUpdate.Enabled=y
Service waagent restart
2. Check /var/log/waagent.log
```
2017/07/12 06:29:39.476351 VERBOSE HTTP proxy: [172.16.0.1:3128]
2017/07/12 06:29:39.483259 VERBOSE HTTP connection [GET] [http://rdfepirv2sg1prdstr03.blob.core.windows.net:80/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent_Prod_asiasoutheast_manifest.xml] [None] [None]
2017/07/12 06:29:49.513148 VERBOSE HTTP connection [GET] [http://rdfepirv2sg1prdstr03.blob.core.windows.net:80/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent_Prod_asiasoutheast_manifest.xml] [None] [None]
2017/07/12 06:29:59.547066 VERBOSE HTTP connection [GET] [http://rdfepirv2sg1prdstr03.blob.core.windows.net:80/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent_Prod_asiasoutheast_manifest.xml] [None] [None]
2017/07/12 06:30:09.572601 VERBOSE Fetch failed from [https://rdfepirv2sg1prdstr03.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent_Prod_asiasoutheast_manifest.xml]: [000009] HTTP GET failed
2017/07/12 06:30:09.591745 VERBOSE Manifest could not be downloaded, falling back to host plugin
2017/07/12 06:30:09.602838 VERBOSE HostGAPlugin: Getting API versions at [http://100.107.240.13:32526/versions]
2017/07/12 06:30:09.614843 VERBOSE HTTP connection [GET] [/versions] [None] [{'x-ms-containerid': u'd4384bb2-4f5c-4680-8c3d-9fa51841ba7d'}]
2017/07/12 06:30:09.632135 VERBOSE HTTP response status: [200]
2017/07/12 06:30:09.639179 INFO Event: name=WALinuxAgent, op=InitializeHostPlugin, message=
2017/07/12 06:30:09.650182 VERBOSE Fetch [http://100.107.240.13:32526/extensionArtifact] with headers [{'x-ms-containerid': u'd4384bb2-4f5c-4680-8c3d-9fa51841ba7d', 'x-ms-version': '2015-09-01', 'x-ms-host-config-name': u'77bbebc9f3994cc48fbef61834a1822e.0.77bbebc9f3994cc48fbef61834a1822e.0.wala692214ui4.1.xml', 'x-ms-artifact-location': u'https://rdfepirv2sg1prdstr03.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent_Prod_asiasoutheast_manifest.xml'}]
2017/07/12 06:30:09.689246 VERBOSE HTTP connection [GET] [/extensionArtifact] [None] [{'x-ms-containerid': u'd4384bb2-4f5c-4680-8c3d-9fa51841ba7d', 'x-ms-version': '2015-09-01', 'x-ms-host-config-name': u'77bbebc9f3994cc48fbef61834a1822e.0.77bbebc9f3994cc48fbef61834a1822e.0.wala692214ui4.1.xml', 'x-ms-artifact-location': u'https://rdfepirv2sg1prdstr03.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent_Prod_asiasoutheast_manifest.xml'}]
2017/07/12 06:30:09.763930 VERBOSE HTTP response status: [200]
2017/07/12 06:30:09.770335 VERBOSE Manifest downloaded successfully from host plugin
2017/07/12 06:30:09.778017 INFO Setting host plugin as default channel
2017/07/12 06:30:09.785914 VERBOSE Load ExtensionManifest.xml
2017/07/12 06:30:09.794175 VERBOSE Loading Agent WALinuxAgent-2.2.10 from package package
2017/07/12 06:30:09.803674 VERBOSE Agent WALinuxAgent-2.2.10 error state: Last Failure: 0.0, Total Failures: 0, Fatal: False
2017/07/12 06:30:09.816401 VERBOSE Ensuring Agent WALinuxAgent-2.2.10 is downloaded
2017/07/12 06:30:09.825242 VERBOSE Using host plugin as default channel
2017/07/12 06:30:09.833194 VERBOSE HTTP proxy: [172.16.0.1:3128]
2017/07/12 06:30:09.839940 VERBOSE HTTP connection [GET] [http://100.107.240.13:32526/extensionArtifact] [None] [{'x-ms-containerid': u'd4384bb2-4f5c-4680-8c3d-9fa51841ba7d', 'x-ms-artifact-manifest-location': u'https://rdfepirv2sg1prdstr03.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent_Prod_asiasoutheast_manifest.xml', 'x-ms-version': '2015-09-01', 'x-ms-host-config-name': u'77bbebc9f3994cc48fbef61834a1822e.0.77bbebc9f3994cc48fbef61834a1822e.0.wala692214ui4.1.xml', 'x-ms-artifact-location': u'https://rdfepirv2sg1prdstr03.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent__Prod__2.2.10'}]
2017/07/12 06:30:19.911177 VERBOSE HTTP connection [GET] [http://100.107.240.13:32526/extensionArtifact] [None] [{'x-ms-containerid': u'd4384bb2-4f5c-4680-8c3d-9fa51841ba7d', 'x-ms-artifact-manifest-location': u'https://rdfepirv2sg1prdstr03.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent_Prod_asiasoutheast_manifest.xml', 'x-ms-version': '2015-09-01', 'x-ms-host-config-name': u'77bbebc9f3994cc48fbef61834a1822e.0.77bbebc9f3994cc48fbef61834a1822e.0.wala692214ui4.1.xml', 'x-ms-artifact-location': u'https://rdfepirv2sg1prdstr03.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent__Prod__2.2.10'}]
2017/07/12 06:30:29.992187 VERBOSE HTTP connection [GET] [http://100.107.240.13:32526/extensionArtifact] [None] [{'x-ms-containerid': u'd4384bb2-4f5c-4680-8c3d-9fa51841ba7d', 'x-ms-artifact-manifest-location': u'https://rdfepirv2sg1prdstr03.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent_Prod_asiasoutheast_manifest.xml', 'x-ms-version': '2015-09-01', 'x-ms-host-config-name': u'77bbebc9f3994cc48fbef61834a1822e.0.77bbebc9f3994cc48fbef61834a1822e.0.wala692214ui4.1.xml', 'x-ms-artifact-location': u'https://rdfepirv2sg1prdstr03.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent__Prod__2.2.10'}]
2017/07/12 06:30:39.543400 VERBOSE Found event file: /var/lib/waagent/events/1499855409649965.tld
2017/07/12 06:30:39.554873 VERBOSE Processed event file: /var/lib/waagent/events/1499855409649965.tld
2017/07/12 06:30:39.566265 VERBOSE HTTP connection [POST] [/machine?comp=telemetrydata] [<?xml version="1.0"?><TelemetryData version="1.0"><Provider id="69B669B9-4AF8-4C50-BDC4-6006FA76E975"><Event id="1"><![CDATA[<Param Name="Name" Value="WALinuxAgent" T="mt:wstr" /><Param Name="Version" Value="2.2.14" T="mt:wstr" /><Param Name="IsInternal" Value="False" T="mt:bool" /><Param Name="Operation" Value="InitializeHostPlugin" T="mt:wstr" /><Param Name="OperationSuccess" Value="True" T="mt:bool" /><Param Name="Message" Value="" T="mt:wstr" /><Param Name="Duration" Value="0" T="mt:uint64" /><Param Name="ExtensionType" Value="" T="mt:wstr" /><Param Name="OSVersion" Value="Linux:redhat-6.9-Santiago:2.6.32-696.3.2.el6.x86_64" T="mt:wstr" /><Param Name="GAVersion" Value="WALinuxAgent-2.2.14" T="mt:wstr" /><Param Name="RAM" Value="1679" T="mt:uint64" /><Param Name="Processors" Value="1" T="mt:uint64" /><Param Name="VMName" Value="wala692214ui4" T="mt:wstr" /><Param Name="TenantName" Value="77bbebc9f3994cc48fbef61834a1822e" T="mt:wstr" /><Param Name="RoleName" Value="wala692214ui4" T="mt:wstr" /><Param Name="RoleInstanceName" Value="77bbebc9f3994cc48fbef61834a1822e.wala692214ui4" T="mt:wstr" /><Param Name="ContainerId" Value="d4384bb2-4f5c-4680-8c3d-9fa51841ba7d" T="mt:wstr" />]]></Event></Provider></TelemetryData>] [{'Content-Type': 'text/xml;charset=utf-8', 'x-ms-version': '2012-11-30', 'x-ms-agent-name': 'WALinuxAgent'}]
2017/07/12 06:30:39.768153 VERBOSE HTTP response status: [200]
2017/07/12 06:30:40.064471 VERBOSE Agent WALinuxAgent-2.2.10 download from http://100.107.240.13:32526/extensionArtifact failed [[000009] HTTP GET failed]
2017/07/12 06:30:40.079068 WARNING Host plugin download unsuccessful
2017/07/12 06:30:40.089613 VERBOSE Using host plugin as default channel
2017/07/12 06:30:40.097071 VERBOSE HTTP proxy: [172.16.0.1:3128]
2017/07/12 06:30:40.103549 VERBOSE HTTP connection [GET] [http://100.107.240.13:32526/extensionArtifact] [None] [{'x-ms-containerid': u'd4384bb2-4f5c-4680-8c3d-9fa51841ba7d', 'x-ms-artifact-manifest-location': u'https://rdfepirv2sg1prdstr03.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent_Prod_asiasoutheast_manifest.xml', 'x-ms-version': '2015-09-01', 'x-ms-host-config-name': u'77bbebc9f3994cc48fbef61834a1822e.0.77bbebc9f3994cc48fbef61834a1822e.0.wala692214ui4.1.xml', 'x-ms-artifact-location': u'https://rdfepirv2sg1prdstr04.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent__Prod__2.2.10'}]
2017/07/12 06:30:50.176833 VERBOSE HTTP connection [GET] [http://100.107.240.13:32526/extensionArtifact] [None] [{'x-ms-containerid': u'd4384bb2-4f5c-4680-8c3d-9fa51841ba7d', 'x-ms-artifact-manifest-location': u'https://rdfepirv2sg1prdstr03.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent_Prod_asiasoutheast_manifest.xml', 'x-ms-version': '2015-09-01', 'x-ms-host-config-name': u'77bbebc9f3994cc48fbef61834a1822e.0.77bbebc9f3994cc48fbef61834a1822e.0.wala692214ui4.1.xml', 'x-ms-artifact-location': u'https://rdfepirv2sg1prdstr04.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent__Prod__2.2.10'}]
2017/07/12 06:31:00.249870 VERBOSE HTTP connection [GET] [http://100.107.240.13:32526/extensionArtifact] [None] [{'x-ms-containerid': u'd4384bb2-4f5c-4680-8c3d-9fa51841ba7d', 'x-ms-artifact-manifest-location': u'https://rdfepirv2sg1prdstr03.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent_Prod_asiasoutheast_manifest.xml', 'x-ms-version': '2015-09-01', 'x-ms-host-config-name': u'77bbebc9f3994cc48fbef61834a1822e.0.77bbebc9f3994cc48fbef61834a1822e.0.wala692214ui4.1.xml', 'x-ms-artifact-location': u'https://rdfepirv2sg1prdstr04.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent__Prod__2.2.10'}]
2017/07/12 06:31:10.331413 VERBOSE Agent WALinuxAgent-2.2.10 download from http://100.107.240.13:32526/extensionArtifact failed [[000009] HTTP GET failed]
2017/07/12 06:31:10.347366 WARNING Host plugin download unsuccessful
2017/07/12 06:31:10.355276 VERBOSE Using host plugin as default channel
2017/07/12 06:31:10.363528 VERBOSE HTTP proxy: [172.16.0.1:3128]
...
``` | Azure/WALinuxAgent | diff --git a/tests/pa/test_provision.py b/tests/pa/test_provision.py
index ab0a9102..2c2d2c9b 100644
--- a/tests/pa/test_provision.py
+++ b/tests/pa/test_provision.py
@@ -151,7 +151,7 @@ class TestProvision(AgentTestCase):
ph.run()
- call1 = call("Provision succeeded", duration=ANY, is_success=True)
+ call1 = call("Provisioning succeeded", duration=ANY, is_success=True)
call2 = call(ANY, is_success=True, operation=WALAEventOperation.GuestState)
ph.report_event.assert_has_calls([call1, call2])
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_issue_reference",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 3
},
"num_modified_files": 8
} | 2.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "",
"pip_packages": [
"pyasn1",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.4",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
importlib-metadata==4.8.3
iniconfig==1.1.1
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyasn1==0.5.1
pyparsing==3.1.4
pytest==7.0.1
tomli==1.2.3
typing_extensions==4.1.1
-e git+https://github.com/Azure/WALinuxAgent.git@30e638ddab04bd4ec473fe8369a86f64e717776e#egg=WALinuxAgent
zipp==3.6.0
| name: WALinuxAgent
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyasn1==0.5.1
- pyparsing==3.1.4
- pytest==7.0.1
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/WALinuxAgent
| [
"tests/pa/test_provision.py::TestProvision::test_provision_telemetry_success"
] | [] | [
"tests/pa/test_provision.py::TestProvision::test_customdata",
"tests/pa/test_provision.py::TestProvision::test_is_provisioned_is_provisioned",
"tests/pa/test_provision.py::TestProvision::test_is_provisioned_not_deprovisioned",
"tests/pa/test_provision.py::TestProvision::test_is_provisioned_not_provisioned",
"tests/pa/test_provision.py::TestProvision::test_provision",
"tests/pa/test_provision.py::TestProvision::test_provision_telemetry_fail",
"tests/pa/test_provision.py::TestProvision::test_provisioning_is_skipped_when_not_enabled"
] | [] | Apache License 2.0 | 2,004 | 1,337 | [
"azurelinuxagent/common/protocol/restapi.py",
"azurelinuxagent/common/protocol/wire.py",
"azurelinuxagent/ga/env.py",
"azurelinuxagent/ga/monitor.py",
"azurelinuxagent/ga/update.py",
"azurelinuxagent/pa/provision/cloudinit.py",
"azurelinuxagent/pa/provision/default.py",
"azurelinuxagent/pa/provision/factory.py"
] |
|
ucfopen__canvasapi-127 | 5644a6c89cc851216ec8114443d3af857ac6f70e | 2018-01-04 17:53:13 | db3c377b68f2953e1618f4e4588cc2db8603841e | diff --git a/canvasapi/canvas.py b/canvasapi/canvas.py
index b6dfd7d..2f98dcf 100644
--- a/canvasapi/canvas.py
+++ b/canvasapi/canvas.py
@@ -472,20 +472,19 @@ class Canvas(object):
:type recipients: `list` of `str`
:param body: The body of the message being added.
:type body: `str`
- :rtype: :class:`canvasapi.paginated_list.PaginatedList` of
- :class:`canvasapi.conversation.Conversation`
+ :rtype: list of :class:`canvasapi.conversation.Conversation`
"""
from canvasapi.conversation import Conversation
- return PaginatedList(
- Conversation,
- self.__requester,
+ kwargs['recipients'] = recipients
+ kwargs['body'] = body
+
+ response = self.__requester.request(
'POST',
'conversations',
- recipients=recipients,
- body=body,
_kwargs=combine_kwargs(**kwargs)
)
+ return [Conversation(self.__requester, convo) for convo in response.json()]
def get_conversation(self, conversation, **kwargs):
"""
| Create Conversation method not creating
`Canvas.create_conversation()` doesn't appear to be working properly.
If I try to create a conversation using the code bellow, nothing happens.
```
convo = canvas.create_conversation([USER_ID], 'Hello!')
```
The method returns a `PaginatedList`. However, if I iterate over the `PaginatedList`, the conversations are then created in Canvas. I suspect this is due to an odd interaction between PaginatedList and `POST` requests.
Additionally, in my testing, I was unable to successfully add a subject as a keyword argument:
```
convo = canvas.create_conversation([USER_ID], 'Hello!', subject='Test Subject')
```
The subject remained "(No Subject)" | ucfopen/canvasapi | diff --git a/tests/fixtures/conversation.json b/tests/fixtures/conversation.json
index 00b3e68..3c1bc71 100644
--- a/tests/fixtures/conversation.json
+++ b/tests/fixtures/conversation.json
@@ -51,16 +51,163 @@
"status_code": 200
},
"create_conversation": {
+ "method": "POST",
+ "endpoint": "conversations",
+ "data": [{
+ "id": 1,
+ "subject": "A Conversation",
+ "workflow_state": "unread",
+ "last_message": "Hello, World!",
+ "last_message_at": "2018-01-01T00:00:00Z",
+ "last_authored_message": "Hello, World!",
+ "last_authored_message_at": "2018-01-01T00:00:00Z",
+ "message_count": 1,
+ "subscribed": true,
+ "private": true,
+ "starred": false,
+ "properties": ["last_author"],
+ "messages": [{
+ "id": 1,
+ "author_id": 1,
+ "created_at": "2018-01-01T00:00:00Z",
+ "generated": false,
+ "body": "Hello, World!",
+ "forwarded_messages": [],
+ "attachments": [],
+ "media_comment": null,
+ "participating_user_ids": [1, 2]
+ }],
+ "audience": [2],
+ "audience_contexts": {
+ "courses": {
+ "1": ["StudentEnrollment"]
+ },
+ "groups": {}
+ },
+ "participants": [
+ {
+ "id": 1,
+ "name": "John",
+ "common_courses": {},
+ "common_groups": {}
+ },
+ {
+ "id": 2,
+ "name": "Joe",
+ "common_courses": {
+ "1": ["StudentEnrollment"]
+ },
+ "common_groups": {}
+ }
+ ],
+ "visible": true,
+ "context_code": null
+ }],
+ "status_code": 200
+ },
+ "create_conversation_multiple": {
"method": "POST",
"endpoint": "conversations",
"data": [
{
- "recipients": ["1", "2"],
- "body": "Test Conversation Body"
+ "id": 1,
+ "subject": "A Conversation",
+ "workflow_state": "unread",
+ "last_message": "Hey guys!",
+ "last_message_at": "2018-01-01T00:00:00Z",
+ "last_authored_message": "Hey guys!",
+ "last_authored_message_at": "2018-01-01T00:00:00Z",
+ "message_count": 1,
+ "subscribed": true,
+ "private": true,
+ "starred": false,
+ "properties": ["last_author"],
+ "messages": [{
+ "id": 1,
+ "author_id": 1,
+ "created_at": "2018-01-01T00:00:00Z",
+ "generated": false,
+ "body": "Hey guys!",
+ "forwarded_messages": [],
+ "attachments": [],
+ "media_comment": null,
+ "participating_user_ids": [1, 2]
+ }],
+ "audience": [2],
+ "audience_contexts": {
+ "courses": {
+ "1": ["StudentEnrollment"]
+ },
+ "groups": {}
+ },
+ "participants": [
+ {
+ "id": 1,
+ "name": "John",
+ "common_courses": {},
+ "common_groups": {}
+ },
+ {
+ "id": 2,
+ "name": "Joe",
+ "common_courses": {
+ "1": ["StudentEnrollment"]
+ },
+ "common_groups": {}
+ }
+ ],
+ "visible": true,
+ "context_code": null
},
{
- "recipients": ["3", "4"],
- "body": "Test Conversation Body 2"
+ "id": 2,
+ "subject": "A Conversation",
+ "workflow_state": "unread",
+ "last_message": "Hey guys!",
+ "last_message_at": "2018-01-01T00:00:00Z",
+ "last_authored_message": "Hey guys!",
+ "last_authored_message_at": "2018-01-01T00:00:00Z",
+ "message_count": 1,
+ "subscribed": true,
+ "private": true,
+ "starred": false,
+ "properties": ["last_author"],
+ "messages": [{
+ "id": 2,
+ "author_id": 1,
+ "created_at": "2018-01-01T00:00:00Z",
+ "generated": false,
+ "body": "Hey guys!",
+ "forwarded_messages": [],
+ "attachments": [],
+ "media_comment": null,
+ "participating_user_ids": [1, 3]
+ }],
+ "audience": [3],
+ "audience_contexts": {
+ "courses": {
+ "1": ["StudentEnrollment"]
+ },
+ "groups": {}
+ },
+ "participants": [
+ {
+ "id": 1,
+ "name": "John",
+ "common_courses": {},
+ "common_groups": {}
+ },
+ {
+ "id": 3,
+ "name": "Jack",
+ "common_courses": {
+ "1": ["StudentEnrollment"]
+ },
+ "common_groups": {}
+ }
+ ],
+ "visible": true,
+ "context_code": null
}
],
"status_code": 200
diff --git a/tests/test_canvas.py b/tests/test_canvas.py
index 3d75228..c3c59c1 100644
--- a/tests/test_canvas.py
+++ b/tests/test_canvas.py
@@ -331,14 +331,39 @@ class TestCanvas(unittest.TestCase):
def test_create_conversation(self, m):
register_uris({'conversation': ['create_conversation']}, m)
- recipients = ['1', '2']
- body = 'Test Conversation Body'
+ recipients = ['2']
+ body = 'Hello, World!'
- conversations = self.canvas.create_conversation(recipients=recipients, body=body)
- conversation_list = [conversation for conversation in conversations]
+ conversations = self.canvas.create_conversation(
+ recipients=recipients,
+ body=body
+ )
+ self.assertIsInstance(conversations, list)
+ self.assertEqual(len(conversations), 1)
+ self.assertIsInstance(conversations[0], Conversation)
+ self.assertTrue(hasattr(conversations[0], 'last_message'))
+ self.assertEqual(conversations[0].last_message, body)
- self.assertIsInstance(conversation_list[0], Conversation)
- self.assertEqual(len(conversation_list), 2)
+ def test_create_conversation_multiple_people(self, m):
+ register_uris({'conversation': ['create_conversation_multiple']}, m)
+
+ recipients = ['2', '3']
+ body = 'Hey guys!'
+
+ conversations = self.canvas.create_conversation(
+ recipients=recipients,
+ body=body
+ )
+ self.assertIsInstance(conversations, list)
+ self.assertEqual(len(conversations), 2)
+
+ self.assertIsInstance(conversations[0], Conversation)
+ self.assertTrue(hasattr(conversations[0], 'last_message'))
+ self.assertEqual(conversations[0].last_message, body)
+
+ self.assertIsInstance(conversations[1], Conversation)
+ self.assertTrue(hasattr(conversations[1], 'last_message'))
+ self.assertEqual(conversations[1].last_message, body)
# get_conversation()
def test_get_conversation(self, m):
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 1
},
"num_modified_files": 1
} | 0.7 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"requests_mock"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/ucfopen/canvasapi.git@5644a6c89cc851216ec8114443d3af857ac6f70e#egg=canvasapi
certifi==2025.1.31
charset-normalizer==3.4.1
coverage==7.8.0
exceptiongroup==1.2.2
execnet==2.1.1
idna==3.10
iniconfig==2.1.0
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
pytest-cov==6.0.0
pytest-xdist==3.6.1
pytz==2025.2
requests==2.32.3
requests-mock==1.12.1
six==1.17.0
tomli==2.2.1
urllib3==2.3.0
| name: canvasapi
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- certifi==2025.1.31
- charset-normalizer==3.4.1
- coverage==7.8.0
- exceptiongroup==1.2.2
- execnet==2.1.1
- idna==3.10
- iniconfig==2.1.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- pytest-cov==6.0.0
- pytest-xdist==3.6.1
- pytz==2025.2
- requests==2.32.3
- requests-mock==1.12.1
- six==1.17.0
- tomli==2.2.1
- urllib3==2.3.0
prefix: /opt/conda/envs/canvasapi
| [
"tests/test_canvas.py::TestCanvas::test_create_conversation",
"tests/test_canvas.py::TestCanvas::test_create_conversation_multiple_people"
] | [
"tests/test_canvas.py::TestCanvas::test_init_deprecate_url_contains_version"
] | [
"tests/test_canvas.py::TestCanvas::test_clear_course_nicknames",
"tests/test_canvas.py::TestCanvas::test_conversations_batch_update",
"tests/test_canvas.py::TestCanvas::test_conversations_batch_updated_fail_on_event",
"tests/test_canvas.py::TestCanvas::test_conversations_batch_updated_fail_on_ids",
"tests/test_canvas.py::TestCanvas::test_conversations_get_running_batches",
"tests/test_canvas.py::TestCanvas::test_conversations_mark_all_as_read",
"tests/test_canvas.py::TestCanvas::test_conversations_unread_count",
"tests/test_canvas.py::TestCanvas::test_create_account",
"tests/test_canvas.py::TestCanvas::test_create_appointment_group",
"tests/test_canvas.py::TestCanvas::test_create_appointment_group_fail_on_context_codes",
"tests/test_canvas.py::TestCanvas::test_create_appointment_group_fail_on_title",
"tests/test_canvas.py::TestCanvas::test_create_calendar_event",
"tests/test_canvas.py::TestCanvas::test_create_calendar_event_fail",
"tests/test_canvas.py::TestCanvas::test_create_group",
"tests/test_canvas.py::TestCanvas::test_get_account",
"tests/test_canvas.py::TestCanvas::test_get_account_fail",
"tests/test_canvas.py::TestCanvas::test_get_account_sis_id",
"tests/test_canvas.py::TestCanvas::test_get_accounts",
"tests/test_canvas.py::TestCanvas::test_get_activity_stream_summary",
"tests/test_canvas.py::TestCanvas::test_get_appointment_group",
"tests/test_canvas.py::TestCanvas::test_get_calendar_event",
"tests/test_canvas.py::TestCanvas::test_get_conversation",
"tests/test_canvas.py::TestCanvas::test_get_conversations",
"tests/test_canvas.py::TestCanvas::test_get_course",
"tests/test_canvas.py::TestCanvas::test_get_course_accounts",
"tests/test_canvas.py::TestCanvas::test_get_course_fail",
"tests/test_canvas.py::TestCanvas::test_get_course_nickname",
"tests/test_canvas.py::TestCanvas::test_get_course_nickname_fail",
"tests/test_canvas.py::TestCanvas::test_get_course_nicknames",
"tests/test_canvas.py::TestCanvas::test_get_course_non_unicode_char",
"tests/test_canvas.py::TestCanvas::test_get_course_sis_id",
"tests/test_canvas.py::TestCanvas::test_get_course_with_start_date",
"tests/test_canvas.py::TestCanvas::test_get_courses",
"tests/test_canvas.py::TestCanvas::test_get_file",
"tests/test_canvas.py::TestCanvas::test_get_group",
"tests/test_canvas.py::TestCanvas::test_get_group_category",
"tests/test_canvas.py::TestCanvas::test_get_group_sis_id",
"tests/test_canvas.py::TestCanvas::test_get_outcome",
"tests/test_canvas.py::TestCanvas::test_get_outcome_group",
"tests/test_canvas.py::TestCanvas::test_get_root_outcome_group",
"tests/test_canvas.py::TestCanvas::test_get_section",
"tests/test_canvas.py::TestCanvas::test_get_section_sis_id",
"tests/test_canvas.py::TestCanvas::test_get_todo_items",
"tests/test_canvas.py::TestCanvas::test_get_upcoming_events",
"tests/test_canvas.py::TestCanvas::test_get_user",
"tests/test_canvas.py::TestCanvas::test_get_user_by_id_type",
"tests/test_canvas.py::TestCanvas::test_get_user_fail",
"tests/test_canvas.py::TestCanvas::test_get_user_self",
"tests/test_canvas.py::TestCanvas::test_list_appointment_groups",
"tests/test_canvas.py::TestCanvas::test_list_calendar_events",
"tests/test_canvas.py::TestCanvas::test_list_group_participants",
"tests/test_canvas.py::TestCanvas::test_list_user_participants",
"tests/test_canvas.py::TestCanvas::test_reserve_time_slot",
"tests/test_canvas.py::TestCanvas::test_reserve_time_slot_by_participant_id",
"tests/test_canvas.py::TestCanvas::test_search_accounts",
"tests/test_canvas.py::TestCanvas::test_search_all_courses",
"tests/test_canvas.py::TestCanvas::test_search_recipients",
"tests/test_canvas.py::TestCanvas::test_set_course_nickname"
] | [] | MIT License | 2,007 | 284 | [
"canvasapi/canvas.py"
] |
|
google__mobly-380 | 7e5e62af4ab4537bf619f0ee403c05f004c5baf0 | 2018-01-04 19:45:31 | 7e5e62af4ab4537bf619f0ee403c05f004c5baf0 | dthkao: <img class="emoji" title=":lgtm:" alt=":lgtm:" align="absmiddle" src="https://reviewable.io/lgtm.png" height="20" width="61"/>
---
Review status: 0 of 2 files reviewed at latest revision, all discussions resolved, some commit checks failed.
---
*Comments from [Reviewable](https://reviewable.io:443/reviews/google/mobly/380#-:-L28eYMOEVk9ut_WEarD:bnfp4nl)*
<!-- Sent from Reviewable.io -->
| diff --git a/mobly/controllers/android_device.py b/mobly/controllers/android_device.py
index e4f05de..4eee55b 100644
--- a/mobly/controllers/android_device.py
+++ b/mobly/controllers/android_device.py
@@ -421,7 +421,7 @@ class AndroidDevice(object):
"""
def __init__(self, serial=''):
- self._serial = serial
+ self._serial = str(serial)
# logging.log_path only exists when this is used in an Mobly test run.
self._log_path_base = getattr(logging, 'log_path', '/tmp/logs')
self._log_path = os.path.join(self._log_path_base,
@@ -570,6 +570,7 @@ class AndroidDevice(object):
Raises:
DeviceError: tries to update serial when any service is running.
"""
+ new_serial = str(new_serial)
if self.has_active_service:
raise DeviceError(
self,
| adb call fails if numeric serial numbers are used
If users use devices whose serials are entirely numeric and do not wrap the serial with quotation mark, we get:
```
Traceback (most recent call last):
File ".../mobly/test_runner.py", line 420, in _register_controller
objects = create(controller_config)
File ".../mobly/controllers/android_device.py", line 87, in create
ads = get_instances_with_configs(configs)
File ".../mobly/controllers/android_device.py", line 257, in get_instances_with_configs
ad = AndroidDevice(serial)
File ".../mobly/controllers/android_device.py", line 402, in __init__
if not self.is_bootloader and self.is_rootable:
File ".../mobly/controllers/android_device.py", line 637, in is_rootable
build_type_output = self.adb.getprop('ro.build.type').lower()
File ".../mobly/controllers/android_device_lib/adb.py", line 175, in getprop
return self.shell('getprop %s' % prop_name).decode('utf-8').strip()
File ".../mobly/controllers/android_device_lib/adb.py", line 199, in adb_call
clean_name, args, shell=shell, timeout=timeout)
File ".../mobly/controllers/android_device_lib/adb.py", line 161, in _exec_adb_cmd
return self._exec_cmd(adb_cmd, shell=shell, timeout=timeout)
File ".../mobly/controllers/android_device_lib/adb.py", line 122, in _exec_cmd
args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=shell)
File "/usr/lib/python2.7/subprocess.py", line 711, in __init__
errread, errwrite)
File "/usr/lib/python2.7/subprocess.py", line 1343, in _execute_child
raise child_exception
TypeError: execv() arg 2 must contain only strings
```
| google/mobly | diff --git a/tests/mobly/controllers/android_device_test.py b/tests/mobly/controllers/android_device_test.py
index e57db60..f9c6cf3 100755
--- a/tests/mobly/controllers/android_device_test.py
+++ b/tests/mobly/controllers/android_device_test.py
@@ -233,17 +233,17 @@ class AndroidDeviceTest(unittest.TestCase):
@mock.patch(
'mobly.controllers.android_device_lib.adb.AdbProxy',
- return_value=mock_android_device.MockAdbProxy(1))
+ return_value=mock_android_device.MockAdbProxy('1'))
@mock.patch(
'mobly.controllers.android_device_lib.fastboot.FastbootProxy',
- return_value=mock_android_device.MockFastbootProxy(1))
+ return_value=mock_android_device.MockFastbootProxy('1'))
def test_AndroidDevice_instantiation(self, MockFastboot, MockAdbProxy):
"""Verifies the AndroidDevice object's basic attributes are correctly
set after instantiation.
"""
mock_serial = 1
ad = android_device.AndroidDevice(serial=mock_serial)
- self.assertEqual(ad.serial, 1)
+ self.assertEqual(ad.serial, '1')
self.assertEqual(ad.model, 'fakemodel')
self.assertIsNone(ad._adb_logcat_process)
self.assertIsNone(ad.adb_logcat_file_path)
@@ -253,29 +253,29 @@ class AndroidDeviceTest(unittest.TestCase):
@mock.patch(
'mobly.controllers.android_device_lib.adb.AdbProxy',
- return_value=mock_android_device.MockAdbProxy(1))
+ return_value=mock_android_device.MockAdbProxy('1'))
@mock.patch(
'mobly.controllers.android_device_lib.fastboot.FastbootProxy',
- return_value=mock_android_device.MockFastbootProxy(1))
+ return_value=mock_android_device.MockFastbootProxy('1'))
def test_AndroidDevice_build_info(self, MockFastboot, MockAdbProxy):
"""Verifies the AndroidDevice object's basic attributes are correctly
set after instantiation.
"""
- ad = android_device.AndroidDevice(serial=1)
+ ad = android_device.AndroidDevice(serial='1')
build_info = ad.build_info
self.assertEqual(build_info['build_id'], 'AB42')
self.assertEqual(build_info['build_type'], 'userdebug')
@mock.patch(
'mobly.controllers.android_device_lib.adb.AdbProxy',
- return_value=mock_android_device.MockAdbProxy(1))
+ return_value=mock_android_device.MockAdbProxy('1'))
@mock.patch(
'mobly.controllers.android_device_lib.fastboot.FastbootProxy',
- return_value=mock_android_device.MockFastbootProxy(1))
+ return_value=mock_android_device.MockFastbootProxy('1'))
def test_AndroidDevice_device_info(self, MockFastboot, MockAdbProxy):
ad = android_device.AndroidDevice(serial=1)
device_info = ad.device_info
- self.assertEqual(device_info['serial'], 1)
+ self.assertEqual(device_info['serial'], '1')
self.assertEqual(device_info['model'], 'fakemodel')
self.assertEqual(device_info['build_info']['build_id'], 'AB42')
self.assertEqual(device_info['build_info']['build_type'], 'userdebug')
@@ -297,7 +297,7 @@ class AndroidDeviceTest(unittest.TestCase):
"""Verifies AndroidDevice.take_bug_report calls the correct adb command
and writes the bugreport file to the correct path.
"""
- mock_serial = 1
+ mock_serial = '1'
ad = android_device.AndroidDevice(serial=mock_serial)
ad.take_bug_report('test_something', 'sometime')
expected_path = os.path.join(
@@ -306,17 +306,17 @@ class AndroidDeviceTest(unittest.TestCase):
@mock.patch(
'mobly.controllers.android_device_lib.adb.AdbProxy',
- return_value=mock_android_device.MockAdbProxy(1, fail_br=True))
+ return_value=mock_android_device.MockAdbProxy('1', fail_br=True))
@mock.patch(
'mobly.controllers.android_device_lib.fastboot.FastbootProxy',
- return_value=mock_android_device.MockFastbootProxy(1))
+ return_value=mock_android_device.MockFastbootProxy('1'))
@mock.patch('mobly.utils.create_dir')
def test_AndroidDevice_take_bug_report_fail(self, create_dir_mock,
FastbootProxy, MockAdbProxy):
"""Verifies AndroidDevice.take_bug_report writes out the correct message
when taking bugreport fails.
"""
- mock_serial = 1
+ mock_serial = '1'
ad = android_device.AndroidDevice(serial=mock_serial)
expected_msg = '.* Failed to take bugreport.'
with self.assertRaisesRegex(android_device.Error, expected_msg):
@@ -324,14 +324,14 @@ class AndroidDeviceTest(unittest.TestCase):
@mock.patch(
'mobly.controllers.android_device_lib.adb.AdbProxy',
- return_value=mock_android_device.MockAdbProxy(1))
+ return_value=mock_android_device.MockAdbProxy('1'))
@mock.patch(
'mobly.controllers.android_device_lib.fastboot.FastbootProxy',
- return_value=mock_android_device.MockFastbootProxy(1))
+ return_value=mock_android_device.MockFastbootProxy('1'))
@mock.patch('mobly.utils.create_dir')
def test_AndroidDevice_take_bug_report_with_destination(
self, create_dir_mock, FastbootProxy, MockAdbProxy):
- mock_serial = 1
+ mock_serial = '1'
ad = android_device.AndroidDevice(serial=mock_serial)
dest = tempfile.gettempdir()
ad.take_bug_report("test_something", "sometime", destination=dest)
@@ -341,17 +341,17 @@ class AndroidDeviceTest(unittest.TestCase):
@mock.patch(
'mobly.controllers.android_device_lib.adb.AdbProxy',
return_value=mock_android_device.MockAdbProxy(
- 1, fail_br_before_N=True))
+ '1', fail_br_before_N=True))
@mock.patch(
'mobly.controllers.android_device_lib.fastboot.FastbootProxy',
- return_value=mock_android_device.MockFastbootProxy(1))
+ return_value=mock_android_device.MockFastbootProxy('1'))
@mock.patch('mobly.utils.create_dir')
def test_AndroidDevice_take_bug_report_fallback(
self, create_dir_mock, FastbootProxy, MockAdbProxy):
"""Verifies AndroidDevice.take_bug_report falls back to traditional
bugreport on builds that do not have bugreportz.
"""
- mock_serial = 1
+ mock_serial = '1'
ad = android_device.AndroidDevice(serial=mock_serial)
ad.take_bug_report('test_something', 'sometime')
expected_path = os.path.join(
@@ -360,10 +360,10 @@ class AndroidDeviceTest(unittest.TestCase):
@mock.patch(
'mobly.controllers.android_device_lib.adb.AdbProxy',
- return_value=mock_android_device.MockAdbProxy(1))
+ return_value=mock_android_device.MockAdbProxy('1'))
@mock.patch(
'mobly.controllers.android_device_lib.fastboot.FastbootProxy',
- return_value=mock_android_device.MockFastbootProxy(1))
+ return_value=mock_android_device.MockFastbootProxy('1'))
@mock.patch('mobly.utils.create_dir')
@mock.patch(
'mobly.utils.start_standing_subprocess', return_value='process')
@@ -375,7 +375,7 @@ class AndroidDeviceTest(unittest.TestCase):
object, including various function calls and the expected behaviors of
the calls.
"""
- mock_serial = 1
+ mock_serial = '1'
ad = android_device.AndroidDevice(serial=mock_serial)
expected_msg = '.* No ongoing adb logcat collection found.'
# Expect error if stop is called before start.
@@ -406,10 +406,10 @@ class AndroidDeviceTest(unittest.TestCase):
@mock.patch(
'mobly.controllers.android_device_lib.adb.AdbProxy',
- return_value=mock_android_device.MockAdbProxy(1))
+ return_value=mock_android_device.MockAdbProxy('1'))
@mock.patch(
'mobly.controllers.android_device_lib.fastboot.FastbootProxy',
- return_value=mock_android_device.MockFastbootProxy(1))
+ return_value=mock_android_device.MockFastbootProxy('1'))
@mock.patch('mobly.utils.create_dir')
@mock.patch(
'mobly.utils.start_standing_subprocess', return_value='process')
@@ -421,7 +421,7 @@ class AndroidDeviceTest(unittest.TestCase):
object, including various function calls and the expected behaviors of
the calls.
"""
- mock_serial = 1
+ mock_serial = '1'
ad = android_device.AndroidDevice(serial=mock_serial)
ad.adb_logcat_param = '-b radio'
expected_msg = '.* No ongoing adb logcat collection found.'
@@ -442,17 +442,17 @@ class AndroidDeviceTest(unittest.TestCase):
@mock.patch(
'mobly.controllers.android_device_lib.adb.AdbProxy',
- return_value=mock_android_device.MockAdbProxy(1))
+ return_value=mock_android_device.MockAdbProxy('1'))
@mock.patch(
'mobly.controllers.android_device_lib.fastboot.FastbootProxy',
- return_value=mock_android_device.MockFastbootProxy(1))
+ return_value=mock_android_device.MockFastbootProxy('1'))
@mock.patch(
'mobly.utils.start_standing_subprocess', return_value='process')
@mock.patch('mobly.utils.stop_standing_subprocess')
def test_AndroidDevice_change_log_path(self, stop_proc_mock,
start_proc_mock, FastbootProxy,
MockAdbProxy):
- ad = android_device.AndroidDevice(serial=1)
+ ad = android_device.AndroidDevice(serial='1')
ad.start_adb_logcat()
ad.stop_adb_logcat()
old_path = ad.log_path
@@ -463,17 +463,17 @@ class AndroidDeviceTest(unittest.TestCase):
@mock.patch(
'mobly.controllers.android_device_lib.adb.AdbProxy',
- return_value=mock_android_device.MockAdbProxy(1))
+ return_value=mock_android_device.MockAdbProxy('1'))
@mock.patch(
'mobly.controllers.android_device_lib.fastboot.FastbootProxy',
- return_value=mock_android_device.MockFastbootProxy(1))
+ return_value=mock_android_device.MockFastbootProxy('1'))
@mock.patch(
'mobly.utils.start_standing_subprocess', return_value='process')
@mock.patch('mobly.utils.stop_standing_subprocess')
def test_AndroidDevice_change_log_path_no_log_exists(
self, stop_proc_mock, start_proc_mock, FastbootProxy,
MockAdbProxy):
- ad = android_device.AndroidDevice(serial=1)
+ ad = android_device.AndroidDevice(serial='1')
old_path = ad.log_path
new_log_path = tempfile.mkdtemp()
ad.log_path = new_log_path
@@ -482,10 +482,10 @@ class AndroidDeviceTest(unittest.TestCase):
@mock.patch(
'mobly.controllers.android_device_lib.adb.AdbProxy',
- return_value=mock_android_device.MockAdbProxy(1))
+ return_value=mock_android_device.MockAdbProxy('1'))
@mock.patch(
'mobly.controllers.android_device_lib.fastboot.FastbootProxy',
- return_value=mock_android_device.MockFastbootProxy(1))
+ return_value=mock_android_device.MockFastbootProxy('1'))
@mock.patch('mobly.utils.create_dir')
@mock.patch(
'mobly.utils.start_standing_subprocess', return_value='process')
@@ -493,7 +493,7 @@ class AndroidDeviceTest(unittest.TestCase):
def test_AndroidDevice_change_log_path_with_service(
self, stop_proc_mock, start_proc_mock, creat_dir_mock,
FastbootProxy, MockAdbProxy):
- ad = android_device.AndroidDevice(serial=1)
+ ad = android_device.AndroidDevice(serial='1')
ad.start_adb_logcat()
new_log_path = tempfile.mkdtemp()
expected_msg = '.* Cannot change `log_path` when there is service running.'
@@ -502,10 +502,10 @@ class AndroidDeviceTest(unittest.TestCase):
@mock.patch(
'mobly.controllers.android_device_lib.adb.AdbProxy',
- return_value=mock_android_device.MockAdbProxy(1))
+ return_value=mock_android_device.MockAdbProxy('1'))
@mock.patch(
'mobly.controllers.android_device_lib.fastboot.FastbootProxy',
- return_value=mock_android_device.MockFastbootProxy(1))
+ return_value=mock_android_device.MockFastbootProxy('1'))
@mock.patch('mobly.utils.create_dir')
@mock.patch(
'mobly.utils.start_standing_subprocess', return_value='process')
@@ -513,7 +513,7 @@ class AndroidDeviceTest(unittest.TestCase):
def test_AndroidDevice_change_log_path_with_existing_file(
self, stop_proc_mock, start_proc_mock, creat_dir_mock,
FastbootProxy, MockAdbProxy):
- ad = android_device.AndroidDevice(serial=1)
+ ad = android_device.AndroidDevice(serial='1')
new_log_path = tempfile.mkdtemp()
with open(os.path.join(new_log_path, 'file.txt'), 'w') as f:
f.write('hahah.')
@@ -523,10 +523,10 @@ class AndroidDeviceTest(unittest.TestCase):
@mock.patch(
'mobly.controllers.android_device_lib.adb.AdbProxy',
- return_value=mock_android_device.MockAdbProxy(1))
+ return_value=mock_android_device.MockAdbProxy('1'))
@mock.patch(
'mobly.controllers.android_device_lib.fastboot.FastbootProxy',
- return_value=mock_android_device.MockFastbootProxy(1))
+ return_value=mock_android_device.MockFastbootProxy('1'))
@mock.patch('mobly.utils.create_dir')
@mock.patch(
'mobly.utils.start_standing_subprocess', return_value='process')
@@ -534,19 +534,19 @@ class AndroidDeviceTest(unittest.TestCase):
def test_AndroidDevice_update_serial(self, stop_proc_mock, start_proc_mock,
creat_dir_mock, FastbootProxy,
MockAdbProxy):
- ad = android_device.AndroidDevice(serial=1)
- ad.update_serial(2)
- self.assertEqual(ad.serial, 2)
+ ad = android_device.AndroidDevice(serial='1')
+ ad.update_serial('2')
+ self.assertEqual(ad.serial, '2')
self.assertEqual(ad.debug_tag, ad.serial)
self.assertEqual(ad.adb.serial, ad.serial)
self.assertEqual(ad.fastboot.serial, ad.serial)
@mock.patch(
'mobly.controllers.android_device_lib.adb.AdbProxy',
- return_value=mock_android_device.MockAdbProxy(1))
+ return_value=mock_android_device.MockAdbProxy('1'))
@mock.patch(
'mobly.controllers.android_device_lib.fastboot.FastbootProxy',
- return_value=mock_android_device.MockFastbootProxy(1))
+ return_value=mock_android_device.MockFastbootProxy('1'))
@mock.patch('mobly.utils.create_dir')
@mock.patch(
'mobly.utils.start_standing_subprocess', return_value='process')
@@ -554,18 +554,18 @@ class AndroidDeviceTest(unittest.TestCase):
def test_AndroidDevice_update_serial_with_service_running(
self, stop_proc_mock, start_proc_mock, creat_dir_mock,
FastbootProxy, MockAdbProxy):
- ad = android_device.AndroidDevice(serial=1)
+ ad = android_device.AndroidDevice(serial='1')
ad.start_adb_logcat()
expected_msg = '.* Cannot change device serial number when there is service running.'
with self.assertRaisesRegex(android_device.Error, expected_msg):
- ad.update_serial(2)
+ ad.update_serial('2')
@mock.patch(
'mobly.controllers.android_device_lib.adb.AdbProxy',
- return_value=mock_android_device.MockAdbProxy(1))
+ return_value=mock_android_device.MockAdbProxy('1'))
@mock.patch(
'mobly.controllers.android_device_lib.fastboot.FastbootProxy',
- return_value=mock_android_device.MockFastbootProxy(1))
+ return_value=mock_android_device.MockFastbootProxy('1'))
@mock.patch(
'mobly.utils.start_standing_subprocess', return_value='process')
@mock.patch('mobly.utils.stop_standing_subprocess')
@@ -579,7 +579,7 @@ class AndroidDeviceTest(unittest.TestCase):
file, locates the correct adb log lines within the given time range,
and writes the lines to the correct output file.
"""
- mock_serial = 1
+ mock_serial = '1'
ad = android_device.AndroidDevice(serial=mock_serial)
# Direct the log path of the ad to a temp dir to avoid racing.
ad._log_path_base = self.tmp_dir
@@ -606,32 +606,32 @@ class AndroidDeviceTest(unittest.TestCase):
@mock.patch(
'mobly.controllers.android_device_lib.adb.AdbProxy',
- return_value=mock_android_device.MockAdbProxy(1))
+ return_value=mock_android_device.MockAdbProxy('1'))
@mock.patch(
'mobly.controllers.android_device_lib.fastboot.FastbootProxy',
- return_value=mock_android_device.MockFastbootProxy(1))
+ return_value=mock_android_device.MockFastbootProxy('1'))
@mock.patch(
'mobly.controllers.android_device_lib.snippet_client.SnippetClient')
@mock.patch('mobly.utils.get_available_host_port')
def test_AndroidDevice_load_snippet(self, MockGetPort, MockSnippetClient,
MockFastboot, MockAdbProxy):
- ad = android_device.AndroidDevice(serial=1)
+ ad = android_device.AndroidDevice(serial='1')
ad.load_snippet('snippet', MOCK_SNIPPET_PACKAGE_NAME)
self.assertTrue(hasattr(ad, 'snippet'))
@mock.patch(
'mobly.controllers.android_device_lib.adb.AdbProxy',
- return_value=mock_android_device.MockAdbProxy(1))
+ return_value=mock_android_device.MockAdbProxy('1'))
@mock.patch(
'mobly.controllers.android_device_lib.fastboot.FastbootProxy',
- return_value=mock_android_device.MockFastbootProxy(1))
+ return_value=mock_android_device.MockFastbootProxy('1'))
@mock.patch(
'mobly.controllers.android_device_lib.snippet_client.SnippetClient',
return_value=MockSnippetClient)
@mock.patch('mobly.utils.get_available_host_port')
def test_AndroidDevice_load_snippet_dup_package(
self, MockGetPort, MockSnippetClient, MockFastboot, MockAdbProxy):
- ad = android_device.AndroidDevice(serial=1)
+ ad = android_device.AndroidDevice(serial='1')
ad.load_snippet('snippet', MOCK_SNIPPET_PACKAGE_NAME)
expected_msg = ('Snippet package "%s" has already been loaded under '
'name "snippet".') % MOCK_SNIPPET_PACKAGE_NAME
@@ -640,17 +640,17 @@ class AndroidDeviceTest(unittest.TestCase):
@mock.patch(
'mobly.controllers.android_device_lib.adb.AdbProxy',
- return_value=mock_android_device.MockAdbProxy(1))
+ return_value=mock_android_device.MockAdbProxy('1'))
@mock.patch(
'mobly.controllers.android_device_lib.fastboot.FastbootProxy',
- return_value=mock_android_device.MockFastbootProxy(1))
+ return_value=mock_android_device.MockFastbootProxy('1'))
@mock.patch(
'mobly.controllers.android_device_lib.snippet_client.SnippetClient',
return_value=MockSnippetClient)
@mock.patch('mobly.utils.get_available_host_port')
def test_AndroidDevice_load_snippet_dup_snippet_name(
self, MockGetPort, MockSnippetClient, MockFastboot, MockAdbProxy):
- ad = android_device.AndroidDevice(serial=1)
+ ad = android_device.AndroidDevice(serial='1')
ad.load_snippet('snippet', MOCK_SNIPPET_PACKAGE_NAME)
expected_msg = ('Attribute "%s" is already registered with package '
'"%s", it cannot be used again.') % (
@@ -660,16 +660,16 @@ class AndroidDeviceTest(unittest.TestCase):
@mock.patch(
'mobly.controllers.android_device_lib.adb.AdbProxy',
- return_value=mock_android_device.MockAdbProxy(1))
+ return_value=mock_android_device.MockAdbProxy('1'))
@mock.patch(
'mobly.controllers.android_device_lib.fastboot.FastbootProxy',
- return_value=mock_android_device.MockFastbootProxy(1))
+ return_value=mock_android_device.MockFastbootProxy('1'))
@mock.patch(
'mobly.controllers.android_device_lib.snippet_client.SnippetClient')
@mock.patch('mobly.utils.get_available_host_port')
def test_AndroidDevice_load_snippet_dup_attribute_name(
self, MockGetPort, MockSnippetClient, MockFastboot, MockAdbProxy):
- ad = android_device.AndroidDevice(serial=1)
+ ad = android_device.AndroidDevice(serial='1')
expected_msg = ('Attribute "%s" already exists, please use a different'
' name') % 'adb'
with self.assertRaisesRegex(android_device.Error, expected_msg):
@@ -677,10 +677,10 @@ class AndroidDeviceTest(unittest.TestCase):
@mock.patch(
'mobly.controllers.android_device_lib.adb.AdbProxy',
- return_value=mock_android_device.MockAdbProxy(1))
+ return_value=mock_android_device.MockAdbProxy('1'))
@mock.patch(
'mobly.controllers.android_device_lib.fastboot.FastbootProxy',
- return_value=mock_android_device.MockFastbootProxy(1))
+ return_value=mock_android_device.MockFastbootProxy('1'))
@mock.patch(
'mobly.controllers.android_device_lib.snippet_client.SnippetClient')
@mock.patch('mobly.utils.get_available_host_port')
@@ -697,7 +697,7 @@ class AndroidDeviceTest(unittest.TestCase):
side_effect=expected_e)
MockSnippetClient.stop_app = mock.Mock(
side_effect=Exception('stop failed.'))
- ad = android_device.AndroidDevice(serial=1)
+ ad = android_device.AndroidDevice(serial='1')
try:
ad.load_snippet('snippet', MOCK_SNIPPET_PACKAGE_NAME)
except Exception as e:
@@ -705,30 +705,30 @@ class AndroidDeviceTest(unittest.TestCase):
@mock.patch(
'mobly.controllers.android_device_lib.adb.AdbProxy',
- return_value=mock_android_device.MockAdbProxy(1))
+ return_value=mock_android_device.MockAdbProxy('1'))
@mock.patch(
'mobly.controllers.android_device_lib.fastboot.FastbootProxy',
- return_value=mock_android_device.MockFastbootProxy(1))
+ return_value=mock_android_device.MockFastbootProxy('1'))
@mock.patch(
'mobly.controllers.android_device_lib.snippet_client.SnippetClient')
@mock.patch('mobly.utils.get_available_host_port')
def test_AndroidDevice_snippet_cleanup(
self, MockGetPort, MockSnippetClient, MockFastboot, MockAdbProxy):
- ad = android_device.AndroidDevice(serial=1)
+ ad = android_device.AndroidDevice(serial='1')
ad.load_snippet('snippet', MOCK_SNIPPET_PACKAGE_NAME)
ad.stop_services()
self.assertFalse(hasattr(ad, 'snippet'))
@mock.patch(
'mobly.controllers.android_device_lib.adb.AdbProxy',
- return_value=mock_android_device.MockAdbProxy(1))
+ return_value=mock_android_device.MockAdbProxy('1'))
@mock.patch(
'mobly.controllers.android_device_lib.fastboot.FastbootProxy',
- return_value=mock_android_device.MockFastbootProxy(1))
+ return_value=mock_android_device.MockFastbootProxy('1'))
def test_AndroidDevice_debug_tag(self, MockFastboot, MockAdbProxy):
- mock_serial = 1
+ mock_serial = '1'
ad = android_device.AndroidDevice(serial=mock_serial)
- self.assertEqual(ad.debug_tag, 1)
+ self.assertEqual(ad.debug_tag, '1')
try:
raise android_device.DeviceError(ad, 'Something')
except android_device.DeviceError as e:
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | 1.6 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
future==1.0.0
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
-e git+https://github.com/google/mobly.git@7e5e62af4ab4537bf619f0ee403c05f004c5baf0#egg=mobly
mock==1.0.1
packaging @ file:///croot/packaging_1734472117206/work
pluggy @ file:///croot/pluggy_1733169602837/work
portpicker==1.6.0
psutil==7.0.0
pytest @ file:///croot/pytest_1738938843180/work
pytz==2025.2
PyYAML==6.0.2
timeout-decorator==0.5.0
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
| name: mobly
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- future==1.0.0
- mock==1.0.1
- portpicker==1.6.0
- psutil==7.0.0
- pytz==2025.2
- pyyaml==6.0.2
- timeout-decorator==0.5.0
prefix: /opt/conda/envs/mobly
| [
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_device_info",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_instantiation"
] | [] | [
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_build_info",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_cat_adb_log",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_change_log_path",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_change_log_path_no_log_exists",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_change_log_path_with_existing_file",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_change_log_path_with_service",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_debug_tag",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_load_snippet",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_load_snippet_dup_attribute_name",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_load_snippet_dup_package",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_load_snippet_dup_snippet_name",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_load_snippet_start_app_fails",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_snippet_cleanup",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_take_bug_report",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_take_bug_report_fail",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_take_bug_report_fallback",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_take_bug_report_with_destination",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_take_logcat",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_take_logcat_with_user_param",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_update_serial",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_update_serial_with_service_running",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_create_with_dict_list",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_create_with_empty_config",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_create_with_no_valid_config",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_create_with_not_list_config",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_create_with_pickup_all",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_create_with_string_list",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_create_with_usb_id",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_get_device_no_match",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_get_device_success_with_serial",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_get_device_success_with_serial_and_extra_field",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_get_device_too_many_matches",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_get_devices_no_match",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_get_devices_success_with_extra_field",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_start_services_on_ads",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_start_services_on_ads_skip_logcat"
] | [] | Apache License 2.0 | 2,009 | 222 | [
"mobly/controllers/android_device.py"
] |
nylas__nylas-python-98 | 068a626706188eea00b606608ab06c9b9ea307fb | 2018-01-04 22:28:49 | 068a626706188eea00b606608ab06c9b9ea307fb | diff --git a/nylas/client/restful_models.py b/nylas/client/restful_models.py
index d8d1c98..7f31653 100644
--- a/nylas/client/restful_models.py
+++ b/nylas/client/restful_models.py
@@ -387,18 +387,27 @@ class File(NylasAPIObject):
collection_name = 'files'
def save(self): # pylint: disable=arguments-differ
- if hasattr(self, 'stream') and self.stream is not None:
- data = {self.filename: self.stream}
- elif hasattr(self, 'data') and self.data is not None:
- data = {self.filename: StringIO(self.data)}
- else:
+ stream = getattr(self, "stream", None)
+ if not stream:
+ data = getattr(self, "data", None)
+ if data:
+ stream = StringIO(data)
+
+ if not stream:
message = (
"File object not properly formatted, "
"must provide either a stream or data."
)
raise FileUploadError(message=message)
- new_obj = self.api._create_resources(File, data)
+ file_info = (
+ self.filename,
+ stream,
+ self.content_type,
+ {}, # upload headers
+ )
+
+ new_obj = self.api._create_resources(File, {"file": file_info})
new_obj = new_obj[0]
for attr in self.attrs:
if hasattr(new_obj, attr):
| File.filename doesn't apply when type is stream:
Setting a filename on an attachment only works when using `.data`. When `.stream` is used, it falls back to the name of the file on the uploading system.
```
myfile = self.nylas_client.files.create()
myfile.content_type = 'application/pdf'
myfile.filename = attachment_name
with open(attachment_path, 'rb') as f:
myfile.stream = f
myfile.save()
myfile.filename = attachment_name
# Create a new draft
draft = self.nylas_client.drafts.create()
if type(recipients) == str:
recipients = [recipients]
draft.to = [{'email': recipient} for recipient in recipients]
draft.subject = subject
draft.body = message
draft.attach(myfile)
``` | nylas/nylas-python | diff --git a/tests/conftest.py b/tests/conftest.py
index f8fb9ad..4e8eb72 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -1,6 +1,7 @@
import re
import json
import copy
+import cgi
import random
import string
import pytest
@@ -674,24 +675,59 @@ def mock_draft_sent_response(mocked_responses, api_url):
@pytest.fixture
-def mock_files(mocked_responses, api_url):
- body = [{
- "content_type": "text/plain",
- "filename": "a.txt",
- "id": "3qfe4k3siosfjtjpfdnon8zbn",
- "account_id": "6aakaxzi4j5gn6f7kbb9e0fxs",
- "object": "file",
- "size": 762878
- }]
+def mock_files(mocked_responses, api_url, account_id):
+ files_content = {
+ "3qfe4k3siosfjtjpfdnon8zbn": b"Hello, World!",
+ }
+ files_metadata = {
+ "3qfe4k3siosfjtjpfdnon8zbn": {
+ "id": "3qfe4k3siosfjtjpfdnon8zbn",
+ "content_type": "text/plain",
+ "filename": "hello.txt",
+ "account_id": account_id,
+ "object": "file",
+ "size": len(files_content["3qfe4k3siosfjtjpfdnon8zbn"])
+ }
+ }
mocked_responses.add(
+ responses.GET,
+ api_url + '/files',
+ body=json.dumps(list(files_metadata.values())),
+ )
+ for file_id in files_content:
+ mocked_responses.add(
+ responses.POST,
+ "{base}/files/{file_id}".format(base=api_url, file_id=file_id),
+ body=json.dumps(files_metadata[file_id]),
+ )
+ mocked_responses.add(
+ responses.GET,
+ "{base}/files/{file_id}/download".format(base=api_url, file_id=file_id),
+ body=files_content[file_id],
+ )
+
+ def create_callback(request):
+ uploaded_lines = request.body.decode('utf8').splitlines()
+ content_disposition = uploaded_lines[1]
+ _, params = cgi.parse_header(content_disposition)
+ filename = params.get("filename", None)
+ content = "".join(uploaded_lines[3:-1])
+ size = len(content.encode('utf8'))
+
+ body = [{
+ "id": generate_id(),
+ "content_type": "text/plain",
+ "filename": filename,
+ "account_id": account_id,
+ "object": "file",
+ "size": size,
+ }]
+ return (200, {}, json.dumps(body))
+
+ mocked_responses.add_callback(
responses.POST,
api_url + '/files/',
- body=json.dumps(body),
- )
- mocked_responses.add(
- responses.GET,
- api_url + '/files/3qfe4k3siosfjtjpfdnon8zbn/download',
- body='test body',
+ callback=create_callback,
)
diff --git a/tests/test_files.py b/tests/test_files.py
index 046c274..8da27fa 100644
--- a/tests/test_files.py
+++ b/tests/test_files.py
@@ -1,19 +1,65 @@
+import cgi
+from io import BytesIO
import pytest
from nylas.client.errors import FileUploadError
@pytest.mark.usefixtures("mock_files")
-def test_file_upload(api_client):
+def test_file_upload_data(api_client, mocked_responses):
+ data = "Hello, World!"
+
myfile = api_client.files.create()
- myfile.filename = 'test.txt'
- myfile.data = "Hello World."
+ myfile.filename = 'hello.txt'
+ myfile.data = data
+
+ assert not mocked_responses.calls
myfile.save()
+ assert len(mocked_responses.calls) == 1
+
+ assert myfile.filename == 'hello.txt'
+ assert myfile.size == 13
- assert myfile.filename == 'a.txt'
- assert myfile.size == 762878
+ upload_body = mocked_responses.calls[0].request.body
+ upload_lines = upload_body.decode("utf8").splitlines()
+
+ content_disposition = upload_lines[1]
+ _, params = cgi.parse_header(content_disposition)
+ assert params["filename"] == "hello.txt"
+ assert "Hello, World!" in upload_lines
+
+
[email protected]("mock_files")
+def test_file_upload_stream(api_client, mocked_responses):
+ stream = BytesIO(b"Hello, World!")
+ stream.name = "wacky.txt"
+ myfile = api_client.files.create()
+ myfile.filename = 'hello.txt'
+ myfile.stream = stream
+ assert not mocked_responses.calls
+ myfile.save()
+ assert len(mocked_responses.calls) == 1
+
+ assert myfile.filename == 'hello.txt'
+ assert myfile.size == 13
+
+ upload_body = mocked_responses.calls[0].request.body
+ upload_lines = upload_body.decode("utf8").splitlines()
+
+ content_disposition = upload_lines[1]
+ _, params = cgi.parse_header(content_disposition)
+ assert params["filename"] == "hello.txt"
+ assert "Hello, World!" in upload_lines
+
+
[email protected]("mock_files")
+def test_file_download(api_client, mocked_responses):
+ assert not mocked_responses.calls
+ myfile = api_client.files.first()
+ assert len(mocked_responses.calls) == 1
data = myfile.download().decode()
- assert data == 'test body'
+ assert len(mocked_responses.calls) == 2
+ assert data == "Hello, World!"
def test_file_invalid_upload(api_client):
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | 3.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[test]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-pylint",
"responses"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"examples/requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | astroid==2.11.7
attrs==22.2.0
bump2version==1.0.1
bumpversion==0.6.0
certifi==2021.5.30
cffi==1.15.1
charset-normalizer==2.0.12
cookies==2.2.1
coverage==6.2
cryptography==40.0.2
dill==0.3.4
idna==3.10
importlib-metadata==4.8.3
iniconfig==1.1.1
isort==5.10.1
lazy-object-proxy==1.7.1
mccabe==0.7.0
ndg-httpsclient==0.5.1
-e git+https://github.com/nylas/nylas-python.git@068a626706188eea00b606608ab06c9b9ea307fb#egg=nylas
packaging==21.3
platformdirs==2.4.0
pluggy==1.0.0
py==1.11.0
pyasn1==0.5.1
pycparser==2.21
pylint==2.13.9
pyOpenSSL==23.2.0
pyparsing==3.1.4
pytest==7.0.1
pytest-cov==4.0.0
pytest-pylint==0.18.0
requests==2.27.1
responses==0.6.1
six==1.17.0
toml==0.10.2
tomli==1.2.3
typed-ast==1.5.5
typing_extensions==4.1.1
urllib3==1.26.20
URLObject==2.4.3
wrapt==1.16.0
zipp==3.6.0
| name: nylas-python
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- astroid==2.11.7
- attrs==22.2.0
- bump2version==1.0.1
- bumpversion==0.6.0
- cffi==1.15.1
- charset-normalizer==2.0.12
- cookies==2.2.1
- coverage==6.2
- cryptography==40.0.2
- dill==0.3.4
- idna==3.10
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- isort==5.10.1
- lazy-object-proxy==1.7.1
- mccabe==0.7.0
- ndg-httpsclient==0.5.1
- packaging==21.3
- platformdirs==2.4.0
- pluggy==1.0.0
- py==1.11.0
- pyasn1==0.5.1
- pycparser==2.21
- pylint==2.13.9
- pyopenssl==23.2.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-cov==4.0.0
- pytest-pylint==0.18.0
- requests==2.27.1
- responses==0.6.1
- six==1.17.0
- toml==0.10.2
- tomli==1.2.3
- typed-ast==1.5.5
- typing-extensions==4.1.1
- urllib3==1.26.20
- urlobject==2.4.3
- wrapt==1.16.0
- zipp==3.6.0
prefix: /opt/conda/envs/nylas-python
| [
"tests/test_files.py::test_file_upload_stream"
] | [] | [
"tests/test_files.py::test_file_upload_data",
"tests/test_files.py::test_file_download",
"tests/test_files.py::test_file_invalid_upload",
"tests/test_files.py::test_file_upload_errors"
] | [] | MIT License | 2,010 | 345 | [
"nylas/client/restful_models.py"
] |
|
joke2k__faker-671 | fc7bbc43efc9adf923b58b43db6c979a3c17ac86 | 2018-01-05 18:03:46 | 29dff0a0f2a31edac21a18cfa50b5bc9206304b2 | diff --git a/faker/providers/date_time/__init__.py b/faker/providers/date_time/__init__.py
index 8d1d8ff6..5df3c699 100644
--- a/faker/providers/date_time/__init__.py
+++ b/faker/providers/date_time/__init__.py
@@ -1342,7 +1342,10 @@ class Provider(BaseProvider):
"""
start_date = self._parse_date_time(start_date, tzinfo=tzinfo)
end_date = self._parse_date_time(end_date, tzinfo=tzinfo)
- ts = self.generator.random.randint(start_date, end_date)
+ if end_date - start_date <= 1:
+ ts = start_date + self.generator.random.random()
+ else:
+ ts = self.generator.random.randint(start_date, end_date)
return datetime(1970, 1, 1, tzinfo=tzinfo) + timedelta(seconds=ts)
def date_between(self, start_date='-30y', end_date='today'):
| past_datetime() crashes when start_date is within one second of now
I ran into a problem while generating a bunch of fake dates where I was calling `fake.past_datetime()` with the start date using the result of a previous `fake.past_datetime()` call (simulating a bunch of files being created as part of a collection). The first call could randomly return a value which is within one second of the current time, causing the second call to trigger an exception from `random.randint()` because the `a` and `b` values were the same.
This was pretty easy to solve simply by using `fake.date_time_between(start_date=previous_created_date)` but it might be useful if `past_datetime()` was safe to call like this. | joke2k/faker | diff --git a/tests/providers/test_date_time.py b/tests/providers/test_date_time.py
index f34250e6..1c81f7c3 100644
--- a/tests/providers/test_date_time.py
+++ b/tests/providers/test_date_time.py
@@ -191,6 +191,10 @@ class TestDateTime(unittest.TestCase):
self.assertTrue(datetime_start <= random_date)
self.assertTrue(datetime_end >= random_date)
+ def test_past_datetime_within_second(self):
+ # Should not raise a ``ValueError``
+ self.factory.past_datetime(start_date='+1s')
+
def test_date_between_dates(self):
date_end = date.today()
date_start = date_end - timedelta(days=10)
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 1
} | 0.8 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"tests/requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
dnspython==2.2.1
email-validator==1.0.3
-e git+https://github.com/joke2k/faker.git@fc7bbc43efc9adf923b58b43db6c979a3c17ac86#egg=Faker
idna==3.10
importlib-metadata==4.8.3
iniconfig==1.1.1
mock==2.0.0
packaging==21.3
pbr==6.1.1
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
python-dateutil==2.9.0.post0
six==1.17.0
text-unidecode==1.3
tomli==1.2.3
typing_extensions==4.1.1
UkPostcodeParser==1.1.2
zipp==3.6.0
| name: faker
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- dnspython==2.2.1
- email-validator==1.0.3
- idna==3.10
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- mock==2.0.0
- packaging==21.3
- pbr==6.1.1
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- python-dateutil==2.9.0.post0
- six==1.17.0
- text-unidecode==1.3
- tomli==1.2.3
- typing-extensions==4.1.1
- ukpostcodeparser==1.1.2
- zipp==3.6.0
prefix: /opt/conda/envs/faker
| [
"tests/providers/test_date_time.py::TestDateTime::test_past_datetime_within_second"
] | [] | [
"tests/providers/test_date_time.py::TestKoKR::test_day",
"tests/providers/test_date_time.py::TestKoKR::test_month",
"tests/providers/test_date_time.py::TestDateTime::test_date_between",
"tests/providers/test_date_time.py::TestDateTime::test_date_between_dates",
"tests/providers/test_date_time.py::TestDateTime::test_date_object",
"tests/providers/test_date_time.py::TestDateTime::test_date_this_period",
"tests/providers/test_date_time.py::TestDateTime::test_date_time_between",
"tests/providers/test_date_time.py::TestDateTime::test_date_time_between_dates",
"tests/providers/test_date_time.py::TestDateTime::test_date_time_between_dates_with_tzinfo",
"tests/providers/test_date_time.py::TestDateTime::test_date_time_this_period",
"tests/providers/test_date_time.py::TestDateTime::test_date_time_this_period_with_tzinfo",
"tests/providers/test_date_time.py::TestDateTime::test_datetime_safe",
"tests/providers/test_date_time.py::TestDateTime::test_datetime_safe_new_date",
"tests/providers/test_date_time.py::TestDateTime::test_datetimes_with_and_without_tzinfo",
"tests/providers/test_date_time.py::TestDateTime::test_day",
"tests/providers/test_date_time.py::TestDateTime::test_future_date",
"tests/providers/test_date_time.py::TestDateTime::test_future_datetime",
"tests/providers/test_date_time.py::TestDateTime::test_month",
"tests/providers/test_date_time.py::TestDateTime::test_parse_date",
"tests/providers/test_date_time.py::TestDateTime::test_parse_date_time",
"tests/providers/test_date_time.py::TestDateTime::test_parse_timedelta",
"tests/providers/test_date_time.py::TestDateTime::test_past_date",
"tests/providers/test_date_time.py::TestDateTime::test_past_datetime",
"tests/providers/test_date_time.py::TestDateTime::test_time_object",
"tests/providers/test_date_time.py::TestDateTime::test_time_series",
"tests/providers/test_date_time.py::TestDateTime::test_timezone_conversion",
"tests/providers/test_date_time.py::TestPlPL::test_day",
"tests/providers/test_date_time.py::TestPlPL::test_month",
"tests/providers/test_date_time.py::TestAr::test_ar_aa",
"tests/providers/test_date_time.py::TestAr::test_ar_eg"
] | [] | MIT License | 2,015 | 235 | [
"faker/providers/date_time/__init__.py"
] |
|
mrocklin__sparse-67 | 9be8058708127553d543a61001843e7064f885b9 | 2018-01-06 05:55:11 | bbb0869c882b914124c44e789214d945ff785aa4 | hameerabbasi: Hello, the correct way to fix this would be to modify `__rmul__` to return `scalar * COO`. Fixing it this way causes the broadcasting path to be taken which will be much, much slower and will fail for scalar `COO` objects, returning 1-D instead of scalar.
fujiisoup: Hi, @hameerabbasi
Thanks for the quick response.
> the correct way to fix this would be to modify __rmul__ to return scalar * COO
`np.number` has `__mul__` method and thus `COO.__rmul__` will not be called.
Instead, `np.number.__mul__` calls `COO.__array_ufunc__`.
Another option would be to have a special treatment for the `func is operator.mul` case in `__array_ufunc__`, but the code would be more complicated.
hameerabbasi: Hey! I came up with a long-term solution to this, hope it will be of use.
```
@staticmethod
def _elemwise(func, *args, **kwargs):
assert len(args) >= 1
self = args[0]
if isinstance(self, scipy.sparse.spmatrix):
self = COO.from_numpy(self)
elif np.isscalar(self):
func = partial(func, self)
other = args[1]
if isinstance(other, scipy.sparse.spmatrix):
other = COO.from_scipy_sparse(other)
return other._elemwise_unary(func, *args[2:], **kwargs)
if len(args) == 1:
return self._elemwise_unary(func, *args[1:], **kwargs)
else:
other = args[1]
if isinstance(other, scipy.sparse.spmatrix):
other = COO.from_scipy_sparse(other)
if isinstance(other, COO):
return self._elemwise_binary(func, *args[1:], **kwargs)
else:
return self._elemwise_unary(func, *args[1:], **kwargs)
```
If you check this in, maybe @mrocklin can review it.
hameerabbasi: @fujiisoup I've edited the elemwise to account for the unit test failure.
fujiisoup: Cool!!
I added more tests for left-side operations.
Additionally, I added a support also for 0-dimensional `np.ndarray`.
hameerabbasi: Would you be kind enough to rebase this on `master`? Your other PR caused conflicts with this one.
hameerabbasi: Just waiting on either of @nils-werner or @mrocklin to review these changes... Since I was involved in adding code, I shouldn't be the one to review it. :-) | diff --git a/sparse/core.py b/sparse/core.py
index e455a9d..9a28145 100644
--- a/sparse/core.py
+++ b/sparse/core.py
@@ -1,7 +1,7 @@
from __future__ import absolute_import, division, print_function
from collections import Iterable, defaultdict, deque
-from functools import reduce
+from functools import reduce, partial
import numbers
import operator
@@ -728,6 +728,13 @@ class COO(object):
self = args[0]
if isinstance(self, scipy.sparse.spmatrix):
self = COO.from_numpy(self)
+ elif np.isscalar(self) or (isinstance(self, np.ndarray)
+ and self.ndim == 0):
+ func = partial(func, self)
+ other = args[1]
+ if isinstance(other, scipy.sparse.spmatrix):
+ other = COO.from_numpy(other)
+ return other._elemwise_unary(func, *args[2:], **kwargs)
if len(args) == 1:
return self._elemwise_unary(func, *args[1:], **kwargs)
| left-side np.scalar multiplication
Hi,
I noticed multiplication with `np.scalar` such as `np.float32` fails,
```python
In [1]: import numpy as np
...: import sparse
...: x = sparse.random((2, 3, 4), density=0.5)
...: x * np.float32(2.0) # This succeeds
...:
Out[1]: <COO: shape=(2, 3, 4), dtype=float64, nnz=12, sorted=False, duplicates=True>
In [2]: np.float32(2.0) * x # fails
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-2-e1d24f7d85b3> in <module>()
----> 1 np.float32(2.0) * x # fails
~/Dropbox/projects/sparse/sparse/core.py in __array_ufunc__(self, ufunc, method, *inputs, **kwargs)
491 def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
492 if method == '__call__':
--> 493 return COO._elemwise(ufunc, *inputs, **kwargs)
494 elif method == 'reduce':
495 return COO._reduce(ufunc, *inputs, **kwargs)
~/Dropbox/projects/sparse/sparse/core.py in _elemwise(func, *args, **kwargs)
735 other = args[1]
736 if isinstance(other, COO):
--> 737 return self._elemwise_binary(func, *args[1:], **kwargs)
738 elif isinstance(other, scipy.sparse.spmatrix):
739 other = COO.from_scipy_sparse(other)
AttributeError: 'numpy.float32' object has no attribute '_elemwise_binary'
``` | mrocklin/sparse | diff --git a/sparse/tests/test_core.py b/sparse/tests/test_core.py
index 394abe7..2858e02 100644
--- a/sparse/tests/test_core.py
+++ b/sparse/tests/test_core.py
@@ -265,8 +265,11 @@ def test_op_scipy_sparse():
(operator.le, -3),
(operator.eq, 1)
])
-def test_elemwise_scalar(func, scalar):
[email protected]('convert_to_np_number', [True, False])
+def test_elemwise_scalar(func, scalar, convert_to_np_number):
xs = sparse.random((2, 3, 4), density=0.5)
+ if convert_to_np_number:
+ scalar = np.float32(scalar)
y = scalar
x = xs.todense()
@@ -278,6 +281,33 @@ def test_elemwise_scalar(func, scalar):
assert_eq(fs, func(x, y))
[email protected]('func, scalar', [
+ (operator.mul, 5),
+ (operator.add, 0),
+ (operator.sub, 0),
+ (operator.gt, -5),
+ (operator.lt, 5),
+ (operator.ne, 0),
+ (operator.ge, -5),
+ (operator.le, 3),
+ (operator.eq, 1)
+])
[email protected]('convert_to_np_number', [True, False])
+def test_leftside_elemwise_scalar(func, scalar, convert_to_np_number):
+ xs = sparse.random((2, 3, 4), density=0.5)
+ if convert_to_np_number:
+ scalar = np.float32(scalar)
+ y = scalar
+
+ x = xs.todense()
+ fs = func(y, xs)
+
+ assert isinstance(fs, COO)
+ assert xs.nnz >= fs.nnz
+
+ assert_eq(fs, func(y, x))
+
+
@pytest.mark.parametrize('func, scalar', [
(operator.add, 5),
(operator.sub, -5),
@@ -601,15 +631,19 @@ def test_broadcast_to(shape1, shape2):
assert_eq(np.broadcast_to(x, shape2), a.broadcast_to(shape2))
-def test_scalar_multiplication():
[email protected]('scalar', [2, 2.5, np.float32(2.0), np.int8(3)])
+def test_scalar_multiplication(scalar):
a = sparse.random((2, 3, 4), density=0.5)
x = a.todense()
- assert_eq(x * 2, a * 2)
- assert_eq(2 * x, 2 * a)
- assert_eq(x / 2, a / 2)
- assert_eq(x / 2.5, a / 2.5)
- assert_eq(x // 2.5, a // 2.5)
+ assert_eq(x * scalar, a * scalar)
+ assert (a * scalar).nnz == a.nnz
+ assert_eq(scalar * x, scalar * a)
+ assert (scalar * a).nnz == a.nnz
+ assert_eq(x / scalar, a / scalar)
+ assert (a / scalar).nnz == a.nnz
+ assert_eq(x // scalar, a // scalar)
+ # division may reduce nnz.
@pytest.mark.filterwarnings('ignore:divide by zero')
| {
"commit_name": "merge_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | 0.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[tests]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-flake8"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
coverage==6.2
flake8==5.0.4
importlib-metadata==4.2.0
iniconfig==1.1.1
mccabe==0.7.0
numpy==1.19.5
packaging==21.3
pluggy==1.0.0
py==1.11.0
pycodestyle==2.9.1
pyflakes==2.5.0
pyparsing==3.1.4
pytest==7.0.1
pytest-cov==4.0.0
pytest-flake8==1.1.1
scipy==1.5.4
-e git+https://github.com/mrocklin/sparse.git@9be8058708127553d543a61001843e7064f885b9#egg=sparse
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: sparse
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- coverage==6.2
- flake8==5.0.4
- importlib-metadata==4.2.0
- iniconfig==1.1.1
- mccabe==0.7.0
- numpy==1.19.5
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pycodestyle==2.9.1
- pyflakes==2.5.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-cov==4.0.0
- pytest-flake8==1.1.1
- scipy==1.5.4
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/sparse
| [
"sparse/tests/test_core.py::test_leftside_elemwise_scalar[True-mul-5]",
"sparse/tests/test_core.py::test_leftside_elemwise_scalar[True-add-0]",
"sparse/tests/test_core.py::test_leftside_elemwise_scalar[True-sub-0]",
"sparse/tests/test_core.py::test_leftside_elemwise_scalar[True-gt--5]",
"sparse/tests/test_core.py::test_leftside_elemwise_scalar[True-lt-5]",
"sparse/tests/test_core.py::test_leftside_elemwise_scalar[True-ge--5]",
"sparse/tests/test_core.py::test_leftside_elemwise_scalar[True-le-3]",
"sparse/tests/test_core.py::test_scalar_multiplication[scalar2]",
"sparse/tests/test_core.py::test_scalar_multiplication[scalar3]"
] | [
"sparse/__init__.py::flake-8::FLAKE8",
"sparse/core.py::flake-8::FLAKE8",
"sparse/core.py::sparse.core.COO",
"sparse/slicing.py::flake-8::FLAKE8",
"sparse/utils.py::flake-8::FLAKE8",
"sparse/tests/test_core.py::flake-8::FLAKE8"
] | [
"sparse/core.py::sparse.core.random",
"sparse/slicing.py::sparse.slicing.check_index",
"sparse/slicing.py::sparse.slicing.normalize_index",
"sparse/slicing.py::sparse.slicing.normalize_slice",
"sparse/slicing.py::sparse.slicing.posify_index",
"sparse/slicing.py::sparse.slicing.replace_ellipsis",
"sparse/slicing.py::sparse.slicing.sanitize_index",
"sparse/tests/test_core.py::test_reductions[True-None-max-kwargs0-eqkwargs0]",
"sparse/tests/test_core.py::test_reductions[True-None-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_core.py::test_reductions[True-None-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_core.py::test_reductions[True-None-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_core.py::test_reductions[True-None-min-kwargs4-eqkwargs4]",
"sparse/tests/test_core.py::test_reductions[True-0-max-kwargs0-eqkwargs0]",
"sparse/tests/test_core.py::test_reductions[True-0-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_core.py::test_reductions[True-0-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_core.py::test_reductions[True-0-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_core.py::test_reductions[True-0-min-kwargs4-eqkwargs4]",
"sparse/tests/test_core.py::test_reductions[True-1-max-kwargs0-eqkwargs0]",
"sparse/tests/test_core.py::test_reductions[True-1-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_core.py::test_reductions[True-1-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_core.py::test_reductions[True-1-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_core.py::test_reductions[True-1-min-kwargs4-eqkwargs4]",
"sparse/tests/test_core.py::test_reductions[True-2-max-kwargs0-eqkwargs0]",
"sparse/tests/test_core.py::test_reductions[True-2-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_core.py::test_reductions[True-2-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_core.py::test_reductions[True-2-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_core.py::test_reductions[True-2-min-kwargs4-eqkwargs4]",
"sparse/tests/test_core.py::test_reductions[True-axis4-max-kwargs0-eqkwargs0]",
"sparse/tests/test_core.py::test_reductions[True-axis4-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_core.py::test_reductions[True-axis4-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_core.py::test_reductions[True-axis4-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_core.py::test_reductions[True-axis4-min-kwargs4-eqkwargs4]",
"sparse/tests/test_core.py::test_reductions[False-None-max-kwargs0-eqkwargs0]",
"sparse/tests/test_core.py::test_reductions[False-None-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_core.py::test_reductions[False-None-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_core.py::test_reductions[False-None-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_core.py::test_reductions[False-None-min-kwargs4-eqkwargs4]",
"sparse/tests/test_core.py::test_reductions[False-0-max-kwargs0-eqkwargs0]",
"sparse/tests/test_core.py::test_reductions[False-0-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_core.py::test_reductions[False-0-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_core.py::test_reductions[False-0-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_core.py::test_reductions[False-0-min-kwargs4-eqkwargs4]",
"sparse/tests/test_core.py::test_reductions[False-1-max-kwargs0-eqkwargs0]",
"sparse/tests/test_core.py::test_reductions[False-1-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_core.py::test_reductions[False-1-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_core.py::test_reductions[False-1-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_core.py::test_reductions[False-1-min-kwargs4-eqkwargs4]",
"sparse/tests/test_core.py::test_reductions[False-2-max-kwargs0-eqkwargs0]",
"sparse/tests/test_core.py::test_reductions[False-2-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_core.py::test_reductions[False-2-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_core.py::test_reductions[False-2-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_core.py::test_reductions[False-2-min-kwargs4-eqkwargs4]",
"sparse/tests/test_core.py::test_reductions[False-axis4-max-kwargs0-eqkwargs0]",
"sparse/tests/test_core.py::test_reductions[False-axis4-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_core.py::test_reductions[False-axis4-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_core.py::test_reductions[False-axis4-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_core.py::test_reductions[False-axis4-min-kwargs4-eqkwargs4]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-None-amax-kwargs0-eqkwargs0]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-None-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-None-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-None-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-None-amin-kwargs4-eqkwargs4]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-0-amax-kwargs0-eqkwargs0]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-0-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-0-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-0-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-0-amin-kwargs4-eqkwargs4]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-1-amax-kwargs0-eqkwargs0]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-1-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-1-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-1-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-1-amin-kwargs4-eqkwargs4]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-2-amax-kwargs0-eqkwargs0]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-2-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-2-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-2-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-2-amin-kwargs4-eqkwargs4]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-axis4-amax-kwargs0-eqkwargs0]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-axis4-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-axis4-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-axis4-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-axis4-amin-kwargs4-eqkwargs4]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-None-amax-kwargs0-eqkwargs0]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-None-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-None-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-None-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-None-amin-kwargs4-eqkwargs4]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-0-amax-kwargs0-eqkwargs0]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-0-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-0-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-0-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-0-amin-kwargs4-eqkwargs4]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-1-amax-kwargs0-eqkwargs0]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-1-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-1-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-1-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-1-amin-kwargs4-eqkwargs4]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-2-amax-kwargs0-eqkwargs0]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-2-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-2-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-2-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-2-amin-kwargs4-eqkwargs4]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-axis4-amax-kwargs0-eqkwargs0]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-axis4-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-axis4-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-axis4-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-axis4-amin-kwargs4-eqkwargs4]",
"sparse/tests/test_core.py::test_transpose[None]",
"sparse/tests/test_core.py::test_transpose[axis1]",
"sparse/tests/test_core.py::test_transpose[axis2]",
"sparse/tests/test_core.py::test_transpose[axis3]",
"sparse/tests/test_core.py::test_transpose[axis4]",
"sparse/tests/test_core.py::test_transpose[axis5]",
"sparse/tests/test_core.py::test_transpose[axis6]",
"sparse/tests/test_core.py::test_transpose_error[axis0]",
"sparse/tests/test_core.py::test_transpose_error[axis1]",
"sparse/tests/test_core.py::test_transpose_error[axis2]",
"sparse/tests/test_core.py::test_transpose_error[axis3]",
"sparse/tests/test_core.py::test_transpose_error[axis4]",
"sparse/tests/test_core.py::test_transpose_error[axis5]",
"sparse/tests/test_core.py::test_reshape[a0-b0]",
"sparse/tests/test_core.py::test_reshape[a1-b1]",
"sparse/tests/test_core.py::test_reshape[a2-b2]",
"sparse/tests/test_core.py::test_reshape[a3-b3]",
"sparse/tests/test_core.py::test_reshape[a4-b4]",
"sparse/tests/test_core.py::test_reshape[a5-b5]",
"sparse/tests/test_core.py::test_reshape[a6-b6]",
"sparse/tests/test_core.py::test_reshape[a7-b7]",
"sparse/tests/test_core.py::test_reshape[a8-b8]",
"sparse/tests/test_core.py::test_reshape[a9-b9]",
"sparse/tests/test_core.py::test_large_reshape",
"sparse/tests/test_core.py::test_reshape_same",
"sparse/tests/test_core.py::test_to_scipy_sparse",
"sparse/tests/test_core.py::test_tensordot[a_shape0-b_shape0-axes0]",
"sparse/tests/test_core.py::test_tensordot[a_shape1-b_shape1-axes1]",
"sparse/tests/test_core.py::test_tensordot[a_shape2-b_shape2-axes2]",
"sparse/tests/test_core.py::test_tensordot[a_shape3-b_shape3-axes3]",
"sparse/tests/test_core.py::test_tensordot[a_shape4-b_shape4-axes4]",
"sparse/tests/test_core.py::test_tensordot[a_shape5-b_shape5-axes5]",
"sparse/tests/test_core.py::test_tensordot[a_shape6-b_shape6-axes6]",
"sparse/tests/test_core.py::test_tensordot[a_shape7-b_shape7-axes7]",
"sparse/tests/test_core.py::test_tensordot[a_shape8-b_shape8-axes8]",
"sparse/tests/test_core.py::test_tensordot[a_shape9-b_shape9-0]",
"sparse/tests/test_core.py::test_dot",
"sparse/tests/test_core.py::test_elemwise[expm1]",
"sparse/tests/test_core.py::test_elemwise[log1p]",
"sparse/tests/test_core.py::test_elemwise[sin]",
"sparse/tests/test_core.py::test_elemwise[tan]",
"sparse/tests/test_core.py::test_elemwise[sinh]",
"sparse/tests/test_core.py::test_elemwise[tanh]",
"sparse/tests/test_core.py::test_elemwise[floor]",
"sparse/tests/test_core.py::test_elemwise[ceil]",
"sparse/tests/test_core.py::test_elemwise[sqrt]",
"sparse/tests/test_core.py::test_elemwise[conjugate0]",
"sparse/tests/test_core.py::test_elemwise[round_]",
"sparse/tests/test_core.py::test_elemwise[rint]",
"sparse/tests/test_core.py::test_elemwise[<lambda>0]",
"sparse/tests/test_core.py::test_elemwise[conjugate1]",
"sparse/tests/test_core.py::test_elemwise[conjugate2]",
"sparse/tests/test_core.py::test_elemwise[<lambda>1]",
"sparse/tests/test_core.py::test_elemwise[abs]",
"sparse/tests/test_core.py::test_elemwise_binary[shape0-mul]",
"sparse/tests/test_core.py::test_elemwise_binary[shape0-add]",
"sparse/tests/test_core.py::test_elemwise_binary[shape0-sub]",
"sparse/tests/test_core.py::test_elemwise_binary[shape0-gt]",
"sparse/tests/test_core.py::test_elemwise_binary[shape0-lt]",
"sparse/tests/test_core.py::test_elemwise_binary[shape0-ne]",
"sparse/tests/test_core.py::test_elemwise_binary[shape1-mul]",
"sparse/tests/test_core.py::test_elemwise_binary[shape1-add]",
"sparse/tests/test_core.py::test_elemwise_binary[shape1-sub]",
"sparse/tests/test_core.py::test_elemwise_binary[shape1-gt]",
"sparse/tests/test_core.py::test_elemwise_binary[shape1-lt]",
"sparse/tests/test_core.py::test_elemwise_binary[shape1-ne]",
"sparse/tests/test_core.py::test_elemwise_binary[shape2-mul]",
"sparse/tests/test_core.py::test_elemwise_binary[shape2-add]",
"sparse/tests/test_core.py::test_elemwise_binary[shape2-sub]",
"sparse/tests/test_core.py::test_elemwise_binary[shape2-gt]",
"sparse/tests/test_core.py::test_elemwise_binary[shape2-lt]",
"sparse/tests/test_core.py::test_elemwise_binary[shape2-ne]",
"sparse/tests/test_core.py::test_elemwise_binary[shape3-mul]",
"sparse/tests/test_core.py::test_elemwise_binary[shape3-add]",
"sparse/tests/test_core.py::test_elemwise_binary[shape3-sub]",
"sparse/tests/test_core.py::test_elemwise_binary[shape3-gt]",
"sparse/tests/test_core.py::test_elemwise_binary[shape3-lt]",
"sparse/tests/test_core.py::test_elemwise_binary[shape3-ne]",
"sparse/tests/test_core.py::test_auto_densification_fails[pow]",
"sparse/tests/test_core.py::test_auto_densification_fails[truediv]",
"sparse/tests/test_core.py::test_auto_densification_fails[floordiv]",
"sparse/tests/test_core.py::test_auto_densification_fails[ge]",
"sparse/tests/test_core.py::test_auto_densification_fails[le]",
"sparse/tests/test_core.py::test_auto_densification_fails[eq]",
"sparse/tests/test_core.py::test_op_scipy_sparse",
"sparse/tests/test_core.py::test_elemwise_scalar[True-mul-5]",
"sparse/tests/test_core.py::test_elemwise_scalar[True-add-0]",
"sparse/tests/test_core.py::test_elemwise_scalar[True-sub-0]",
"sparse/tests/test_core.py::test_elemwise_scalar[True-pow-5]",
"sparse/tests/test_core.py::test_elemwise_scalar[True-truediv-3]",
"sparse/tests/test_core.py::test_elemwise_scalar[True-floordiv-4]",
"sparse/tests/test_core.py::test_elemwise_scalar[True-gt-5]",
"sparse/tests/test_core.py::test_elemwise_scalar[True-lt--5]",
"sparse/tests/test_core.py::test_elemwise_scalar[True-ne-0]",
"sparse/tests/test_core.py::test_elemwise_scalar[True-ge-5]",
"sparse/tests/test_core.py::test_elemwise_scalar[True-le--3]",
"sparse/tests/test_core.py::test_elemwise_scalar[True-eq-1]",
"sparse/tests/test_core.py::test_elemwise_scalar[False-mul-5]",
"sparse/tests/test_core.py::test_elemwise_scalar[False-add-0]",
"sparse/tests/test_core.py::test_elemwise_scalar[False-sub-0]",
"sparse/tests/test_core.py::test_elemwise_scalar[False-pow-5]",
"sparse/tests/test_core.py::test_elemwise_scalar[False-truediv-3]",
"sparse/tests/test_core.py::test_elemwise_scalar[False-floordiv-4]",
"sparse/tests/test_core.py::test_elemwise_scalar[False-gt-5]",
"sparse/tests/test_core.py::test_elemwise_scalar[False-lt--5]",
"sparse/tests/test_core.py::test_elemwise_scalar[False-ne-0]",
"sparse/tests/test_core.py::test_elemwise_scalar[False-ge-5]",
"sparse/tests/test_core.py::test_elemwise_scalar[False-le--3]",
"sparse/tests/test_core.py::test_elemwise_scalar[False-eq-1]",
"sparse/tests/test_core.py::test_leftside_elemwise_scalar[True-ne-0]",
"sparse/tests/test_core.py::test_leftside_elemwise_scalar[True-eq-1]",
"sparse/tests/test_core.py::test_leftside_elemwise_scalar[False-mul-5]",
"sparse/tests/test_core.py::test_leftside_elemwise_scalar[False-add-0]",
"sparse/tests/test_core.py::test_leftside_elemwise_scalar[False-sub-0]",
"sparse/tests/test_core.py::test_leftside_elemwise_scalar[False-gt--5]",
"sparse/tests/test_core.py::test_leftside_elemwise_scalar[False-lt-5]",
"sparse/tests/test_core.py::test_leftside_elemwise_scalar[False-ne-0]",
"sparse/tests/test_core.py::test_leftside_elemwise_scalar[False-ge--5]",
"sparse/tests/test_core.py::test_leftside_elemwise_scalar[False-le-3]",
"sparse/tests/test_core.py::test_leftside_elemwise_scalar[False-eq-1]",
"sparse/tests/test_core.py::test_scalar_densification_fails[add-5]",
"sparse/tests/test_core.py::test_scalar_densification_fails[sub--5]",
"sparse/tests/test_core.py::test_scalar_densification_fails[pow--3]",
"sparse/tests/test_core.py::test_scalar_densification_fails[truediv-0]",
"sparse/tests/test_core.py::test_scalar_densification_fails[floordiv-0]",
"sparse/tests/test_core.py::test_scalar_densification_fails[gt--5]",
"sparse/tests/test_core.py::test_scalar_densification_fails[lt-5]",
"sparse/tests/test_core.py::test_scalar_densification_fails[ne-1]",
"sparse/tests/test_core.py::test_scalar_densification_fails[ge--3]",
"sparse/tests/test_core.py::test_scalar_densification_fails[le-3]",
"sparse/tests/test_core.py::test_scalar_densification_fails[eq-0]",
"sparse/tests/test_core.py::test_bitwise_binary[shape0-and_]",
"sparse/tests/test_core.py::test_bitwise_binary[shape0-or_]",
"sparse/tests/test_core.py::test_bitwise_binary[shape0-xor]",
"sparse/tests/test_core.py::test_bitwise_binary[shape1-and_]",
"sparse/tests/test_core.py::test_bitwise_binary[shape1-or_]",
"sparse/tests/test_core.py::test_bitwise_binary[shape1-xor]",
"sparse/tests/test_core.py::test_bitwise_binary[shape2-and_]",
"sparse/tests/test_core.py::test_bitwise_binary[shape2-or_]",
"sparse/tests/test_core.py::test_bitwise_binary[shape2-xor]",
"sparse/tests/test_core.py::test_bitwise_binary[shape3-and_]",
"sparse/tests/test_core.py::test_bitwise_binary[shape3-or_]",
"sparse/tests/test_core.py::test_bitwise_binary[shape3-xor]",
"sparse/tests/test_core.py::test_bitwise_binary_bool[shape0-and_]",
"sparse/tests/test_core.py::test_bitwise_binary_bool[shape0-or_]",
"sparse/tests/test_core.py::test_bitwise_binary_bool[shape0-xor]",
"sparse/tests/test_core.py::test_bitwise_binary_bool[shape1-and_]",
"sparse/tests/test_core.py::test_bitwise_binary_bool[shape1-or_]",
"sparse/tests/test_core.py::test_bitwise_binary_bool[shape1-xor]",
"sparse/tests/test_core.py::test_bitwise_binary_bool[shape2-and_]",
"sparse/tests/test_core.py::test_bitwise_binary_bool[shape2-or_]",
"sparse/tests/test_core.py::test_bitwise_binary_bool[shape2-xor]",
"sparse/tests/test_core.py::test_bitwise_binary_bool[shape3-and_]",
"sparse/tests/test_core.py::test_bitwise_binary_bool[shape3-or_]",
"sparse/tests/test_core.py::test_bitwise_binary_bool[shape3-xor]",
"sparse/tests/test_core.py::test_elemwise_binary_empty",
"sparse/tests/test_core.py::test_gt",
"sparse/tests/test_core.py::test_slicing[0]",
"sparse/tests/test_core.py::test_slicing[1]",
"sparse/tests/test_core.py::test_slicing[-1]",
"sparse/tests/test_core.py::test_slicing[index3]",
"sparse/tests/test_core.py::test_slicing[index4]",
"sparse/tests/test_core.py::test_slicing[index5]",
"sparse/tests/test_core.py::test_slicing[index6]",
"sparse/tests/test_core.py::test_slicing[index7]",
"sparse/tests/test_core.py::test_slicing[index8]",
"sparse/tests/test_core.py::test_slicing[index9]",
"sparse/tests/test_core.py::test_slicing[index10]",
"sparse/tests/test_core.py::test_slicing[index11]",
"sparse/tests/test_core.py::test_slicing[index12]",
"sparse/tests/test_core.py::test_slicing[index13]",
"sparse/tests/test_core.py::test_slicing[index14]",
"sparse/tests/test_core.py::test_slicing[index15]",
"sparse/tests/test_core.py::test_slicing[index16]",
"sparse/tests/test_core.py::test_slicing[index17]",
"sparse/tests/test_core.py::test_slicing[index18]",
"sparse/tests/test_core.py::test_slicing[index19]",
"sparse/tests/test_core.py::test_slicing[index20]",
"sparse/tests/test_core.py::test_slicing[index21]",
"sparse/tests/test_core.py::test_slicing[index22]",
"sparse/tests/test_core.py::test_slicing[index23]",
"sparse/tests/test_core.py::test_slicing[index24]",
"sparse/tests/test_core.py::test_slicing[index25]",
"sparse/tests/test_core.py::test_slicing[index26]",
"sparse/tests/test_core.py::test_slicing[index27]",
"sparse/tests/test_core.py::test_slicing[index28]",
"sparse/tests/test_core.py::test_slicing[index29]",
"sparse/tests/test_core.py::test_slicing[index30]",
"sparse/tests/test_core.py::test_slicing[index31]",
"sparse/tests/test_core.py::test_slicing[index32]",
"sparse/tests/test_core.py::test_slicing[index33]",
"sparse/tests/test_core.py::test_slicing[index34]",
"sparse/tests/test_core.py::test_slicing[index35]",
"sparse/tests/test_core.py::test_slicing[index36]",
"sparse/tests/test_core.py::test_slicing[index37]",
"sparse/tests/test_core.py::test_slicing[index38]",
"sparse/tests/test_core.py::test_slicing[index39]",
"sparse/tests/test_core.py::test_slicing[index40]",
"sparse/tests/test_core.py::test_slicing[index41]",
"sparse/tests/test_core.py::test_slicing[index42]",
"sparse/tests/test_core.py::test_slicing[index43]",
"sparse/tests/test_core.py::test_slicing[index44]",
"sparse/tests/test_core.py::test_custom_dtype_slicing",
"sparse/tests/test_core.py::test_slicing_errors[index0]",
"sparse/tests/test_core.py::test_slicing_errors[index1]",
"sparse/tests/test_core.py::test_slicing_errors[index2]",
"sparse/tests/test_core.py::test_slicing_errors[5]",
"sparse/tests/test_core.py::test_slicing_errors[-5]",
"sparse/tests/test_core.py::test_slicing_errors[foo]",
"sparse/tests/test_core.py::test_slicing_errors[index6]",
"sparse/tests/test_core.py::test_canonical",
"sparse/tests/test_core.py::test_concatenate",
"sparse/tests/test_core.py::test_concatenate_mixed[stack-0]",
"sparse/tests/test_core.py::test_concatenate_mixed[stack-1]",
"sparse/tests/test_core.py::test_concatenate_mixed[concatenate-0]",
"sparse/tests/test_core.py::test_concatenate_mixed[concatenate-1]",
"sparse/tests/test_core.py::test_stack[0-shape0]",
"sparse/tests/test_core.py::test_stack[0-shape1]",
"sparse/tests/test_core.py::test_stack[0-shape2]",
"sparse/tests/test_core.py::test_stack[1-shape0]",
"sparse/tests/test_core.py::test_stack[1-shape1]",
"sparse/tests/test_core.py::test_stack[1-shape2]",
"sparse/tests/test_core.py::test_stack[-1-shape0]",
"sparse/tests/test_core.py::test_stack[-1-shape1]",
"sparse/tests/test_core.py::test_stack[-1-shape2]",
"sparse/tests/test_core.py::test_large_concat_stack",
"sparse/tests/test_core.py::test_coord_dtype",
"sparse/tests/test_core.py::test_addition",
"sparse/tests/test_core.py::test_addition_not_ok_when_large_and_sparse",
"sparse/tests/test_core.py::test_broadcasting[shape10-shape20-add]",
"sparse/tests/test_core.py::test_broadcasting[shape10-shape20-mul]",
"sparse/tests/test_core.py::test_broadcasting[shape11-shape21-add]",
"sparse/tests/test_core.py::test_broadcasting[shape11-shape21-mul]",
"sparse/tests/test_core.py::test_broadcasting[shape12-shape22-add]",
"sparse/tests/test_core.py::test_broadcasting[shape12-shape22-mul]",
"sparse/tests/test_core.py::test_broadcasting[shape13-shape23-add]",
"sparse/tests/test_core.py::test_broadcasting[shape13-shape23-mul]",
"sparse/tests/test_core.py::test_broadcasting[shape14-shape24-add]",
"sparse/tests/test_core.py::test_broadcasting[shape14-shape24-mul]",
"sparse/tests/test_core.py::test_broadcasting[shape15-shape25-add]",
"sparse/tests/test_core.py::test_broadcasting[shape15-shape25-mul]",
"sparse/tests/test_core.py::test_broadcast_to[shape10-shape20]",
"sparse/tests/test_core.py::test_broadcast_to[shape11-shape21]",
"sparse/tests/test_core.py::test_broadcast_to[shape12-shape22]",
"sparse/tests/test_core.py::test_scalar_multiplication[2]",
"sparse/tests/test_core.py::test_scalar_multiplication[2.5]",
"sparse/tests/test_core.py::test_scalar_exponentiation",
"sparse/tests/test_core.py::test_create_with_lists_of_tuples",
"sparse/tests/test_core.py::test_sizeof",
"sparse/tests/test_core.py::test_scipy_sparse_interface",
"sparse/tests/test_core.py::test_cache_csr",
"sparse/tests/test_core.py::test_empty_shape",
"sparse/tests/test_core.py::test_single_dimension",
"sparse/tests/test_core.py::test_raise_dense",
"sparse/tests/test_core.py::test_large_sum",
"sparse/tests/test_core.py::test_add_many_sparse_arrays",
"sparse/tests/test_core.py::test_caching",
"sparse/tests/test_core.py::test_scalar_slicing",
"sparse/tests/test_core.py::test_triul[shape0-0]",
"sparse/tests/test_core.py::test_triul[shape1-1]",
"sparse/tests/test_core.py::test_triul[shape2--1]",
"sparse/tests/test_core.py::test_triul[shape3--2]",
"sparse/tests/test_core.py::test_triul[shape4-1000]",
"sparse/tests/test_core.py::test_empty_reduction",
"sparse/tests/test_core.py::test_random_shape[0.1-shape0]",
"sparse/tests/test_core.py::test_random_shape[0.1-shape1]",
"sparse/tests/test_core.py::test_random_shape[0.1-shape2]",
"sparse/tests/test_core.py::test_random_shape[0.3-shape0]",
"sparse/tests/test_core.py::test_random_shape[0.3-shape1]",
"sparse/tests/test_core.py::test_random_shape[0.3-shape2]",
"sparse/tests/test_core.py::test_random_shape[0.5-shape0]",
"sparse/tests/test_core.py::test_random_shape[0.5-shape1]",
"sparse/tests/test_core.py::test_random_shape[0.5-shape2]",
"sparse/tests/test_core.py::test_random_shape[0.7-shape0]",
"sparse/tests/test_core.py::test_random_shape[0.7-shape1]",
"sparse/tests/test_core.py::test_random_shape[0.7-shape2]",
"sparse/tests/test_core.py::test_two_random_unequal",
"sparse/tests/test_core.py::test_two_random_same_seed",
"sparse/tests/test_core.py::test_random_sorted",
"sparse/tests/test_core.py::test_random_rvs[0.0-shape0-None-float64]",
"sparse/tests/test_core.py::test_random_rvs[0.0-shape0-rvs-int]",
"sparse/tests/test_core.py::test_random_rvs[0.0-shape0-<lambda>-bool]",
"sparse/tests/test_core.py::test_random_rvs[0.0-shape1-None-float64]",
"sparse/tests/test_core.py::test_random_rvs[0.0-shape1-rvs-int]",
"sparse/tests/test_core.py::test_random_rvs[0.0-shape1-<lambda>-bool]",
"sparse/tests/test_core.py::test_random_rvs[0.01-shape0-None-float64]",
"sparse/tests/test_core.py::test_random_rvs[0.01-shape0-rvs-int]",
"sparse/tests/test_core.py::test_random_rvs[0.01-shape0-<lambda>-bool]",
"sparse/tests/test_core.py::test_random_rvs[0.01-shape1-None-float64]",
"sparse/tests/test_core.py::test_random_rvs[0.01-shape1-rvs-int]",
"sparse/tests/test_core.py::test_random_rvs[0.01-shape1-<lambda>-bool]",
"sparse/tests/test_core.py::test_random_rvs[0.1-shape0-None-float64]",
"sparse/tests/test_core.py::test_random_rvs[0.1-shape0-rvs-int]",
"sparse/tests/test_core.py::test_random_rvs[0.1-shape0-<lambda>-bool]",
"sparse/tests/test_core.py::test_random_rvs[0.1-shape1-None-float64]",
"sparse/tests/test_core.py::test_random_rvs[0.1-shape1-rvs-int]",
"sparse/tests/test_core.py::test_random_rvs[0.1-shape1-<lambda>-bool]",
"sparse/tests/test_core.py::test_random_rvs[0.2-shape0-None-float64]",
"sparse/tests/test_core.py::test_random_rvs[0.2-shape0-rvs-int]",
"sparse/tests/test_core.py::test_random_rvs[0.2-shape0-<lambda>-bool]",
"sparse/tests/test_core.py::test_random_rvs[0.2-shape1-None-float64]",
"sparse/tests/test_core.py::test_random_rvs[0.2-shape1-rvs-int]",
"sparse/tests/test_core.py::test_random_rvs[0.2-shape1-<lambda>-bool]",
"sparse/tests/test_core.py::test_scalar_shape_construction"
] | [] | Modified BSD License | 2,020 | 263 | [
"sparse/core.py"
] |
networkx__networkx-2816 | b271d45e1329ef65d888366c595c010070abe035 | 2018-01-07 03:23:01 | 93b4b9227aa8a7ac4cbd946cf3dae3b168e17b45 | diff --git a/networkx/classes/reportviews.py b/networkx/classes/reportviews.py
index ac5255f2f..6fe61f28f 100644
--- a/networkx/classes/reportviews.py
+++ b/networkx/classes/reportviews.py
@@ -791,8 +791,7 @@ class OutMultiEdgeDataView(OutEdgeDataView):
if data in dd else (n, nbr, default)
def __len__(self):
- return sum(len(kd) for n, nbrs in self._nodes_nbrs()
- for nbr, kd in nbrs.items())
+ return sum(1 for e in self)
def __iter__(self):
return (self._report(n, nbr, k, dd) for n, nbrs in self._nodes_nbrs()
@@ -821,10 +820,6 @@ class MultiEdgeDataView(OutMultiEdgeDataView):
"""An EdgeDataView class for edges of MultiGraph; See EdgeDataView"""
__slots__ = ()
- def __len__(self):
- # nbunch makes it hard to count edges between nodes in nbunch
- return sum(1 for e in self)
-
def __iter__(self):
seen = {}
for n, nbrs in self._nodes_nbrs():
@@ -1016,7 +1011,7 @@ class EdgeView(OutEdgeView):
dataview = EdgeDataView
def __len__(self):
- return sum(len(nbrs) for n, nbrs in self._nodes_nbrs()) // 2
+ return sum(len(nbrs) + (n in nbrs) for n, nbrs in self._nodes_nbrs()) // 2
def __iter__(self):
seen = {}
@@ -1120,8 +1115,7 @@ class MultiEdgeView(OutMultiEdgeView):
dataview = MultiEdgeDataView
def __len__(self):
- return sum(len(kdict) for n, nbrs in self._nodes_nbrs()
- for nbr, kdict in nbrs.items()) // 2
+ return sum(1 for e in self)
def __iter__(self):
seen = {}
| len(G.edges) unexpected values
I'm not sure if this is a bug or expected behavior but it's at least confusing. This is using 2.0 nx.Graph() - I would provide the data to recreate, but it's private and I'm not sure why this is occurring, which might just be my lack of knowledge
```
>>> len(G.edges())
300
>>> G.number_of_edges()
312
>>> count = 0
>>> s = set()
>>> for edge in G.edges():
... seen = edge in s or (edge[1], edge[0]) in s
... if not seen:
... count += 1
... s.add(edge)
>>> count
312
```
What would be likely reasons that len() would give a different answer than number_of_edges()? I thought it was because of reversed edges, but that doesn't seem to be the case either. | networkx/networkx | diff --git a/networkx/classes/tests/test_reportviews.py b/networkx/classes/tests/test_reportviews.py
index a4a5c2c1a..7fd96d4dd 100644
--- a/networkx/classes/tests/test_reportviews.py
+++ b/networkx/classes/tests/test_reportviews.py
@@ -326,6 +326,12 @@ class TestEdgeDataView(object):
assert_equal(len(self.G.edges()), 8)
assert_equal(len(self.G.edges), 8)
+ H = self.G.copy()
+ H.add_edge(1, 1)
+ assert_equal(len(H.edges(1)), 3)
+ assert_equal(len(H.edges()), 9)
+ assert_equal(len(H.edges), 9)
+
class TestOutEdgeDataView(TestEdgeDataView):
def setUp(self):
@@ -351,6 +357,12 @@ class TestOutEdgeDataView(TestEdgeDataView):
assert_equal(len(self.G.edges()), 8)
assert_equal(len(self.G.edges), 8)
+ H = self.G.copy()
+ H.add_edge(1, 1)
+ assert_equal(len(H.edges(1)), 2)
+ assert_equal(len(H.edges()), 9)
+ assert_equal(len(H.edges), 9)
+
class TestInEdgeDataView(TestOutEdgeDataView):
def setUp(self):
@@ -486,6 +498,12 @@ class TestEdgeView(object):
num_ed = 9 if self.G.is_multigraph() else 8
assert_equal(len(ev), num_ed)
+ H = self.G.copy()
+ H.add_edge(1, 1)
+ assert_equal(len(H.edges(1)), 3 + H.is_multigraph() - H.is_directed())
+ assert_equal(len(H.edges()), num_ed + 1)
+ assert_equal(len(H.edges), num_ed + 1)
+
def test_and(self):
# print("G & H edges:", gnv & hnv)
ev = self.eview(self.G)
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 2.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y libgdal-dev graphviz"
],
"python": "3.6",
"reqs_path": [
"requirements/default.txt",
"requirements/test.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
charset-normalizer==2.0.12
codecov==2.1.13
coverage==6.2
decorator==5.1.1
idna==3.10
importlib-metadata==4.8.3
iniconfig==1.1.1
-e git+https://github.com/networkx/networkx.git@b271d45e1329ef65d888366c595c010070abe035#egg=networkx
nose==1.3.7
nose-ignore-docstring==0.2
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
requests==2.27.1
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
zipp==3.6.0
| name: networkx
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- charset-normalizer==2.0.12
- codecov==2.1.13
- coverage==6.2
- decorator==5.1.1
- idna==3.10
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- nose==1.3.7
- nose-ignore-docstring==0.2
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- requests==2.27.1
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- zipp==3.6.0
prefix: /opt/conda/envs/networkx
| [
"networkx/classes/tests/test_reportviews.py::TestEdgeView::test_len",
"networkx/classes/tests/test_reportviews.py::TestMultiEdgeView::test_len"
] | [
"networkx/classes/tests/test_reportviews.py::TestNodeViewSetOps::test_len",
"networkx/classes/tests/test_reportviews.py::TestNodeViewSetOps::test_and",
"networkx/classes/tests/test_reportviews.py::TestNodeViewSetOps::test_or",
"networkx/classes/tests/test_reportviews.py::TestNodeViewSetOps::test_xor",
"networkx/classes/tests/test_reportviews.py::TestNodeViewSetOps::test_sub",
"networkx/classes/tests/test_reportviews.py::TestNodeDataViewSetOps::test_len",
"networkx/classes/tests/test_reportviews.py::TestNodeDataViewSetOps::test_and",
"networkx/classes/tests/test_reportviews.py::TestNodeDataViewSetOps::test_or",
"networkx/classes/tests/test_reportviews.py::TestNodeDataViewSetOps::test_xor",
"networkx/classes/tests/test_reportviews.py::TestNodeDataViewSetOps::test_sub",
"networkx/classes/tests/test_reportviews.py::TestNodeDataViewDefaultSetOps::test_len",
"networkx/classes/tests/test_reportviews.py::TestNodeDataViewDefaultSetOps::test_and",
"networkx/classes/tests/test_reportviews.py::TestNodeDataViewDefaultSetOps::test_or",
"networkx/classes/tests/test_reportviews.py::TestNodeDataViewDefaultSetOps::test_xor",
"networkx/classes/tests/test_reportviews.py::TestNodeDataViewDefaultSetOps::test_sub",
"networkx/classes/tests/test_reportviews.py::TestEdgeDataView::test_pickle",
"networkx/classes/tests/test_reportviews.py::TestEdgeDataView::test_str",
"networkx/classes/tests/test_reportviews.py::TestEdgeDataView::test_repr",
"networkx/classes/tests/test_reportviews.py::TestEdgeDataView::test_iterdata",
"networkx/classes/tests/test_reportviews.py::TestEdgeDataView::test_iter",
"networkx/classes/tests/test_reportviews.py::TestEdgeDataView::test_contains",
"networkx/classes/tests/test_reportviews.py::TestEdgeDataView::test_len",
"networkx/classes/tests/test_reportviews.py::TestOutEdgeDataView::test_pickle",
"networkx/classes/tests/test_reportviews.py::TestOutEdgeDataView::test_str",
"networkx/classes/tests/test_reportviews.py::TestOutEdgeDataView::test_iterdata",
"networkx/classes/tests/test_reportviews.py::TestOutEdgeDataView::test_iter",
"networkx/classes/tests/test_reportviews.py::TestOutEdgeDataView::test_contains",
"networkx/classes/tests/test_reportviews.py::TestOutEdgeDataView::test_repr",
"networkx/classes/tests/test_reportviews.py::TestOutEdgeDataView::test_len",
"networkx/classes/tests/test_reportviews.py::TestInEdgeDataView::test_pickle",
"networkx/classes/tests/test_reportviews.py::TestInEdgeDataView::test_str",
"networkx/classes/tests/test_reportviews.py::TestInEdgeDataView::test_iterdata",
"networkx/classes/tests/test_reportviews.py::TestInEdgeDataView::test_iter",
"networkx/classes/tests/test_reportviews.py::TestInEdgeDataView::test_contains",
"networkx/classes/tests/test_reportviews.py::TestInEdgeDataView::test_len",
"networkx/classes/tests/test_reportviews.py::TestInEdgeDataView::test_repr",
"networkx/classes/tests/test_reportviews.py::TestMultiEdgeDataView::test_pickle",
"networkx/classes/tests/test_reportviews.py::TestMultiEdgeDataView::test_str",
"networkx/classes/tests/test_reportviews.py::TestMultiEdgeDataView::test_iterdata",
"networkx/classes/tests/test_reportviews.py::TestMultiEdgeDataView::test_iter",
"networkx/classes/tests/test_reportviews.py::TestMultiEdgeDataView::test_contains",
"networkx/classes/tests/test_reportviews.py::TestMultiEdgeDataView::test_len",
"networkx/classes/tests/test_reportviews.py::TestMultiEdgeDataView::test_repr",
"networkx/classes/tests/test_reportviews.py::TestOutMultiEdgeDataView::test_pickle",
"networkx/classes/tests/test_reportviews.py::TestOutMultiEdgeDataView::test_str",
"networkx/classes/tests/test_reportviews.py::TestOutMultiEdgeDataView::test_iterdata",
"networkx/classes/tests/test_reportviews.py::TestOutMultiEdgeDataView::test_iter",
"networkx/classes/tests/test_reportviews.py::TestOutMultiEdgeDataView::test_contains",
"networkx/classes/tests/test_reportviews.py::TestOutMultiEdgeDataView::test_len",
"networkx/classes/tests/test_reportviews.py::TestOutMultiEdgeDataView::test_repr",
"networkx/classes/tests/test_reportviews.py::TestInMultiEdgeDataView::test_pickle",
"networkx/classes/tests/test_reportviews.py::TestInMultiEdgeDataView::test_str",
"networkx/classes/tests/test_reportviews.py::TestInMultiEdgeDataView::test_iterdata",
"networkx/classes/tests/test_reportviews.py::TestInMultiEdgeDataView::test_iter",
"networkx/classes/tests/test_reportviews.py::TestInMultiEdgeDataView::test_contains",
"networkx/classes/tests/test_reportviews.py::TestInMultiEdgeDataView::test_len",
"networkx/classes/tests/test_reportviews.py::TestInMultiEdgeDataView::test_repr"
] | [
"networkx/classes/tests/test_reportviews.py::TestNodeView::test_pickle",
"networkx/classes/tests/test_reportviews.py::TestNodeView::test_str",
"networkx/classes/tests/test_reportviews.py::TestNodeView::test_repr",
"networkx/classes/tests/test_reportviews.py::TestNodeView::test_contains",
"networkx/classes/tests/test_reportviews.py::TestNodeView::test_getitem",
"networkx/classes/tests/test_reportviews.py::TestNodeView::test_iter",
"networkx/classes/tests/test_reportviews.py::TestNodeView::test_call",
"networkx/classes/tests/test_reportviews.py::TestNodeDataView::test_viewtype",
"networkx/classes/tests/test_reportviews.py::TestNodeDataView::test_pickle",
"networkx/classes/tests/test_reportviews.py::TestNodeDataView::test_str",
"networkx/classes/tests/test_reportviews.py::TestNodeDataView::test_repr",
"networkx/classes/tests/test_reportviews.py::TestNodeDataView::test_contains",
"networkx/classes/tests/test_reportviews.py::TestNodeDataView::test_getitem",
"networkx/classes/tests/test_reportviews.py::TestNodeDataView::test_iter",
"networkx/classes/tests/test_reportviews.py::test_nodedataview_unhashable",
"networkx/classes/tests/test_reportviews.py::TestEdgeView::test_pickle",
"networkx/classes/tests/test_reportviews.py::TestEdgeView::test_str",
"networkx/classes/tests/test_reportviews.py::TestEdgeView::test_repr",
"networkx/classes/tests/test_reportviews.py::TestEdgeView::test_call",
"networkx/classes/tests/test_reportviews.py::TestEdgeView::test_data",
"networkx/classes/tests/test_reportviews.py::TestEdgeView::test_iter",
"networkx/classes/tests/test_reportviews.py::TestEdgeView::test_contains",
"networkx/classes/tests/test_reportviews.py::TestEdgeView::test_and",
"networkx/classes/tests/test_reportviews.py::TestEdgeView::test_or",
"networkx/classes/tests/test_reportviews.py::TestEdgeView::test_xor",
"networkx/classes/tests/test_reportviews.py::TestEdgeView::test_sub",
"networkx/classes/tests/test_reportviews.py::TestOutEdgeView::test_pickle",
"networkx/classes/tests/test_reportviews.py::TestOutEdgeView::test_str",
"networkx/classes/tests/test_reportviews.py::TestOutEdgeView::test_call",
"networkx/classes/tests/test_reportviews.py::TestOutEdgeView::test_data",
"networkx/classes/tests/test_reportviews.py::TestOutEdgeView::test_iter",
"networkx/classes/tests/test_reportviews.py::TestOutEdgeView::test_contains",
"networkx/classes/tests/test_reportviews.py::TestOutEdgeView::test_len",
"networkx/classes/tests/test_reportviews.py::TestOutEdgeView::test_and",
"networkx/classes/tests/test_reportviews.py::TestOutEdgeView::test_or",
"networkx/classes/tests/test_reportviews.py::TestOutEdgeView::test_xor",
"networkx/classes/tests/test_reportviews.py::TestOutEdgeView::test_sub",
"networkx/classes/tests/test_reportviews.py::TestOutEdgeView::test_repr",
"networkx/classes/tests/test_reportviews.py::TestInEdgeView::test_pickle",
"networkx/classes/tests/test_reportviews.py::TestInEdgeView::test_str",
"networkx/classes/tests/test_reportviews.py::TestInEdgeView::test_call",
"networkx/classes/tests/test_reportviews.py::TestInEdgeView::test_data",
"networkx/classes/tests/test_reportviews.py::TestInEdgeView::test_iter",
"networkx/classes/tests/test_reportviews.py::TestInEdgeView::test_contains",
"networkx/classes/tests/test_reportviews.py::TestInEdgeView::test_len",
"networkx/classes/tests/test_reportviews.py::TestInEdgeView::test_and",
"networkx/classes/tests/test_reportviews.py::TestInEdgeView::test_or",
"networkx/classes/tests/test_reportviews.py::TestInEdgeView::test_xor",
"networkx/classes/tests/test_reportviews.py::TestInEdgeView::test_sub",
"networkx/classes/tests/test_reportviews.py::TestInEdgeView::test_repr",
"networkx/classes/tests/test_reportviews.py::TestMultiEdgeView::test_pickle",
"networkx/classes/tests/test_reportviews.py::TestMultiEdgeView::test_contains",
"networkx/classes/tests/test_reportviews.py::TestMultiEdgeView::test_str",
"networkx/classes/tests/test_reportviews.py::TestMultiEdgeView::test_repr",
"networkx/classes/tests/test_reportviews.py::TestMultiEdgeView::test_call",
"networkx/classes/tests/test_reportviews.py::TestMultiEdgeView::test_data",
"networkx/classes/tests/test_reportviews.py::TestMultiEdgeView::test_iter",
"networkx/classes/tests/test_reportviews.py::TestMultiEdgeView::test_iterkeys",
"networkx/classes/tests/test_reportviews.py::TestMultiEdgeView::test_or",
"networkx/classes/tests/test_reportviews.py::TestMultiEdgeView::test_sub",
"networkx/classes/tests/test_reportviews.py::TestMultiEdgeView::test_xor",
"networkx/classes/tests/test_reportviews.py::TestMultiEdgeView::test_and",
"networkx/classes/tests/test_reportviews.py::TestOutMultiEdgeView::test_pickle",
"networkx/classes/tests/test_reportviews.py::TestOutMultiEdgeView::test_contains",
"networkx/classes/tests/test_reportviews.py::TestOutMultiEdgeView::test_len",
"networkx/classes/tests/test_reportviews.py::TestOutMultiEdgeView::test_str",
"networkx/classes/tests/test_reportviews.py::TestOutMultiEdgeView::test_call",
"networkx/classes/tests/test_reportviews.py::TestOutMultiEdgeView::test_data",
"networkx/classes/tests/test_reportviews.py::TestOutMultiEdgeView::test_iter",
"networkx/classes/tests/test_reportviews.py::TestOutMultiEdgeView::test_iterkeys",
"networkx/classes/tests/test_reportviews.py::TestOutMultiEdgeView::test_or",
"networkx/classes/tests/test_reportviews.py::TestOutMultiEdgeView::test_sub",
"networkx/classes/tests/test_reportviews.py::TestOutMultiEdgeView::test_xor",
"networkx/classes/tests/test_reportviews.py::TestOutMultiEdgeView::test_and",
"networkx/classes/tests/test_reportviews.py::TestOutMultiEdgeView::test_repr",
"networkx/classes/tests/test_reportviews.py::TestInMultiEdgeView::test_pickle",
"networkx/classes/tests/test_reportviews.py::TestInMultiEdgeView::test_contains",
"networkx/classes/tests/test_reportviews.py::TestInMultiEdgeView::test_len",
"networkx/classes/tests/test_reportviews.py::TestInMultiEdgeView::test_str",
"networkx/classes/tests/test_reportviews.py::TestInMultiEdgeView::test_call",
"networkx/classes/tests/test_reportviews.py::TestInMultiEdgeView::test_data",
"networkx/classes/tests/test_reportviews.py::TestInMultiEdgeView::test_iter",
"networkx/classes/tests/test_reportviews.py::TestInMultiEdgeView::test_iterkeys",
"networkx/classes/tests/test_reportviews.py::TestInMultiEdgeView::test_or",
"networkx/classes/tests/test_reportviews.py::TestInMultiEdgeView::test_sub",
"networkx/classes/tests/test_reportviews.py::TestInMultiEdgeView::test_xor",
"networkx/classes/tests/test_reportviews.py::TestInMultiEdgeView::test_and",
"networkx/classes/tests/test_reportviews.py::TestInMultiEdgeView::test_repr",
"networkx/classes/tests/test_reportviews.py::TestDegreeView::test_pickle",
"networkx/classes/tests/test_reportviews.py::TestDegreeView::test_str",
"networkx/classes/tests/test_reportviews.py::TestDegreeView::test_repr",
"networkx/classes/tests/test_reportviews.py::TestDegreeView::test_iter",
"networkx/classes/tests/test_reportviews.py::TestDegreeView::test_nbunch",
"networkx/classes/tests/test_reportviews.py::TestDegreeView::test_getitem",
"networkx/classes/tests/test_reportviews.py::TestDegreeView::test_weight",
"networkx/classes/tests/test_reportviews.py::TestDegreeView::test_len",
"networkx/classes/tests/test_reportviews.py::TestDiDegreeView::test_pickle",
"networkx/classes/tests/test_reportviews.py::TestDiDegreeView::test_str",
"networkx/classes/tests/test_reportviews.py::TestDiDegreeView::test_iter",
"networkx/classes/tests/test_reportviews.py::TestDiDegreeView::test_nbunch",
"networkx/classes/tests/test_reportviews.py::TestDiDegreeView::test_getitem",
"networkx/classes/tests/test_reportviews.py::TestDiDegreeView::test_weight",
"networkx/classes/tests/test_reportviews.py::TestDiDegreeView::test_len",
"networkx/classes/tests/test_reportviews.py::TestDiDegreeView::test_repr",
"networkx/classes/tests/test_reportviews.py::TestOutDegreeView::test_pickle",
"networkx/classes/tests/test_reportviews.py::TestOutDegreeView::test_iter",
"networkx/classes/tests/test_reportviews.py::TestOutDegreeView::test_len",
"networkx/classes/tests/test_reportviews.py::TestOutDegreeView::test_str",
"networkx/classes/tests/test_reportviews.py::TestOutDegreeView::test_repr",
"networkx/classes/tests/test_reportviews.py::TestOutDegreeView::test_nbunch",
"networkx/classes/tests/test_reportviews.py::TestOutDegreeView::test_getitem",
"networkx/classes/tests/test_reportviews.py::TestOutDegreeView::test_weight",
"networkx/classes/tests/test_reportviews.py::TestInDegreeView::test_pickle",
"networkx/classes/tests/test_reportviews.py::TestInDegreeView::test_iter",
"networkx/classes/tests/test_reportviews.py::TestInDegreeView::test_len",
"networkx/classes/tests/test_reportviews.py::TestInDegreeView::test_str",
"networkx/classes/tests/test_reportviews.py::TestInDegreeView::test_repr",
"networkx/classes/tests/test_reportviews.py::TestInDegreeView::test_nbunch",
"networkx/classes/tests/test_reportviews.py::TestInDegreeView::test_getitem",
"networkx/classes/tests/test_reportviews.py::TestInDegreeView::test_weight",
"networkx/classes/tests/test_reportviews.py::TestMultiDegreeView::test_pickle",
"networkx/classes/tests/test_reportviews.py::TestMultiDegreeView::test_iter",
"networkx/classes/tests/test_reportviews.py::TestMultiDegreeView::test_len",
"networkx/classes/tests/test_reportviews.py::TestMultiDegreeView::test_str",
"networkx/classes/tests/test_reportviews.py::TestMultiDegreeView::test_repr",
"networkx/classes/tests/test_reportviews.py::TestMultiDegreeView::test_nbunch",
"networkx/classes/tests/test_reportviews.py::TestMultiDegreeView::test_getitem",
"networkx/classes/tests/test_reportviews.py::TestMultiDegreeView::test_weight",
"networkx/classes/tests/test_reportviews.py::TestDiMultiDegreeView::test_pickle",
"networkx/classes/tests/test_reportviews.py::TestDiMultiDegreeView::test_iter",
"networkx/classes/tests/test_reportviews.py::TestDiMultiDegreeView::test_len",
"networkx/classes/tests/test_reportviews.py::TestDiMultiDegreeView::test_str",
"networkx/classes/tests/test_reportviews.py::TestDiMultiDegreeView::test_nbunch",
"networkx/classes/tests/test_reportviews.py::TestDiMultiDegreeView::test_getitem",
"networkx/classes/tests/test_reportviews.py::TestDiMultiDegreeView::test_weight",
"networkx/classes/tests/test_reportviews.py::TestDiMultiDegreeView::test_repr",
"networkx/classes/tests/test_reportviews.py::TestOutMultiDegreeView::test_pickle",
"networkx/classes/tests/test_reportviews.py::TestOutMultiDegreeView::test_iter",
"networkx/classes/tests/test_reportviews.py::TestOutMultiDegreeView::test_len",
"networkx/classes/tests/test_reportviews.py::TestOutMultiDegreeView::test_str",
"networkx/classes/tests/test_reportviews.py::TestOutMultiDegreeView::test_repr",
"networkx/classes/tests/test_reportviews.py::TestOutMultiDegreeView::test_nbunch",
"networkx/classes/tests/test_reportviews.py::TestOutMultiDegreeView::test_getitem",
"networkx/classes/tests/test_reportviews.py::TestOutMultiDegreeView::test_weight",
"networkx/classes/tests/test_reportviews.py::TestInMultiDegreeView::test_pickle",
"networkx/classes/tests/test_reportviews.py::TestInMultiDegreeView::test_iter",
"networkx/classes/tests/test_reportviews.py::TestInMultiDegreeView::test_len",
"networkx/classes/tests/test_reportviews.py::TestInMultiDegreeView::test_str",
"networkx/classes/tests/test_reportviews.py::TestInMultiDegreeView::test_repr",
"networkx/classes/tests/test_reportviews.py::TestInMultiDegreeView::test_nbunch",
"networkx/classes/tests/test_reportviews.py::TestInMultiDegreeView::test_getitem",
"networkx/classes/tests/test_reportviews.py::TestInMultiDegreeView::test_weight"
] | [] | BSD 3-Clause | 2,021 | 508 | [
"networkx/classes/reportviews.py"
] |
|
PlasmaPy__PlasmaPy-228 | 82eece6d5648641af1878f6846240dbf2a37a190 | 2018-01-07 15:28:48 | 82eece6d5648641af1878f6846240dbf2a37a190 | pep8speaks: Hello @siddharth185! Thanks for submitting the PR.
- In the file [`plasmapy/mathematics/mathematics.py`](https://github.com/PlasmaPy/PlasmaPy/blob/a19f915b468467cd9564f5417d5d38077ec4698a/plasmapy/mathematics/mathematics.py), following are the PEP8 issues :
> [Line 41:80](https://github.com/PlasmaPy/PlasmaPy/blob/a19f915b468467cd9564f5417d5d38077ec4698a/plasmapy/mathematics/mathematics.py#L41): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (82 > 79 characters)
> [Line 119:80](https://github.com/PlasmaPy/PlasmaPy/blob/a19f915b468467cd9564f5417d5d38077ec4698a/plasmapy/mathematics/mathematics.py#L119): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (87 > 79 characters)
StanczakDominik: As for the test failures... suddenly all the quantity docstrings seem to be failing, but @siddharth185 didn't change anything related to that. I checked on master at my own fork and well hello there, they're failing on master. This is looking like an @astropy change... I'm looking into it, because I think a potential change on their end may have fixed #135.
StanczakDominik: The newest changes on master should solve the docstring problem.
StanczakDominik: I'm pretty sure this can be merged after it's rebased on top of master! I'll make sure and merge it tonight.
StanczakDominik: Merging it. Thanks, @siddharth185 ! | diff --git a/plasmapy/mathematics/mathematics.py b/plasmapy/mathematics/mathematics.py
index b20ed5e2..58c10e5f 100644
--- a/plasmapy/mathematics/mathematics.py
+++ b/plasmapy/mathematics/mathematics.py
@@ -3,6 +3,7 @@
import numpy as np
from scipy import special
from astropy import units as u
+from scipy.special import wofz as Faddeeva_function
def plasma_dispersion_func(zeta):
@@ -57,9 +58,9 @@ def plasma_dispersion_func(zeta):
>>> plasma_dispersion_func(0)
1.7724538509055159j
>>> plasma_dispersion_func(1j)
- 0.7578721561413119j
+ 0.757872156141312j
>>> plasma_dispersion_func(-1.52+0.47j)
- (0.6088888957234255+0.3349458388287403j)
+ (0.6088888957234254+0.33494583882874024j)
"""
@@ -79,7 +80,7 @@ def plasma_dispersion_func(zeta):
raise ValueError("The argument to plasma_dispersion_function is "
"not finite.")
- Z = 1j * np.sqrt(np.pi) * np.exp(-zeta**2) * (1.0 + special.erf(1j * zeta))
+ Z = 1j * np.sqrt(np.pi) * Faddeeva_function(zeta)
return Z
@@ -124,9 +125,9 @@ def plasma_dispersion_func_deriv(zeta):
>>> plasma_dispersion_func_deriv(0)
(-2+0j)
>>> plasma_dispersion_func_deriv(1j)
- (-0.48425568771737626+0j)
+ (-0.48425568771737604+0j)
>>> plasma_dispersion_func_deriv(-1.52+0.47j)
- (0.1658713314982294+0.4458797880593507j)
+ (0.16587133149822897+0.44587978805935047j)
"""
| Use Dawson function for dispersion
`plasma_dispersion_func` under `mathematics.py` currently uses `erf()` along with some other terms. This can be simplified to Dawson function, [dawsn](https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.dawsn.html), and may even offer some minor speedups if scipy implements it in C code. | PlasmaPy/PlasmaPy | diff --git a/plasmapy/mathematics/tests/test_dispersion.py b/plasmapy/mathematics/tests/test_dispersion.py
index 9dabb8e6..1c7eb3ca 100644
--- a/plasmapy/mathematics/tests/test_dispersion.py
+++ b/plasmapy/mathematics/tests/test_dispersion.py
@@ -15,7 +15,7 @@
(0, 1j * np.sqrt(π)),
(1, -1.076_159_013_825_536_8 + 0.652_049_332_173_292_2j),
(1j, 0.757_872_156_141_311_87j),
- (1.2 + 4.4j, -0.054_246_146_372_377_471 + 0.207_960_589_336_958_13j),
+ (1.2 + 4.4j, -0.054_246_157_069_223_27+0.207_960_584_359_855_62j),
(9.2j, plasma_dispersion_func(9.2j * units.dimensionless_unscaled)),
(5.4 - 3.1j, -0.139_224_873_051_713_11 - 0.082_067_822_640_155_802j),
(9.9 - 10j, 2.013_835_257_947_027_6 - 25.901_274_737_989_727j),
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 1
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements/automated-code-tests.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | astropy==6.0.1
astropy-iers-data==0.2025.3.31.0.36.18
certifi==2025.1.31
charset-normalizer==3.4.1
coverage==7.8.0
coveralls==4.0.1
docopt==0.6.2
exceptiongroup==1.2.2
flake8==7.2.0
idna==3.10
iniconfig==2.1.0
mccabe==0.7.0
numpy==1.26.4
packaging==24.2
-e git+https://github.com/PlasmaPy/PlasmaPy.git@82eece6d5648641af1878f6846240dbf2a37a190#egg=plasmapy
pluggy==1.5.0
pycodestyle==2.13.0
pyerfa==2.0.1.5
pyflakes==3.3.2
pytest==8.3.5
PyYAML==6.0.2
requests==2.32.3
scipy==1.13.1
tomli==2.2.1
urllib3==2.3.0
| name: PlasmaPy
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- astropy==6.0.1
- astropy-iers-data==0.2025.3.31.0.36.18
- certifi==2025.1.31
- charset-normalizer==3.4.1
- coverage==7.8.0
- coveralls==4.0.1
- docopt==0.6.2
- exceptiongroup==1.2.2
- flake8==7.2.0
- idna==3.10
- iniconfig==2.1.0
- mccabe==0.7.0
- numpy==1.26.4
- packaging==24.2
- pluggy==1.5.0
- pycodestyle==2.13.0
- pyerfa==2.0.1.5
- pyflakes==3.3.2
- pytest==8.3.5
- pyyaml==6.0.2
- requests==2.32.3
- scipy==1.13.1
- tomli==2.2.1
- urllib3==2.3.0
prefix: /opt/conda/envs/PlasmaPy
| [
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_func[(1.2+4.4j)-(-0.05424615706922327+0.20796058435985562j)]",
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_func[9.2j-0.10806460304119532j]",
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_func_deriv[9j-(-0.012123822585585753+0j)]"
] | [
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_func_errors[-TypeError]",
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_func_errors[w1-UnitsError]",
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_func_errors[inf-ValueError]",
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_func_errors[nan-ValueError]",
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_deriv_errors[-TypeError]",
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_deriv_errors[w1-UnitsError]",
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_deriv_errors[inf-ValueError]",
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_deriv_errors[nan-ValueError]"
] | [
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_func[0-1.7724538509055159j]",
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_func[1-(-1.0761590138255368+0.6520493321732922j)]",
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_func[1j-0.7578721561413119j]",
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_func[(5.4-3.1j)-(-0.1392248730517131-0.0820678226401558j)]",
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_func[(9.9-10j)-(2.0138352579470276-25.901274737989727j)]",
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_func[(4.5-10j)-(-1.3674950463400947e+35-6.853923234842271e+34j)]",
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_func_power_series_expansion",
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_func_roots",
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_func_deriv[0--2]",
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_func_deriv[1-(0.152318-1.3041j)]",
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_func_deriv[1j--0.484257]",
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_func_deriv[(1.2+4.4j)-(-0.0397561-0.0217392j)]",
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_func_deriv[(5.4-3.1j)-(0.0124491+0.0231383j)]",
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_func_deriv[(9.9-10j)-(476.153+553.121j)]",
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_func_deriv[(5+7j)-(-0.0045912-0.0126104j)]",
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_func_deriv[(4.5-10j)-(2.60153e+36-2.11814e+36j)]"
] | [] | BSD 3-Clause "New" or "Revised" License | 2,022 | 654 | [
"plasmapy/mathematics/mathematics.py"
] |
dpkp__kafka-python-1338 | a69320b8e3199fa9d7cfa3947a242e699a045c3b | 2018-01-10 00:58:02 | 618c5051493693c1305aa9f08e8a0583d5fcf0e3 | diff --git a/kafka/coordinator/base.py b/kafka/coordinator/base.py
index 30b9c40..24412c9 100644
--- a/kafka/coordinator/base.py
+++ b/kafka/coordinator/base.py
@@ -377,19 +377,23 @@ class BaseCoordinator(object):
# before the pending rebalance has completed.
if self.join_future is None:
self.state = MemberState.REBALANCING
- self.join_future = self._send_join_group_request()
+ future = self._send_join_group_request()
+
+ self.join_future = future # this should happen before adding callbacks
# handle join completion in the callback so that the
# callback will be invoked even if the consumer is woken up
# before finishing the rebalance
- self.join_future.add_callback(self._handle_join_success)
+ future.add_callback(self._handle_join_success)
# we handle failures below after the request finishes.
# If the join completes after having been woken up, the
# exception is ignored and we will rejoin
- self.join_future.add_errback(self._handle_join_failure)
+ future.add_errback(self._handle_join_failure)
+
+ else:
+ future = self.join_future
- future = self.join_future
self._client.poll(future=future)
if future.failed():
| AttributeError: 'NoneType' object has no attribute 'failed'
Via #1315 comments:
```
Traceback (most recent call last):
File "./client_staging.py", line 53, in <module>
results = consumer.poll(timeout_ms=10000, max_records=1)
File "/usr/local/lib/python2.7/dist-packages/kafka/consumer/group.py", line 601, in poll
records = self._poll_once(remaining, max_records)
File "/usr/local/lib/python2.7/dist-packages/kafka/consumer/group.py", line 621, in _poll_once
self._coordinator.poll()
File "/usr/local/lib/python2.7/dist-packages/kafka/coordinator/consumer.py", line 271, in poll
self.ensure_active_group()
File "/usr/local/lib/python2.7/dist-packages/kafka/coordinator/base.py", line 401, in ensure_active_group
if future.failed():
AttributeError: 'NoneType' object has no attribute 'failed'
``` | dpkp/kafka-python | diff --git a/test/test_coordinator.py b/test/test_coordinator.py
index 7dc0e04..f567369 100644
--- a/test/test_coordinator.py
+++ b/test/test_coordinator.py
@@ -620,3 +620,16 @@ def test_lookup_coordinator_failure(mocker, coordinator):
return_value=Future().failure(Exception('foobar')))
future = coordinator.lookup_coordinator()
assert future.failed()
+
+
+def test_ensure_active_group(mocker, coordinator):
+ coordinator._subscription.subscribe(topics=['foobar'])
+ mocker.patch.object(coordinator, 'coordinator_unknown', return_value=False)
+ mocker.patch.object(coordinator, '_send_join_group_request', return_value=Future().success(True))
+ mocker.patch.object(coordinator, 'need_rejoin', side_effect=[True, True, False])
+ mocker.patch.object(coordinator, '_on_join_complete')
+ mocker.patch.object(coordinator, '_heartbeat_thread')
+
+ coordinator.ensure_active_group()
+
+ coordinator._send_join_group_request.assert_called_once_with()
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_issue_reference"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 1
} | 1.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-catchlog",
"pytest-sugar",
"pytest-mock",
"mock",
"python-snappy",
"lz4",
"xxhash"
],
"pre_install": [
"apt-get update",
"apt-get install -y libsnappy-dev"
],
"python": "3.6",
"reqs_path": [
"docs/requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
attrs==22.2.0
Babel==2.11.0
certifi==2021.5.30
charset-normalizer==2.0.12
coverage==6.2
cramjam==2.5.0
docutils==0.18.1
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
iniconfig==1.1.1
Jinja2==3.0.3
-e git+https://github.com/dpkp/kafka-python.git@a69320b8e3199fa9d7cfa3947a242e699a045c3b#egg=kafka_python
lz4==3.1.10
MarkupSafe==2.0.1
mock==5.2.0
packaging==21.3
pluggy==1.0.0
pockets==0.9.1
py==1.11.0
Pygments==2.14.0
pyparsing==3.1.4
pytest==7.0.1
pytest-catchlog==1.2.2
pytest-cov==4.0.0
pytest-mock==3.6.1
pytest-sugar==0.9.6
python-snappy==0.7.3
pytz==2025.2
requests==2.27.1
six==1.17.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinx-rtd-theme==2.0.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jquery==4.1
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-napoleon==0.7
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
termcolor==1.1.0
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
xxhash==3.2.0
zipp==3.6.0
| name: kafka-python
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- attrs==22.2.0
- babel==2.11.0
- charset-normalizer==2.0.12
- coverage==6.2
- cramjam==2.5.0
- docutils==0.18.1
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- jinja2==3.0.3
- lz4==3.1.10
- markupsafe==2.0.1
- mock==5.2.0
- packaging==21.3
- pluggy==1.0.0
- pockets==0.9.1
- py==1.11.0
- pygments==2.14.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-catchlog==1.2.2
- pytest-cov==4.0.0
- pytest-mock==3.6.1
- pytest-sugar==0.9.6
- python-snappy==0.7.3
- pytz==2025.2
- requests==2.27.1
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinx-rtd-theme==2.0.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jquery==4.1
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-napoleon==0.7
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- termcolor==1.1.0
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- xxhash==3.2.0
- zipp==3.6.0
prefix: /opt/conda/envs/kafka-python
| [
"test/test_coordinator.py::test_ensure_active_group"
] | [] | [
"test/test_coordinator.py::test_init",
"test/test_coordinator.py::test_autocommit_enable_api_version[api_version0]",
"test/test_coordinator.py::test_autocommit_enable_api_version[api_version1]",
"test/test_coordinator.py::test_autocommit_enable_api_version[api_version2]",
"test/test_coordinator.py::test_autocommit_enable_api_version[api_version3]",
"test/test_coordinator.py::test_protocol_type",
"test/test_coordinator.py::test_group_protocols",
"test/test_coordinator.py::test_pattern_subscription[api_version0]",
"test/test_coordinator.py::test_pattern_subscription[api_version1]",
"test/test_coordinator.py::test_pattern_subscription[api_version2]",
"test/test_coordinator.py::test_pattern_subscription[api_version3]",
"test/test_coordinator.py::test_lookup_assignor",
"test/test_coordinator.py::test_join_complete",
"test/test_coordinator.py::test_subscription_listener",
"test/test_coordinator.py::test_subscription_listener_failure",
"test/test_coordinator.py::test_perform_assignment",
"test/test_coordinator.py::test_on_join_prepare",
"test/test_coordinator.py::test_need_rejoin",
"test/test_coordinator.py::test_refresh_committed_offsets_if_needed",
"test/test_coordinator.py::test_fetch_committed_offsets",
"test/test_coordinator.py::test_close",
"test/test_coordinator.py::test_commit_offsets_async",
"test/test_coordinator.py::test_commit_offsets_sync",
"test/test_coordinator.py::test_maybe_auto_commit_offsets_sync[api_version0-foobar-True-None-False-False-True-False]",
"test/test_coordinator.py::test_maybe_auto_commit_offsets_sync[api_version1-foobar-True-None-True-True-False-False]",
"test/test_coordinator.py::test_maybe_auto_commit_offsets_sync[api_version2-foobar-True-None-True-True-False-False]",
"test/test_coordinator.py::test_maybe_auto_commit_offsets_sync[api_version3-foobar-False-None-False-False-False-False]",
"test/test_coordinator.py::test_maybe_auto_commit_offsets_sync[api_version4-foobar-True-error4-True-True-True-False]",
"test/test_coordinator.py::test_maybe_auto_commit_offsets_sync[api_version5-foobar-True-error5-True-True-True-False]",
"test/test_coordinator.py::test_maybe_auto_commit_offsets_sync[api_version6-foobar-True-error6-True-True-True-False]",
"test/test_coordinator.py::test_maybe_auto_commit_offsets_sync[api_version7-foobar-True-error7-True-True-False-True]",
"test/test_coordinator.py::test_maybe_auto_commit_offsets_sync[api_version8-foobar-True-None-True-True-False-False]",
"test/test_coordinator.py::test_maybe_auto_commit_offsets_sync[api_version9-None-True-None-False-False-True-False]",
"test/test_coordinator.py::test_send_offset_commit_request_fail",
"test/test_coordinator.py::test_send_offset_commit_request_versions[api_version0-OffsetCommitRequest_v0]",
"test/test_coordinator.py::test_send_offset_commit_request_versions[api_version1-OffsetCommitRequest_v1]",
"test/test_coordinator.py::test_send_offset_commit_request_versions[api_version2-OffsetCommitRequest_v2]",
"test/test_coordinator.py::test_send_offset_commit_request_failure",
"test/test_coordinator.py::test_send_offset_commit_request_success",
"test/test_coordinator.py::test_handle_offset_commit_response[response0-GroupAuthorizationFailedError-False-False]",
"test/test_coordinator.py::test_handle_offset_commit_response[response1-OffsetMetadataTooLargeError-False-False]",
"test/test_coordinator.py::test_handle_offset_commit_response[response2-InvalidCommitOffsetSizeError-False-False]",
"test/test_coordinator.py::test_handle_offset_commit_response[response3-GroupLoadInProgressError-False-False]",
"test/test_coordinator.py::test_handle_offset_commit_response[response4-GroupCoordinatorNotAvailableError-True-False]",
"test/test_coordinator.py::test_handle_offset_commit_response[response5-NotCoordinatorForGroupError-True-False]",
"test/test_coordinator.py::test_handle_offset_commit_response[response6-RequestTimedOutError-True-False]",
"test/test_coordinator.py::test_handle_offset_commit_response[response7-CommitFailedError-False-True]",
"test/test_coordinator.py::test_handle_offset_commit_response[response8-CommitFailedError-False-True]",
"test/test_coordinator.py::test_handle_offset_commit_response[response9-CommitFailedError-False-True]",
"test/test_coordinator.py::test_handle_offset_commit_response[response10-InvalidTopicError-False-False]",
"test/test_coordinator.py::test_handle_offset_commit_response[response11-TopicAuthorizationFailedError-False-False]",
"test/test_coordinator.py::test_send_offset_fetch_request_fail",
"test/test_coordinator.py::test_send_offset_fetch_request_versions[api_version0-OffsetFetchRequest_v0]",
"test/test_coordinator.py::test_send_offset_fetch_request_versions[api_version1-OffsetFetchRequest_v1]",
"test/test_coordinator.py::test_send_offset_fetch_request_versions[api_version2-OffsetFetchRequest_v1]",
"test/test_coordinator.py::test_send_offset_fetch_request_failure",
"test/test_coordinator.py::test_send_offset_fetch_request_success",
"test/test_coordinator.py::test_handle_offset_fetch_response[response0-GroupLoadInProgressError-False]",
"test/test_coordinator.py::test_handle_offset_fetch_response[response1-NotCoordinatorForGroupError-True]",
"test/test_coordinator.py::test_handle_offset_fetch_response[response2-TopicAuthorizationFailedError-False]",
"test/test_coordinator.py::test_handle_offset_fetch_response[response3-None-False]",
"test/test_coordinator.py::test_heartbeat",
"test/test_coordinator.py::test_lookup_coordinator_failure"
] | [] | Apache License 2.0 | 2,030 | 312 | [
"kafka/coordinator/base.py"
] |
|
ultrabug__py3status-1221 | dcd3dda64b82e536cfd0233691d374a22e96aeac | 2018-01-10 08:09:08 | dcd3dda64b82e536cfd0233691d374a22e96aeac | tobes: IMHO this breaks the formatter, because the original tests fail.
With #1095 I do not actually understand what the issue is that you are describing. Maybe if I understood that then this would make more sense.
Now maybe the current behaviour of the formatter is incorrect. Then we should have a clear discussion about that and how to improve it.
lasers: Brief explanation. Apply the git patch. I included some examples. Test each as you go.
```
+ # format = 'A, B, C // {true}' # IS OK | A, B, C // True
+ # format = 'A, B, C // {false}' # IS NOT OK |
+ # format = 'A, B, C // {none}' # IS NOT OK |
+ # format = 'A, B, C // {false} ' # IS OK | A, B, C // False
+ # format = 'A, B, C // {none} ' # IS OK | A, B, C // None
```
Different examples.
* `'Repeat: {repeat}'` -- Expected -- `'Repeat: False'` -- Got `''`.
* `'Hi. Long string. {none}'` -- Expected -- `Hi. Long string. None` -- Got `''`.
First, I never knew about this `{false}` --> `''` and `{none}` --> `''` until I made a pull request... and failed the tests here. Color me surprised. I think few tests may be wrong (fixed now) because it prints `True` on `{true}` and just disappear on `{false}`, `{none}`.
tobes: > First, I never knew about this {false} --> '' and {none} --> '' until I made a pull request
That is intentional. Now whether it is a good idea is a different issue.
The main issue is that the formatter still lacks documentation.
lasers: >That is intentional. Now whether it is a good idea is a different issue.
I amended two tests in this commit. Check them out and decide.
And we can have many `{False}`, `{None}` as we want... and still get nothing.
```
diff --git a/py3status/modules/static_string.py b/py3status/modules/static_string.py
index dbcec8c6..e15b30a4 100644
--- a/py3status/modules/static_string.py
+++ b/py3status/modules/static_string.py
@@ -16,12 +16,15 @@ class Py3status:
"""
"""
# available configuration parameters
- format = 'Hello, world!'
+ format = ' '.join(['{false} {none}' for x in range(5)])
+ # format += ' '
+ # format += ' {true}'
def static_string(self):
return {
'cached_until': self.py3.CACHE_FOREVER,
- 'full_text': self.py3.safe_format(self.format),
+ 'full_text': self.py3.safe_format(
+ self.format, {'true': True, 'false': False, 'none': None}),
}
```
```
$ python3 static_string.py
[]
```
```
$ python3 static_string.py # add space
{'full_text': 'False None False None False None False None False None ', 'cached_until': -1}
```
```
$ python3 static_string.py # add {true}
{'cached_until': -1, 'full_text': 'False None False None False None False None False None True'}
```
It's been said many times that the truth will set you free. I guess it was really true.
ultrabug: @tobes bump plz? | diff --git a/py3status/formatter.py b/py3status/formatter.py
index e265819e..db2875de 100644
--- a/py3status/formatter.py
+++ b/py3status/formatter.py
@@ -268,7 +268,9 @@ class Placeholder:
output = u'{%s%s}' % (self.key, self.format)
value = value_ = output.format(**{self.key: value})
- if block.commands.not_zero:
+ if block.parent is None:
+ valid = True
+ elif block.commands.not_zero:
valid = value_ not in ['', 'None', None, False, '0', '0.0', 0, 0.0]
else:
# '', None, and False are ignored
| Formatting returns empty when closing with a False or None placeholder
Formatting returns empty when closing with a `False` or `None` placeholder.
```diff
diff --git a/py3status/modules/static_string.py b/py3status/modules/static_string.py
index dbcec8c6..593b3740 100644
--- a/py3status/modules/static_string.py
+++ b/py3status/modules/static_string.py
@@ -18,10 +18,17 @@ class Py3status:
# available configuration parameters
format = 'Hello, world!'
+ # format = 'A, B, C // {true}' # IS OK | A, B, C // True
+ # format = 'A, B, C // {false}' # IS NOT OK |
+ # format = 'A, B, C // {none}' # IS NOT OK |
+ # format = 'A, B, C // {false} ' # IS OK | A, B, C // False
+ # format = 'A, B, C // {none} ' # IS OK | A, B, C // None
+
def static_string(self):
+ new_dict = {'true': True, 'false': False, 'none': None}
return {
'cached_until': self.py3.CACHE_FOREVER,
- 'full_text': self.py3.safe_format(self.format),
+ 'full_text': self.py3.safe_format(self.format, new_dict),
}
``` | ultrabug/py3status | diff --git a/tests/test_formatter.py b/tests/test_formatter.py
index 0d1bf9b5..76febab4 100644
--- a/tests/test_formatter.py
+++ b/tests/test_formatter.py
@@ -296,10 +296,18 @@ def test_26():
def test_27():
- run_formatter({'format': '{None}', 'expected': '', })
+ run_formatter({'format': '{None}', 'expected': 'None', })
def test_27a():
+ run_formatter({'format': '{None} {no}', 'expected': 'None False', })
+
+
+def test_27b():
+ run_formatter({'format': '[Hello {None}] {no}', 'expected': ' False', })
+
+
+def test_27c():
run_formatter({'format': '[Hi, my name is {None_str}]', 'expected': '', })
@@ -312,7 +320,7 @@ def test_29():
def test_30():
- run_formatter({'format': '{no}', 'expected': '', })
+ run_formatter({'format': '{no}', 'expected': 'False', })
def test_31():
@@ -1134,7 +1142,7 @@ def test_module_true_value():
def test_module_false_value():
- run_formatter({'format': '{module_false}', 'expected': ''})
+ run_formatter({'format': '{module_false}', 'expected': 'False'})
def test_zero_format_1():
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 1
},
"num_modified_files": 1
} | 3.12 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-flake8"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
flake8==5.0.4
importlib-metadata==4.2.0
iniconfig==1.1.1
mccabe==0.7.0
packaging==21.3
pluggy==1.0.0
py==1.11.0
-e git+https://github.com/ultrabug/py3status.git@dcd3dda64b82e536cfd0233691d374a22e96aeac#egg=py3status
pycodestyle==2.9.1
pyflakes==2.5.0
pyparsing==3.1.4
pytest==7.0.1
pytest-flake8==1.1.1
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: py3status
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- flake8==5.0.4
- importlib-metadata==4.2.0
- iniconfig==1.1.1
- mccabe==0.7.0
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pycodestyle==2.9.1
- pyflakes==2.5.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-flake8==1.1.1
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/py3status
| [
"tests/test_formatter.py::test_27",
"tests/test_formatter.py::test_27a",
"tests/test_formatter.py::test_27b",
"tests/test_formatter.py::test_30",
"tests/test_formatter.py::test_module_false_value"
] | [] | [
"tests/test_formatter.py::test_1",
"tests/test_formatter.py::test_2",
"tests/test_formatter.py::test_3",
"tests/test_formatter.py::test_4",
"tests/test_formatter.py::test_5",
"tests/test_formatter.py::test_6",
"tests/test_formatter.py::test_7",
"tests/test_formatter.py::test_8",
"tests/test_formatter.py::test_9",
"tests/test_formatter.py::test_10",
"tests/test_formatter.py::test_11",
"tests/test_formatter.py::test_12",
"tests/test_formatter.py::test_13",
"tests/test_formatter.py::test_14",
"tests/test_formatter.py::test_15",
"tests/test_formatter.py::test_16",
"tests/test_formatter.py::test_16a",
"tests/test_formatter.py::test_16b",
"tests/test_formatter.py::test_17",
"tests/test_formatter.py::test_18",
"tests/test_formatter.py::test_19",
"tests/test_formatter.py::test_20",
"tests/test_formatter.py::test_21",
"tests/test_formatter.py::test_22",
"tests/test_formatter.py::test_23",
"tests/test_formatter.py::test_24",
"tests/test_formatter.py::test_24a",
"tests/test_formatter.py::test_24b",
"tests/test_formatter.py::test_25",
"tests/test_formatter.py::test_26",
"tests/test_formatter.py::test_27c",
"tests/test_formatter.py::test_28",
"tests/test_formatter.py::test_29",
"tests/test_formatter.py::test_31",
"tests/test_formatter.py::test_32",
"tests/test_formatter.py::test_33",
"tests/test_formatter.py::test_34",
"tests/test_formatter.py::test_35",
"tests/test_formatter.py::test_36",
"tests/test_formatter.py::test_37",
"tests/test_formatter.py::test_38",
"tests/test_formatter.py::test_39",
"tests/test_formatter.py::test_40",
"tests/test_formatter.py::test_41",
"tests/test_formatter.py::test_42",
"tests/test_formatter.py::test_43",
"tests/test_formatter.py::test_44",
"tests/test_formatter.py::test_45",
"tests/test_formatter.py::test_46",
"tests/test_formatter.py::test_47",
"tests/test_formatter.py::test_48",
"tests/test_formatter.py::test_49",
"tests/test_formatter.py::test_50",
"tests/test_formatter.py::test_51",
"tests/test_formatter.py::test_52",
"tests/test_formatter.py::test_53",
"tests/test_formatter.py::test_54",
"tests/test_formatter.py::test_55",
"tests/test_formatter.py::test_56",
"tests/test_formatter.py::test_57",
"tests/test_formatter.py::test_58",
"tests/test_formatter.py::test_58a",
"tests/test_formatter.py::test_59",
"tests/test_formatter.py::test_59a",
"tests/test_formatter.py::test_60",
"tests/test_formatter.py::test_61",
"tests/test_formatter.py::test_62",
"tests/test_formatter.py::test_63",
"tests/test_formatter.py::test_64",
"tests/test_formatter.py::test_65",
"tests/test_formatter.py::test_66",
"tests/test_formatter.py::test_67",
"tests/test_formatter.py::test_68",
"tests/test_formatter.py::test_69",
"tests/test_formatter.py::test_70",
"tests/test_formatter.py::test_70a",
"tests/test_formatter.py::test_71",
"tests/test_formatter.py::test_72",
"tests/test_formatter.py::test_73",
"tests/test_formatter.py::test_74",
"tests/test_formatter.py::test_75",
"tests/test_formatter.py::test_76",
"tests/test_formatter.py::test_77",
"tests/test_formatter.py::test_78",
"tests/test_formatter.py::test_else_true",
"tests/test_formatter.py::test_else_false",
"tests/test_formatter.py::test_color_name_1",
"tests/test_formatter.py::test_color_name_2",
"tests/test_formatter.py::test_color_name_3",
"tests/test_formatter.py::test_color_name_4",
"tests/test_formatter.py::test_color_name_4a",
"tests/test_formatter.py::test_color_name_5",
"tests/test_formatter.py::test_color_name_5a",
"tests/test_formatter.py::test_color_name_6",
"tests/test_formatter.py::test_color_name_7",
"tests/test_formatter.py::test_color_name_7a",
"tests/test_formatter.py::test_color_1",
"tests/test_formatter.py::test_color_1a",
"tests/test_formatter.py::test_color_2",
"tests/test_formatter.py::test_color_3",
"tests/test_formatter.py::test_color_4",
"tests/test_formatter.py::test_color_5",
"tests/test_formatter.py::test_color_6",
"tests/test_formatter.py::test_color_7",
"tests/test_formatter.py::test_color_7a",
"tests/test_formatter.py::test_color_8",
"tests/test_formatter.py::test_color_8a",
"tests/test_formatter.py::test_color_9",
"tests/test_formatter.py::test_color_9a",
"tests/test_formatter.py::test_composite_1",
"tests/test_formatter.py::test_composite_2",
"tests/test_formatter.py::test_composite_3",
"tests/test_formatter.py::test_composite_4",
"tests/test_formatter.py::test_composite_5",
"tests/test_formatter.py::test_composite_6",
"tests/test_formatter.py::test_attr_getter",
"tests/test_formatter.py::test_min_length_1",
"tests/test_formatter.py::test_min_length_2",
"tests/test_formatter.py::test_min_length_3",
"tests/test_formatter.py::test_min_length_4",
"tests/test_formatter.py::test_min_length_5",
"tests/test_formatter.py::test_min_length_6",
"tests/test_formatter.py::test_numeric_strings_1",
"tests/test_formatter.py::test_numeric_strings_2",
"tests/test_formatter.py::test_numeric_strings_3",
"tests/test_formatter.py::test_numeric_strings_4",
"tests/test_formatter.py::test_numeric_strings_5",
"tests/test_formatter.py::test_numeric_strings_6",
"tests/test_formatter.py::test_not_zero_1",
"tests/test_formatter.py::test_not_zero_2",
"tests/test_formatter.py::test_not_zero_3",
"tests/test_formatter.py::test_not_zero_4",
"tests/test_formatter.py::test_not_zero_5",
"tests/test_formatter.py::test_not_zero_6",
"tests/test_formatter.py::test_not_zero_7",
"tests/test_formatter.py::test_not_zero_8",
"tests/test_formatter.py::test_not_zero_9",
"tests/test_formatter.py::test_not_zero_10",
"tests/test_formatter.py::test_not_zero_11",
"tests/test_formatter.py::test_bad_composite_color",
"tests/test_formatter.py::test_soft_1",
"tests/test_formatter.py::test_soft_2",
"tests/test_formatter.py::test_soft_3",
"tests/test_formatter.py::test_soft_4",
"tests/test_formatter.py::test_soft_5",
"tests/test_formatter.py::test_soft_6",
"tests/test_formatter.py::test_soft_7",
"tests/test_formatter.py::test_module_true",
"tests/test_formatter.py::test_module_false",
"tests/test_formatter.py::test_module_true_value",
"tests/test_formatter.py::test_zero_format_1",
"tests/test_formatter.py::test_zero_format_2",
"tests/test_formatter.py::test_zero_format_3",
"tests/test_formatter.py::test_zero_format_4",
"tests/test_formatter.py::test_inherit_not_zero_1",
"tests/test_formatter.py::test_inherit_not_zero_2",
"tests/test_formatter.py::test_inherit_not_zero_3",
"tests/test_formatter.py::test_inherit_show_1",
"tests/test_formatter.py::test_inherit_color_1",
"tests/test_formatter.py::test_inherit_color_2",
"tests/test_formatter.py::test_conditions_1",
"tests/test_formatter.py::test_conditions_2",
"tests/test_formatter.py::test_conditions_3",
"tests/test_formatter.py::test_conditions_4",
"tests/test_formatter.py::test_conditions_5",
"tests/test_formatter.py::test_conditions_6",
"tests/test_formatter.py::test_conditions_7",
"tests/test_formatter.py::test_conditions_8",
"tests/test_formatter.py::test_conditions_9",
"tests/test_formatter.py::test_conditions_10",
"tests/test_formatter.py::test_conditions_11",
"tests/test_formatter.py::test_conditions_12",
"tests/test_formatter.py::test_conditions_13",
"tests/test_formatter.py::test_conditions_14",
"tests/test_formatter.py::test_conditions_15",
"tests/test_formatter.py::test_conditions_16",
"tests/test_formatter.py::test_conditions_17",
"tests/test_formatter.py::test_conditions_18",
"tests/test_formatter.py::test_conditions_19",
"tests/test_formatter.py::test_conditions_20",
"tests/test_formatter.py::test_conditions_21",
"tests/test_formatter.py::test_conditions_22",
"tests/test_formatter.py::test_conditions_23",
"tests/test_formatter.py::test_trailing_zeroes_1",
"tests/test_formatter.py::test_trailing_zeroes_2",
"tests/test_formatter.py::test_ceiling_numbers_1",
"tests/test_formatter.py::test_ceiling_numbers_2"
] | [] | BSD 3-Clause "New" or "Revised" License | 2,031 | 184 | [
"py3status/formatter.py"
] |
NeuralEnsemble__python-neo-454 | 6b6c7ef2d148de5431cbd8f254430251c3d34dde | 2018-01-10 10:48:54 | f0285a7ab15ff6535d3e6736e0163c4fa6aea091 | pep8speaks: Hello @apdavison! Thanks for submitting the PR.
- In the file [`neo/core/epoch.py`](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/core/epoch.py), following are the PEP8 issues :
> [Line 22:1](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/core/epoch.py#L22): [E302](https://duckduckgo.com/?q=pep8%20E302) expected 2 blank lines, found 1
> [Line 23:17](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/core/epoch.py#L23): [E127](https://duckduckgo.com/?q=pep8%20E127) continuation line over-indented for visual indent
> [Line 23:75](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/core/epoch.py#L23): [E251](https://duckduckgo.com/?q=pep8%20E251) unexpected spaces around keyword / parameter equals
> [Line 23:77](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/core/epoch.py#L23): [E251](https://duckduckgo.com/?q=pep8%20E251) unexpected spaces around keyword / parameter equals
> [Line 26:70](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/core/epoch.py#L26): [W291](https://duckduckgo.com/?q=pep8%20W291) trailing whitespace
> [Line 28:15](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/core/epoch.py#L28): [E201](https://duckduckgo.com/?q=pep8%20E201) whitespace after '('
> [Line 28:100](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/core/epoch.py#L28): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (112 > 99 characters)
> [Line 29:18](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/core/epoch.py#L29): [E127](https://duckduckgo.com/?q=pep8%20E127) continuation line over-indented for visual indent
> [Line 33:1](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/core/epoch.py#L33): [E302](https://duckduckgo.com/?q=pep8%20E302) expected 2 blank lines, found 1
> [Line 118:5](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/core/epoch.py#L118): [E301](https://duckduckgo.com/?q=pep8%20E301) expected 1 blank line, found 0
> [Line 125:60](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/core/epoch.py#L125): [W291](https://duckduckgo.com/?q=pep8%20W291) trailing whitespace
> [Line 207:67](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/core/epoch.py#L207): [W291](https://duckduckgo.com/?q=pep8%20W291) trailing whitespace
> [Line 244:9](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/core/epoch.py#L244): [E265](https://duckduckgo.com/?q=pep8%20E265) block comment should start with '# '
> [Line 245:9](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/core/epoch.py#L245): [E265](https://duckduckgo.com/?q=pep8%20E265) block comment should start with '# '
- In the file [`neo/test/coretest/test_epoch.py`](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py), following are the PEP8 issues :
> [Line 240:58](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L240): [E231](https://duckduckgo.com/?q=pep8%20E231) missing whitespace after ','
> [Line 256:21](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L256): [E128](https://duckduckgo.com/?q=pep8%20E128) continuation line under-indented for visual indent
> [Line 257:21](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L257): [E128](https://duckduckgo.com/?q=pep8%20E128) continuation line under-indented for visual indent
> [Line 258:21](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L258): [E128](https://duckduckgo.com/?q=pep8%20E128) continuation line under-indented for visual indent
> [Line 259:21](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L259): [E128](https://duckduckgo.com/?q=pep8%20E128) continuation line under-indented for visual indent
> [Line 262:1](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L262): [W293](https://duckduckgo.com/?q=pep8%20W293) blank line contains whitespace
> [Line 290:21](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L290): [E128](https://duckduckgo.com/?q=pep8%20E128) continuation line under-indented for visual indent
> [Line 293:21](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L293): [E128](https://duckduckgo.com/?q=pep8%20E128) continuation line under-indented for visual indent
> [Line 294:21](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L294): [E128](https://duckduckgo.com/?q=pep8%20E128) continuation line under-indented for visual indent
> [Line 295:21](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L295): [E128](https://duckduckgo.com/?q=pep8%20E128) continuation line under-indented for visual indent
> [Line 298:1](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L298): [W293](https://duckduckgo.com/?q=pep8%20W293) blank line contains whitespace
> [Line 314:5](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L314): [E303](https://duckduckgo.com/?q=pep8%20E303) too many blank lines (2)
> [Line 325:21](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L325): [E128](https://duckduckgo.com/?q=pep8%20E128) continuation line under-indented for visual indent
> [Line 326:21](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L326): [E128](https://duckduckgo.com/?q=pep8%20E128) continuation line under-indented for visual indent
> [Line 327:21](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L327): [E128](https://duckduckgo.com/?q=pep8%20E128) continuation line under-indented for visual indent
> [Line 328:21](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L328): [E128](https://duckduckgo.com/?q=pep8%20E128) continuation line under-indented for visual indent
> [Line 331:1](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L331): [W293](https://duckduckgo.com/?q=pep8%20W293) blank line contains whitespace
> [Line 359:21](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L359): [E128](https://duckduckgo.com/?q=pep8%20E128) continuation line under-indented for visual indent
> [Line 361:21](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L361): [E128](https://duckduckgo.com/?q=pep8%20E128) continuation line under-indented for visual indent
> [Line 362:21](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L362): [E128](https://duckduckgo.com/?q=pep8%20E128) continuation line under-indented for visual indent
> [Line 363:21](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L363): [E128](https://duckduckgo.com/?q=pep8%20E128) continuation line under-indented for visual indent
> [Line 366:1](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L366): [W293](https://duckduckgo.com/?q=pep8%20W293) blank line contains whitespace
> [Line 380:1](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L380): [W293](https://duckduckgo.com/?q=pep8%20W293) blank line contains whitespace
> [Line 394:21](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L394): [E128](https://duckduckgo.com/?q=pep8%20E128) continuation line under-indented for visual indent
> [Line 395:21](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L395): [E128](https://duckduckgo.com/?q=pep8%20E128) continuation line under-indented for visual indent
> [Line 396:21](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L396): [E128](https://duckduckgo.com/?q=pep8%20E128) continuation line under-indented for visual indent
> [Line 397:21](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L397): [E128](https://duckduckgo.com/?q=pep8%20E128) continuation line under-indented for visual indent
> [Line 400:1](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L400): [W293](https://duckduckgo.com/?q=pep8%20W293) blank line contains whitespace
> [Line 414:1](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L414): [W293](https://duckduckgo.com/?q=pep8%20W293) blank line contains whitespace
> [Line 428:21](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L428): [E128](https://duckduckgo.com/?q=pep8%20E128) continuation line under-indented for visual indent
> [Line 431:21](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L431): [E128](https://duckduckgo.com/?q=pep8%20E128) continuation line under-indented for visual indent
> [Line 432:21](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L432): [E128](https://duckduckgo.com/?q=pep8%20E128) continuation line under-indented for visual indent
> [Line 433:21](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L433): [E128](https://duckduckgo.com/?q=pep8%20E128) continuation line under-indented for visual indent
> [Line 436:1](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L436): [W293](https://duckduckgo.com/?q=pep8%20W293) blank line contains whitespace
> [Line 450:1](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L450): [W293](https://duckduckgo.com/?q=pep8%20W293) blank line contains whitespace
> [Line 464:21](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L464): [E128](https://duckduckgo.com/?q=pep8%20E128) continuation line under-indented for visual indent
> [Line 465:21](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L465): [E128](https://duckduckgo.com/?q=pep8%20E128) continuation line under-indented for visual indent
> [Line 466:21](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L466): [E128](https://duckduckgo.com/?q=pep8%20E128) continuation line under-indented for visual indent
> [Line 467:21](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L467): [E128](https://duckduckgo.com/?q=pep8%20E128) continuation line under-indented for visual indent
> [Line 470:1](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L470): [W293](https://duckduckgo.com/?q=pep8%20W293) blank line contains whitespace
> [Line 471:25](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L471): [E221](https://duckduckgo.com/?q=pep8%20E221) multiple spaces before operator
> [Line 484:1](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L484): [W293](https://duckduckgo.com/?q=pep8%20W293) blank line contains whitespace
> [Line 511:1](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L511): [W293](https://duckduckgo.com/?q=pep8%20W293) blank line contains whitespace
> [Line 541:1](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L541): [E302](https://duckduckgo.com/?q=pep8%20E302) expected 2 blank lines, found 1
> [Line 561:1](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L561): [E305](https://duckduckgo.com/?q=pep8%20E305) expected 2 blank lines after class or function definition, found 1
coveralls:
[](https://coveralls.io/builds/14981826)
Coverage increased (+0.05%) to 49.221% when pulling **a51ab8b16efe9d342375b688c269567647e76f1e on apdavison:issue413** into **6fd9f49c8b0c497adfa777549cc97dc301f2dba2 on NeuralEnsemble:master**.
coveralls:
[](https://coveralls.io/builds/14981916)
Coverage increased (+0.05%) to 49.221% when pulling **cbcf404cdd0ca665b2da22360ef78d4a1c705f75 on apdavison:issue413** into **6fd9f49c8b0c497adfa777549cc97dc301f2dba2 on NeuralEnsemble:master**.
samuelgarcia: Hi andrew. Could you fix conflict in the file. So it would trigered a new test on circleci ?
So we could merge.
bjoern1001001: I have a question about this, because I have been working on `SpikeTrain.__getitem__` for the last days. It's rather general, but I noticed this here.
I noticed here that what you wrote works in Epoch for things like epoch[1], while it would raise an error in SpikeTrain, because when you create the new Epoch in `__getitem__`, times is a quantity scalar, NOT an array or Epoch. Creating a SpikeTrain like this in `__getitem__` via `SpikeTrain(times=super(...).__getitem, ...)` would raise an error here saying it's testing the length of an unsized object. Is it supposed to be okay that epochs are created with a scalar quantity instead of a quantity array? And if that is the case, shouldn't it be converted to an array then in order to ensure consistency?
A little example of what happens:
`epoch = neo.Epoch([1,2,3]*pq.s)`
`ep1 = epoch[1] # ep1 is now an Epoch with times=2*pq.s, not [2]*pq.s or array([2]*pq.s)`
This is because the following happens in `__getitem__`:
`times = super(...).__getitem__(1)`
Times now becomes a scalar quantity, because numpy `__getitem__` returns a scalar instead of an array when called with int index. From this pq.Quantity.__getitem__ creates a quantity, because it is not yet a quantity.
`obj = Epoch(times=times, ...)`
This is not a problem, because there is no test about whether times is an array or not
`...`
`return obj # This returns obj, with times=2*pq.s`
Should it be like this or should a test for this be introduced?
Apart from that I'd like to know if any of you has deeper knowledge of `__getitem__` because I noticed together with @JuliaSprenger that there a scalar quantity is returned instead of a SpikeTrain object. I've been trying to change this but it fails. Trying to copy what you did, I noticed that this works for Epoch, but not for SpikeTrain.
Again some code to illustrate what I mean:
What currently happens:
`st1 = new SpikeTrain(times=[1,2,3]*pq.s, ...)`
`testobj = st1[1]`
testobj is now a (scalar) quantity object, not a SpikeTrain
`print(testobject) # This prints 2 s`
`new_st = st1[0:1]`
new_st is a SpikeTrain, because numpy returns a SpikeTrain array
`print(new_st) # This prints [1] s`
If one would do the same as you did in Epoch for SpikeTrain, it would raise an error
`st2 = st1[1] # This would raise an error`
What happens SpikeTrain `__getitem__`:
`times = super(...).__getitem__(1)`
times is now a scalar quantity just like in Epoch
`obj = SpikeTrain(times=times, ...)`
This will raise an error in `SpikeTrain.__new__` (line 220), because times is a scalar, and thus an unsized object: TypeError: len() of unsized object
On the other hand, calling things like `st1[0:1]` will work just fine, because numpy then returns an array (or a subclass, which is SpikeTrain in this case)
I then tried creating a new SpikeTrain if I get a quantity by creating a list with a single entry:
`if not isinstance(obj, SpikeTrain):`
`obj = SpikeTrain(times=obj.magnitude, t_stop=self.t_stop, units=self.units) # Error happens here`
This works for most cases, but it fails whenever calling sorted(st1, st2), because apparently some kind of 'intermediate' SpikeTrains are generated that don't contain any attributes.
The error I get then is that SpikeTrain object [referring to `self` here] has no attribute t_stop.
If anybody knows more about this stuff, I'd really appreciate your help.
bjoern1001001: I kept working on this problem and noticed that returning a SpikeTrain when st[int] is called would create more problems than it solves. So it seems to be fine that a scalar Quantity object is returned.
But in my opinion this should be consistent across all neo objects, which means that also here in Epoch.__getitem__ an Epoch should be returned only, when ep[slice] was called, not when ep[int] was called. Do you agree with this @apdavison @samuelgarcia?
This would also simplify the code a bit here, because no new Epoch would have to be created but instead you would only have to check if an Epoch was returned from super(...).__getitem__() and if that is the case, slice durations correctly, like it is done in SpikeTrain.__getitem__.
On a more general level, as I wrote in the previous comment, I want to ask if it makes sense to allow Epochs to be created with times being a scalar instead of an array. In class SpikeTrain there are checks that fail if times is not a list or an array, whereas in Epoch nothing similar happens. If times is a scalar, then ep.times is simply a scalar quantity. Are there reasons for this or should checks be implemented to make sure it's consistent? And if it should be allowed, wouldn't it make sense to wrap it in an array then?
Apart from that it's also not assured that ep.labels and ep.durations are the same length as ep.times. I think this should also be checked in order to ensure consistency. | diff --git a/neo/core/epoch.py b/neo/core/epoch.py
index a5cd367c..359947f9 100644
--- a/neo/core/epoch.py
+++ b/neo/core/epoch.py
@@ -24,10 +24,10 @@ def _new_epoch(cls, times=None, durations=None, labels=None, units=None,
name=None, description=None, file_origin=None, annotations=None, segment=None):
'''
A function to map epoch.__new__ to function that
- does not do the unit checking. This is needed for pickle to work.
+ does not do the unit checking. This is needed for pickle to work.
'''
- e = Epoch(times=times, durations=durations, labels=labels, units=units, name=name, file_origin=file_origin,
- description=description, **annotations)
+ e = Epoch(times=times, durations=durations, labels=labels, units=units, name=name,
+ file_origin=file_origin, description=description, **annotations)
e.segment = segment
return e
@@ -151,6 +151,16 @@ class Epoch(BaseNeo, pq.Quantity):
label, time, dur in zip(labels, self.times, self.durations)]
return '<Epoch: %s>' % ', '.join(objs)
+ def __getitem__(self, i):
+ '''
+ Get the item or slice :attr:`i`.
+ '''
+ obj = Epoch(times=super(Epoch, self).__getitem__(i))
+ obj._copy_data_complement(self)
+ obj.durations = self.durations[i]
+ obj.labels = self.labels[i]
+ return obj
+
@property
def times(self):
return pq.Quantity(self)
@@ -232,10 +242,7 @@ class Epoch(BaseNeo, pq.Quantity):
_t_stop = np.inf
indices = (self >= _t_start) & (self <= _t_stop)
-
new_epc = self[indices]
- new_epc.durations = self.durations[indices]
- new_epc.labels = self.labels[indices]
return new_epc
def as_array(self, units=None):
| Slicing epochs does only slice times not durations
When slicing an epoch only the times array is sliced and the durations array is not modified. This results in incompatible number of time stamps and durations. | NeuralEnsemble/python-neo | diff --git a/neo/test/coretest/test_epoch.py b/neo/test/coretest/test_epoch.py
index 93f3acac..82449951 100644
--- a/neo/test/coretest/test_epoch.py
+++ b/neo/test/coretest/test_epoch.py
@@ -497,6 +497,28 @@ class TestEpoch(unittest.TestCase):
self.assertIsInstance(epc_as_q, pq.Quantity)
assert_array_equal(times * pq.ms, epc_as_q)
+ def test_getitem(self):
+ times = [2, 3, 4, 5]
+ durations = [0.1, 0.2, 0.3, 0.4]
+ labels = ["A", "B", "C", "D"]
+ epc = Epoch(times * pq.ms, durations=durations * pq.ms, labels=labels)
+ single_epoch = epc[2]
+ self.assertIsInstance(single_epoch, Epoch)
+ assert_array_equal(single_epoch.times, np.array([4.0]))
+ assert_array_equal(single_epoch.durations, np.array([0.3]))
+ assert_array_equal(single_epoch.labels, np.array(["C"]))
+
+ def test_slice(self):
+ times = [2, 3, 4, 5]
+ durations = [0.1, 0.2, 0.3, 0.4]
+ labels = ["A", "B", "C", "D"]
+ epc = Epoch(times * pq.ms, durations=durations * pq.ms, labels=labels)
+ single_epoch = epc[1:3]
+ self.assertIsInstance(single_epoch, Epoch)
+ assert_array_equal(single_epoch.times, np.array([3.0, 4.0]))
+ assert_array_equal(single_epoch.durations, np.array([0.2, 0.3]))
+ assert_array_equal(single_epoch.labels, np.array(["B", "C"]))
+
class TestDuplicateWithNewData(unittest.TestCase):
def setUp(self):
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 1
} | 0.5 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
importlib-metadata==4.8.3
iniconfig==1.1.1
-e git+https://github.com/NeuralEnsemble/python-neo.git@6b6c7ef2d148de5431cbd8f254430251c3d34dde#egg=neo
nose==1.3.7
numpy==1.19.5
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
quantities==0.13.0
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: python-neo
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- nose==1.3.7
- numpy==1.19.5
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- quantities==0.13.0
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/python-neo
| [
"neo/test/coretest/test_epoch.py::TestEpoch::test_getitem",
"neo/test/coretest/test_epoch.py::TestEpoch::test_slice"
] | [] | [
"neo/test/coretest/test_epoch.py::Test__generate_datasets::test__fake_neo__cascade",
"neo/test/coretest/test_epoch.py::Test__generate_datasets::test__fake_neo__nocascade",
"neo/test/coretest/test_epoch.py::Test__generate_datasets::test__get_fake_values",
"neo/test/coretest/test_epoch.py::TestEpoch::test_Epoch_creation",
"neo/test/coretest/test_epoch.py::TestEpoch::test_Epoch_merge",
"neo/test/coretest/test_epoch.py::TestEpoch::test_Epoch_repr",
"neo/test/coretest/test_epoch.py::TestEpoch::test__children",
"neo/test/coretest/test_epoch.py::TestEpoch::test__time_slice",
"neo/test/coretest/test_epoch.py::TestEpoch::test_as_array",
"neo/test/coretest/test_epoch.py::TestEpoch::test_as_quantity",
"neo/test/coretest/test_epoch.py::TestEpoch::test_time_slice2",
"neo/test/coretest/test_epoch.py::TestEpoch::test_time_slice_differnt_units",
"neo/test/coretest/test_epoch.py::TestEpoch::test_time_slice_empty",
"neo/test/coretest/test_epoch.py::TestEpoch::test_time_slice_none_both",
"neo/test/coretest/test_epoch.py::TestEpoch::test_time_slice_none_start",
"neo/test/coretest/test_epoch.py::TestEpoch::test_time_slice_none_stop",
"neo/test/coretest/test_epoch.py::TestEpoch::test_time_slice_out_of_boundries",
"neo/test/coretest/test_epoch.py::TestDuplicateWithNewData::test_duplicate_with_new_data",
"neo/test/coretest/test_epoch.py::TestEventFunctions::test__pickle"
] | [] | BSD 3-Clause "New" or "Revised" License | 2,032 | 501 | [
"neo/core/epoch.py"
] |
automl__SMAC3-368 | dc8ebc9763d15516d4f96c32e75c431783487845 | 2018-01-11 16:33:51 | f710fa60dbf2c64e42ce14aa0eb529f92378560a | diff --git a/smac/facade/func_facade.py b/smac/facade/func_facade.py
index c747fd01b..5a5aefc74 100644
--- a/smac/facade/func_facade.py
+++ b/smac/facade/func_facade.py
@@ -52,8 +52,12 @@ def fmin_smac(func: callable,
"""
# create configuration space
cs = ConfigurationSpace()
+
+ # Adjust zero padding
+ tmplt = 'x{0:0' + str(len(str(len(bounds)))) + 'd}'
+
for idx, (lower_bound, upper_bound) in enumerate(bounds):
- parameter = UniformFloatHyperparameter(name="x%d" % (idx + 1),
+ parameter = UniformFloatHyperparameter(name=tmplt.format(idx + 1),
lower=lower_bound,
upper=upper_bound,
default_value=x0[idx])
@@ -77,11 +81,10 @@ def fmin_smac(func: callable,
smac = SMAC(scenario=scenario, tae_runner=ta, rng=rng)
smac.logger = logging.getLogger(smac.__module__ + "." + smac.__class__.__name__)
incumbent = smac.optimize()
-
config_id = smac.solver.runhistory.config_ids[incumbent]
run_key = RunKey(config_id, None, 0)
incumbent_performance = smac.solver.runhistory.data[run_key]
- incumbent = np.array([incumbent['x%d' % (idx + 1)]
+ incumbent = np.array([incumbent[tmplt.format(idx + 1)]
for idx in range(len(bounds))], dtype=np.float)
return incumbent, incumbent_performance.cost, \
smac
| Wrong parameters order
Hi all,
we have realized that when using fmin_smac, the input parameters given to the function are processed in the wrong order if their count is higher than 9.
For instance when executing this command:
x, cost, _ = fmin_smac(func=to_minimize,
x0=[0.5, 20, 0.7, 0.15, 10, 0.5, 1.0, 0.1, 0.25, 1.0, 0.5],
bounds=[(0, 1), (5, 25), (0.1, 2.0), (0.0, 1.5), (2, 20), (0, 1), (1, 1.3), (0.001, 1), (0.001, 2), (0.05, 9), (0.2, 0.8)],
maxfun=2000,
rng=3)
the input parameters are swapped in the following way, as reported in the configspace.pcs file:
x1 real [0.0, 1.0] [0.5]
x10 real [0.05, 9.0] [1.0]
x11 real [0.2, 0.8] [0.5]
x2 real [5.0, 25.0] [20.0]
x3 real [0.1, 2.0] [0.7]
x4 real [0.0, 1.5] [0.15]
x5 real [2.0, 20.0] [10.0]
x6 real [0.0, 1.0] [0.5]
x7 real [1.0, 1.3] [1.0]
x8 real [0.001, 1.0] [0.1]
x9 real [0.001, 2.0] [0.25]
Any idea what's happening there? Thanks for your help!
Best,
L.
| automl/SMAC3 | diff --git a/test/test_facade/test_func_facade.py b/test/test_facade/test_func_facade.py
index 11ab7c88d..43748ed1c 100644
--- a/test/test_facade/test_func_facade.py
+++ b/test/test_facade/test_func_facade.py
@@ -33,3 +33,20 @@ class TestSMACFacade(unittest.TestCase):
self.assertEqual(type(f), type(f_s))
self.output_dirs.append(smac.scenario.output_dir)
+
+ def test_parameter_order(self):
+ def func(x):
+ for i in range(len(x)):
+ self.assertLess(i - 1, x[i])
+ self.assertGreater(i, x[i])
+ return 1
+
+ default = [i - 0.5 for i in range(10)]
+ bounds = [(i - 1, i) for i in range(10)]
+ print(default, bounds)
+ _, _, smac = fmin_smac(func=func, x0=default,
+ bounds=bounds,
+ maxfun=1)
+
+ self.output_dirs.append(smac.scenario.output_dir)
+
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 0.8 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y build-essential swig"
],
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
attrs==22.2.0
Babel==2.11.0
certifi==2021.5.30
charset-normalizer==2.0.12
ConfigSpace==0.4.19
Cython==3.0.12
docutils==0.18.1
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
iniconfig==1.1.1
Jinja2==3.0.3
joblib==1.1.1
MarkupSafe==2.0.1
nose==1.3.7
numpy==1.19.5
packaging==21.3
pluggy==1.0.0
psutil==7.0.0
py==1.11.0
Pygments==2.14.0
pynisher==0.6.4
pyparsing==3.1.4
pyrfr==0.8.2
pytest==7.0.1
pytz==2025.2
requests==2.27.1
scikit-learn==0.24.2
scipy==1.5.4
six==1.17.0
-e git+https://github.com/automl/SMAC3.git@dc8ebc9763d15516d4f96c32e75c431783487845#egg=smac
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinx-rtd-theme==2.0.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jquery==4.1
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
threadpoolctl==3.1.0
tomli==1.2.3
typing==3.7.4.3
typing_extensions==4.1.1
urllib3==1.26.20
zipp==3.6.0
| name: SMAC3
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- attrs==22.2.0
- babel==2.11.0
- charset-normalizer==2.0.12
- configspace==0.4.19
- cython==3.0.12
- docutils==0.18.1
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- jinja2==3.0.3
- joblib==1.1.1
- markupsafe==2.0.1
- nose==1.3.7
- numpy==1.19.5
- packaging==21.3
- pluggy==1.0.0
- psutil==7.0.0
- py==1.11.0
- pygments==2.14.0
- pynisher==0.6.4
- pyparsing==3.1.4
- pyrfr==0.8.2
- pytest==7.0.1
- pytz==2025.2
- requests==2.27.1
- scikit-learn==0.24.2
- scipy==1.5.4
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinx-rtd-theme==2.0.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jquery==4.1
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- threadpoolctl==3.1.0
- tomli==1.2.3
- typing==3.7.4.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- zipp==3.6.0
prefix: /opt/conda/envs/SMAC3
| [
"test/test_facade/test_func_facade.py::TestSMACFacade::test_parameter_order"
] | [] | [
"test/test_facade/test_func_facade.py::TestSMACFacade::test_func_smac"
] | [] | BSD 3-Clause License | 2,035 | 404 | [
"smac/facade/func_facade.py"
] |
|
wright-group__WrightTools-448 | 49cb335e4b0dd3556304ec72daa65cd812493f3b | 2018-01-12 21:09:47 | 592649ce55c9fa7847325c9e9b15b320a38f1389 | pep8speaks: Hello @untzag! Thanks for submitting the PR.
- In the file [`WrightTools/_dataset.py`](https://github.com/wright-group/WrightTools/blob/b2240c89d941647b42212c46b3463c1bc55b9d71/WrightTools/_dataset.py), following are the PEP8 issues :
> [Line 217:9](https://github.com/wright-group/WrightTools/blob/b2240c89d941647b42212c46b3463c1bc55b9d71/WrightTools/_dataset.py#L217): [E306](https://duckduckgo.com/?q=pep8%20E306) expected 1 blank line before a nested definition, found 0
| diff --git a/WrightTools/_dataset.py b/WrightTools/_dataset.py
index 0331efe..9f46ef3 100644
--- a/WrightTools/_dataset.py
+++ b/WrightTools/_dataset.py
@@ -6,13 +6,14 @@
import posixpath
import collections
-from concurrent.futures import ThreadPoolExecutor
import numpy as np
import h5py
+from . import exceptions as wt_exceptions
from . import kit as wt_kit
+from . import units as wt_units
# --- class ---------------------------------------------------------------------------------------
@@ -196,6 +197,27 @@ class Dataset(h5py.Dataset):
self.chunkwise(f, min=min, max=max, replace=replace)
+ def convert(self, destination_units):
+ """Convert units.
+
+ Parameters
+ ----------
+ destination_units : string (optional)
+ Units to convert into.
+ """
+ if not wt_units.is_valid_conversion(self.units, destination_units):
+ kind = wt_units.kind(self.units)
+ valid = list(wt_units.dicts[kind].keys())
+ raise wt_exceptions.UnitsError(valid, destination_units)
+ if self.units is None:
+ return
+
+ def f(dataset, s, destination_units):
+ dataset[s] = wt_units.converter(dataset[s], dataset.units, destination_units)
+
+ self.chunkwise(f, destination_units=destination_units)
+ self.units = destination_units
+
def log(self, base=np.e, floor=None):
"""Take the log of the entire dataset.
diff --git a/WrightTools/data/_axis.py b/WrightTools/data/_axis.py
index 11132b0..1485046 100644
--- a/WrightTools/data/_axis.py
+++ b/WrightTools/data/_axis.py
@@ -79,20 +79,10 @@ class Axis(object):
@property
def label(self):
- label_seed = [v.label for v in self.variables]
- symbol_type = wt_units.get_default_symbol_type(self.units)
- label = r'$\mathsf{'
- for part in label_seed:
- if self.units_kind is not None:
- units_dictionary = getattr(wt_units, self.units_kind)
- label += getattr(wt_units, symbol_type)[self.units]
- if part is not '':
- label += r'_{' + str(part) + r'}'
- else:
- label += self.name.replace('_', '\,\,')
- # TODO: handle all operators
- label += r'='
- label = label[:-1] # remove the last equals sign
+ symbol = wt_units.get_symbol(self.units)
+ label = r'$\mathsf{' + self.expression
+ for v in self.variables:
+ label = label.replace(v.natural_name, '%s_{%s}' % (symbol, v.label))
if self.units_kind:
units_dictionary = getattr(wt_units, self.units_kind)
label += r'\,'
@@ -139,9 +129,7 @@ class Axis(object):
@property
def units_kind(self):
"""Units kind."""
- for dic in wt_units.unit_dicts:
- if self.units in dic.keys():
- return dic['kind']
+ return wt_units.kind(self.units)
@property
def variables(self):
@@ -156,21 +144,23 @@ class Axis(object):
finally:
return self._variables
- def convert(self, destination_units):
+ def convert(self, destination_units, *, convert_variables=False):
"""Convert axis to destination_units.
Parameters
----------
destination_units : string
Destination units.
+ convert_variables : boolean (optional)
+ Toggle conversion of stored arrays. Default is False.
"""
- destination_units_kind = None
- for dic in wt_units.unit_dicts:
- if destination_units in dic.keys():
- destination_units_kind = dic['kind']
- break
- if not self.units_kind == destination_units_kind:
- raise wt_exceptions.UnitsError(self.units_kind, destination_units_kind)
+ if not wt_units.is_valid_conversion(self.units, destination_units):
+ kind = wt_units.kind(self.units)
+ valid = list(wt_units.dicts[kind].keys())
+ raise wt_exceptions.UnitsError(valid, destination_units)
+ if convert_variables:
+ for v in self.variables:
+ v.convert(destination_units)
self.units = destination_units
def max(self):
diff --git a/WrightTools/data/_data.py b/WrightTools/data/_data.py
index fba69ab..09e060d 100644
--- a/WrightTools/data/_data.py
+++ b/WrightTools/data/_data.py
@@ -313,9 +313,19 @@ class Data(Group):
idx = tuple(idx)
data = out.create_data(name='chop%03i' % i)
for v in self.variables:
- data.create_variable(name=v.natural_name, values=v[idx], units=v.units)
+ kwargs = {}
+ kwargs['name'] = v.natural_name
+ kwargs['values'] = v[idx]
+ kwargs['units'] = v.units
+ kwargs['label'] = v.label
+ data.create_variable(**kwargs)
for c in self.channels:
- data.create_channel(name=c.natural_name, values=c[idx], units=c.units)
+ kwargs = {}
+ kwargs['name'] = c.natural_name
+ kwargs['values'] = c[idx]
+ kwargs['units'] = c.units
+ kwargs['label'] = c.label
+ data.create_channel(**kwargs)
data.transform([a.expression for a in kept_axes if a.expression not in at.keys()])
i += 1
out.flush()
@@ -380,13 +390,15 @@ class Data(Group):
self._axes.pop(axis_index)
self._update_natural_namespace()
- def convert(self, destination_units, verbose=True):
- """Convert all compatable constants and axes to given units.
+ def convert(self, destination_units, *, convert_variables=False, verbose=True):
+ """Convert all compatable axes to given units.
Parameters
----------
destination_units : str
Destination units.
+ convert_variables : boolean (optional)
+ Toggle conversion of stored arrays. Default is False
verbose : bool (optional)
Toggle talkback. Default is True.
@@ -397,13 +409,11 @@ class Data(Group):
axis object in data.axes or data.constants.
"""
# get kind of units
- for dic in wt_units.unit_dicts:
- if destination_units in dic.keys():
- units_kind = dic['kind']
+ units_kind = wt_units.kind(destination_units)
# apply to all compatible axes
for axis in self.axes + self.constants:
if axis.units_kind == units_kind:
- axis.convert(destination_units)
+ axis.convert(destination_units, convert_variables=convert_variables)
if verbose:
print('axis', axis.expression, 'converted')
diff --git a/WrightTools/kit/_array.py b/WrightTools/kit/_array.py
index c74c4b9..b7cd2d1 100644
--- a/WrightTools/kit/_array.py
+++ b/WrightTools/kit/_array.py
@@ -270,12 +270,18 @@ def valid_index(index, shape):
tuple
Valid index.
"""
+ # append slices to index
+ index = list(index)
+ while len(index) < len(shape):
+ index.append(slice(None))
+ # fill out, in reverse
out = []
for i, s in zip(index[::-1], shape[::-1]):
if s == 1:
- out.append(0)
- elif isinstance(i, slice):
- out.append(i)
+ if isinstance(i, slice):
+ out.append(slice(None))
+ else:
+ out.append(0)
else:
- out.append(min(s - 1, i))
+ out.append(i)
return tuple(out[::-1])
diff --git a/WrightTools/units.py b/WrightTools/units.py
index 131f667..76383da 100644
--- a/WrightTools/units.py
+++ b/WrightTools/units.py
@@ -4,34 +4,31 @@
# --- import --------------------------------------------------------------------------------------
-from __future__ import absolute_import, division, print_function, unicode_literals
+import collections
import numpy as np
import warnings
-# --- units ---------------------------------------------------------------------------------------
+# --- define --------------------------------------------------------------------------------------
# units are stored in dictionaries of like kind. format:
# unit : to native, from native, units_symbol, units_label
# angle units (native: rad)
-angle = {'kind': 'angle',
- 'rad': ['x', 'x', r'rad'],
+angle = {'rad': ['x', 'x', r'rad'],
'deg': ['x/57.2958', '57.2958*x', r'deg']}
# delay units (native: fs)
fs_per_mm = 3336.
-delay = {'kind': 'delay',
- 'fs': ['x', 'x', r'fs'],
+delay = {'fs': ['x', 'x', r'fs'],
'ps': ['x*1e3', 'x/1e3', r'ps'],
'ns': ['x*1e6', 'x/1e6', r'ns'],
'mm_delay': ['x*2*fs_per_mm', 'x/(2*fs_per_mm)', r'mm']}
# energy units (native: nm)
-energy = {'kind': 'energy',
- 'nm': ['x', 'x', r'nm'],
+energy = {'nm': ['x', 'x', r'nm'],
'wn': ['1e7/x', '1e7/x', r'cm^{-1}'],
'eV': ['1240./x', '1240./x', r'eV'],
'meV': ['1240000./x', '1240000./x', r'meV'],
@@ -40,30 +37,24 @@ energy = {'kind': 'energy',
'GHz': ['2.99792458e8/x', '2.99792458e8/x', r'GHz']}
# fluence units (native: uJ per sq. cm)
-fluence = {'kind': 'fluence',
- 'uJ per sq. cm': ['x', 'x', r'\frac{\mu J}{cm^{2}}']}
+fluence = {'uJ per sq. cm': ['x', 'x', r'\frac{\mu J}{cm^{2}}']}
# optical density units (native: od)
-od = {'kind': 'od',
- 'mOD': ['1e3*x', 'x/1e3', r'mOD'],
+od = {'mOD': ['1e3*x', 'x/1e3', r'mOD'],
'OD': ['x', 'x', r'OD']}
# position units (native: mm)
-position = {'kind': 'position',
- # can't have same name as energy nm
- 'nm_p': ['x/1e6', '1e6/x', r'nm'],
+position = {'nm_p': ['x/1e6', '1e6/x', r'nm'],
'um': ['x/1000.', '1000/x.', r'um'],
'mm': ['x', 'x', r'mm'],
'cm': ['10.*x', 'x/10.', r'cm'],
'in': ['x*0.039370', '0.039370*x', r'in']}
# pulse width units (native: FWHM)
-pulse_width = {'kind': 'pulse_width',
- 'FWHM': ['x', 'x', r'FWHM']}
+pulse_width = {'FWHM': ['x', 'x', r'FWHM']}
# time units (native: s)
-time = {'kind': 'time',
- 'fs_t': ['x/1e15', 'x*1e15', r'fs'],
+time = {'fs_t': ['x/1e15', 'x*1e15', r'fs'],
'ps_t': ['x/1e12', 'x*1e12', r'ps'],
'ns_t': ['x/1e9', 'x*1e9', r'ns'],
'us_t': ['x/1e6', 'x*1e6', r'us'],
@@ -73,7 +64,18 @@ time = {'kind': 'time',
'h_t': ['x*3600.', 'x/3600.', r'h'],
'd_t': ['x*86400.', 'x/86400.', r'd']}
-unit_dicts = [angle, delay, energy, time, position, pulse_width, fluence, od]
+dicts = collections.OrderedDict()
+dicts['angle'] = angle
+dicts['delay'] = delay
+dicts['energy'] = energy
+dicts['time'] = time
+dicts['position'] = position
+dicts['pulse_width'] = pulse_width
+dicts['fluence'] = fluence
+dicts['od'] = od
+
+
+# --- functions -----------------------------------------------------------------------------------
def converter(val, current_unit, destination_unit):
@@ -94,7 +96,7 @@ def converter(val, current_unit, destination_unit):
Converted value.
"""
x = val
- for dic in unit_dicts:
+ for dic in dicts.values():
if current_unit in dic.keys() and destination_unit in dic.keys():
try:
native = eval(dic[current_unit][0])
@@ -111,86 +113,71 @@ def converter(val, current_unit, destination_unit):
pass
else:
warnings.warn('conversion {0} to {1} not valid: returning input'.format(
- current_unit, destination_unit))
+ current_unit, destination_unit))
return val
-def kind(units):
- """Find the kind of given units.
+def get_symbol(units):
+ """Get default symbol type.
Parameters
----------
- units : string
- The units of interest
+ units_str : string
+ Units.
Returns
-------
string
- The kind of the given units. If no match is found, returns None.
+ LaTeX formatted symbol.
"""
- for d in unit_dicts:
- if units in d.keys():
- return str(d['kind'])
-
-
-# --- symbol --------------------------------------------------------------------------------------
-
-
-class SymbolDict(dict):
- """Subclass dictionary to get at __missing__ method."""
-
- def __missing__(self, key):
- """Define what happens when key is missing."""
- return self['default']
-
-
-# color
-color_symbols = SymbolDict()
-color_symbols['default'] = r'E'
-color_symbols['nm'] = r'\lambda'
-color_symbols['wn'] = r'\bar\nu'
-color_symbols['eV'] = r'\hslash\omega'
-color_symbols['Hz'] = r'f'
-color_symbols['THz'] = r'f'
-color_symbols['GHz'] = r'f'
+ if kind(units) == 'energy':
+ d = {}
+ d['nm'] = r'\lambda'
+ d['wn'] = r'\bar\nu'
+ d['eV'] = r'\hslash\omega'
+ d['Hz'] = r'f'
+ d['THz'] = r'f'
+ d['GHz'] = r'f'
+ return d.get(units, 'E')
+ elif kind(units) == 'delay':
+ return r'\tau'
+ elif kind(units) == 'fluence':
+ return r'\mathcal{F}'
+ elif kind(units) == 'pulse_width':
+ return r'\sigma'
+ else:
+ return kind(units)
-# delay
-delay_symbols = SymbolDict()
-delay_symbols['default'] = r'\tau'
-# fluence
-fluence_symbols = SymbolDict()
-fluence_symbols['default'] = r'\mathcal{F}'
+def get_valid_conversions(units):
+ valid = list(dicts[kind(units)])
+ valid.remove(units)
+ return tuple(valid)
-# pulse width
-pulse_width_symbols = SymbolDict()
-pulse_width_symbols['default'] = r'\sigma'
-# catch all
-none_symbols = SymbolDict()
-none_symbols['default'] = ''
+def is_valid_conversion(a, b):
+ for dic in dicts.values():
+ if a in dic.keys() and b in dic.keys():
+ return True
+ if a is None and b is None:
+ return True
+ else:
+ return False
-def get_default_symbol_type(units_str):
- """Get default symbol type.
+def kind(units):
+ """Find the kind of given units.
Parameters
----------
- units_str : string
- Units.
+ units : string
+ The units of interest
Returns
-------
string
- Symbol dictionary name.
+ The kind of the given units. If no match is found, returns None.
"""
- if units_str in ['nm', 'wn', 'eV']:
- return 'color_symbols'
- elif units_str in ['fs', 'ps', 'ns']:
- return 'delay_symbols'
- elif units_str in ['uJ per sq. cm']:
- return 'fluence_symbols'
- elif units_str in ['FWHM']:
- return 'pulse_width_symbols'
- else:
- return 'none_symbols'
+ for k, v in dicts.items():
+ if units in v.keys():
+ return k
| api for "forcing" unit conversion of underlying variables
currently, `data.convert` and `axis.convert` simply change the units that are returned when slicing into the axis object---the variables themselves remain untouched---I like this behavior and want to keep it
in addition to the above, users should be able to convert the variables directly
step one: add `convert` method to `Variable` class
step two: add `convert_variables=False` kwarg to `data.convert`, `axis.convert`, which toggles propagation to dependent variables
we should think carefully, because variables can appear in multiple axes | wright-group/WrightTools | diff --git a/tests/data/axis/convert_axis.py b/tests/data/axis/convert_axis.py
new file mode 100644
index 0000000..4e1408b
--- /dev/null
+++ b/tests/data/axis/convert_axis.py
@@ -0,0 +1,68 @@
+"""Test axis unit conversion."""
+
+
+# --- import --------------------------------------------------------------------------------------
+
+
+import numpy as np
+
+import WrightTools as wt
+from WrightTools import datasets
+
+
+# --- define --------------------------------------------------------------------------------------
+
+
+def test_convert_variables():
+ p = datasets.KENT.LDS821_TRSF
+ ignore = ['d1', 'd2', 'wm']
+ data = wt.data.from_KENT(p, ignore=ignore)
+ data.w2.convert('meV', convert_variables=True)
+ assert data.w2.units == 'meV'
+ assert data['w2'].units == 'meV'
+ data.close()
+
+
+def test_exception():
+ p = datasets.PyCMDS.w1_000
+ data = wt.data.from_PyCMDS(p)
+ try:
+ data.w1.convert('fs')
+ except wt.exceptions.UnitsError:
+ assert True
+ else:
+ assert False
+ assert data.w1.units == 'nm'
+ assert data['w1'].units == 'nm'
+ data.close()
+
+
+def test_w1_wa():
+ p = datasets.PyCMDS.w1_wa_000
+ data = wt.data.from_PyCMDS(p)
+ assert data.wa.units == 'nm'
+ data.wa.convert('eV')
+ assert data.wa.units == 'eV'
+ assert np.isclose(data.wa.max(), 1.5802564757220569)
+ assert np.isclose(data.wa.min(), 0.6726385958618104)
+ assert data['wa'].units == 'nm'
+ data.close()
+
+
+def test_wigner():
+ p = datasets.COLORS.v2p2_WL_wigner
+ data = wt.data.from_COLORS(p)
+ data.d1.convert('ns')
+ assert data.d1.units == 'ns'
+ assert data['d1'].units == 'fs'
+ data.close()
+
+
+# --- run -----------------------------------------------------------------------------------------
+
+
+if __name__ == '__main__':
+ test_convert_variables()
+ test_exception()
+ test_w1_wa()
+ test_wigner()
diff --git a/tests/data/chop.py b/tests/data/chop.py
index 1b59ed1..0f2f37e 100755
--- a/tests/data/chop.py
+++ b/tests/data/chop.py
@@ -20,6 +20,8 @@ def test_2D_to_1D():
for d in chop.values():
assert d.w2.size == 81
assert d.axis_expressions == ('w2',)
+ for k in data.variable_names:
+ assert d[k].label == data[k].label
data.close()
chop.close()
@@ -85,4 +87,5 @@ def test_parent():
if __name__ == "__main__":
+ test_2D_to_1D()
test_3D_to_1D()
diff --git a/tests/data/convert_data.py b/tests/data/convert_data.py
new file mode 100644
index 0000000..e232390
--- /dev/null
+++ b/tests/data/convert_data.py
@@ -0,0 +1,57 @@
+"""Test data unit conversion."""
+
+
+# --- import --------------------------------------------------------------------------------------
+
+
+import numpy as np
+
+import WrightTools as wt
+from WrightTools import datasets
+
+
+# --- define --------------------------------------------------------------------------------------
+
+
+def test_convert_variables():
+ p = datasets.KENT.LDS821_TRSF
+ ignore = ['d1', 'd2', 'wm']
+ data = wt.data.from_KENT(p, ignore=ignore)
+ data.convert('meV', convert_variables=True)
+ assert data.w1.units == 'meV'
+ assert data.w2.units == 'meV'
+ assert data['w2'].units == 'meV'
+ assert data['w2'].units == 'meV'
+ data.close()
+
+
+def test_w1_wa():
+ p = datasets.PyCMDS.w1_wa_000
+ data = wt.data.from_PyCMDS(p)
+ assert data.wa.units == 'nm'
+ data.convert('eV')
+ assert data.wa.units == 'eV'
+ assert np.isclose(data.wa.max(), 1.5802564757220569)
+ assert np.isclose(data.wa.min(), 0.6726385958618104)
+ assert data['wa'].units == 'nm'
+ data.close()
+
+
+def test_wigner():
+ p = datasets.COLORS.v2p2_WL_wigner
+ data = wt.data.from_COLORS(p)
+ data.convert('ns')
+ assert data.d1.units == 'ns'
+ assert data['d1'].units == 'fs'
+ assert data.wm.units == 'nm'
+ assert data['wm'].units == 'nm'
+ data.close()
+
+
+# --- run -----------------------------------------------------------------------------------------
+
+
+if __name__ == '__main__':
+ test_convert_variables()
+ test_w1_wa()
+ test_wigner()
diff --git a/tests/dataset/convert_dataset.py b/tests/dataset/convert_dataset.py
new file mode 100644
index 0000000..6c82d5a
--- /dev/null
+++ b/tests/dataset/convert_dataset.py
@@ -0,0 +1,53 @@
+"""Test dataset unit conversion."""
+
+
+# --- import --------------------------------------------------------------------------------------
+
+
+import numpy as np
+
+import WrightTools as wt
+from WrightTools import datasets
+
+
+# --- define --------------------------------------------------------------------------------------
+
+
+def test_exception():
+ p = datasets.PyCMDS.w1_000
+ data = wt.data.from_PyCMDS(p)
+ try:
+ data['w1'].convert('fs')
+ except wt.exceptions.UnitsError:
+ assert True
+ else:
+ assert False
+ assert data['w1'].units == 'nm'
+ data.close()
+
+
+def test_w1_wa():
+ p = datasets.PyCMDS.w1_wa_000
+ data = wt.data.from_PyCMDS(p)
+ assert data['wa'].units == 'nm'
+ data['wa'].convert('eV')
+ assert np.isclose(data['wa'].max(), 1.5802564757220569)
+ assert np.isclose(data['wa'].min(), 0.6726385958618104)
+ data.close()
+
+
+def test_wigner():
+ p = datasets.COLORS.v2p2_WL_wigner
+ data = wt.data.from_COLORS(p)
+ data['d1'].convert('ns')
+ assert data['d1'].units == 'ns'
+ data.close()
+
+
+# --- run -----------------------------------------------------------------------------------------
+
+
+if __name__ == '__main__':
+ test_exception()
+ test_w1_wa()
+ test_wigner()
diff --git a/tests/kit/valid_index.py b/tests/kit/valid_index.py
new file mode 100644
index 0000000..16a0e53
--- /dev/null
+++ b/tests/kit/valid_index.py
@@ -0,0 +1,65 @@
+"""Test valid index function."""
+
+
+# --- import --------------------------------------------------------------------------------------
+
+
+import WrightTools as wt
+
+
+# --- test ----------------------------------------------------------------------------------------
+
+
+def test__1_5__7():
+ index = (1, 5)
+ shape = (7,)
+ assert wt.kit.valid_index(index, shape) == (5,)
+
+
+def test__4_2_12__1_25_1():
+ index = (4, 2, 12)
+ shape = (1, 25, 1)
+ assert wt.kit.valid_index(index, shape) == (0, 2, 0)
+
+
+def test__s__23():
+ index = (slice(None),)
+ shape = (23,)
+ assert wt.kit.valid_index(index, shape) == (slice(None),)
+
+
+def test__s__1_25():
+ index = (slice(None),)
+ shape = (1, 25,)
+ assert wt.kit.valid_index(index, shape) == (slice(None), slice(None))
+
+
+def test__ss_ss__1_25():
+ index = (slice(20, None, 1), slice(20, None, 1))
+ shape = (1, 25,)
+ assert wt.kit.valid_index(index, shape) == (slice(None), slice(20, None, 1))
+
+
+def test__s__13_25_99():
+ index = (slice(None),)
+ shape = (13, 25, 99)
+ assert wt.kit.valid_index(index, shape) == (slice(None), slice(None), slice(None))
+
+
+def test__s_s__51():
+ index = (slice(None), slice(None))
+ shape = (51,)
+ assert wt.kit.valid_index(index, shape) == (slice(None),)
+
+
+# --- run -----------------------------------------------------------------------------------------
+
+
+if __name__ == '__main__':
+ test__1_5__7()
+ test__4_2_12__1_25_1()
+ test__s__23()
+ test__s__1_25()
+ test__ss_ss__1_25()
+ test__s__13_25_99()
+ test__s_s__51()
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 1
},
"num_modified_files": 5
} | 2.13 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | appdirs==1.4.4
contourpy==1.3.0
coverage==7.8.0
cycler==0.12.1
exceptiongroup==1.2.2
fonttools==4.56.0
h5py==3.13.0
imageio==2.37.0
importlib_resources==6.5.2
iniconfig==2.1.0
kiwisolver==1.4.7
matplotlib==3.9.4
numexpr==2.10.2
numpy==2.0.2
packaging==24.2
pillow==11.1.0
pluggy==1.5.0
pyparsing==3.2.3
pytest==8.3.5
pytest-cov==6.0.0
python-dateutil==2.9.0.post0
pytz==2025.2
scipy==1.13.1
six==1.17.0
tidy_headers==1.0.4
tomli==2.2.1
-e git+https://github.com/wright-group/WrightTools.git@49cb335e4b0dd3556304ec72daa65cd812493f3b#egg=WrightTools
zipp==3.21.0
| name: WrightTools
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- appdirs==1.4.4
- contourpy==1.3.0
- coverage==7.8.0
- cycler==0.12.1
- exceptiongroup==1.2.2
- fonttools==4.56.0
- h5py==3.13.0
- imageio==2.37.0
- importlib-resources==6.5.2
- iniconfig==2.1.0
- kiwisolver==1.4.7
- matplotlib==3.9.4
- numexpr==2.10.2
- numpy==2.0.2
- packaging==24.2
- pillow==11.1.0
- pluggy==1.5.0
- pyparsing==3.2.3
- pytest==8.3.5
- pytest-cov==6.0.0
- python-dateutil==2.9.0.post0
- pytz==2025.2
- scipy==1.13.1
- six==1.17.0
- tidy-headers==1.0.4
- tomli==2.2.1
- zipp==3.21.0
prefix: /opt/conda/envs/WrightTools
| [
"tests/kit/valid_index.py::test__s__1_25",
"tests/kit/valid_index.py::test__ss_ss__1_25",
"tests/kit/valid_index.py::test__s__13_25_99"
] | [
"tests/data/axis/convert_axis.py::test_convert_variables",
"tests/data/axis/convert_axis.py::test_exception",
"tests/data/axis/convert_axis.py::test_w1_wa",
"tests/data/axis/convert_axis.py::test_wigner",
"tests/data/chop.py::test_2D_to_1D",
"tests/data/chop.py::test_3D_to_1D",
"tests/data/chop.py::test_3D_to_1D_at",
"tests/data/chop.py::test_3D_to_2D",
"tests/data/chop.py::test_3D_to_2D_at",
"tests/data/chop.py::test_parent",
"tests/data/convert_data.py::test_convert_variables",
"tests/data/convert_data.py::test_w1_wa",
"tests/data/convert_data.py::test_wigner",
"tests/dataset/convert_dataset.py::test_exception",
"tests/dataset/convert_dataset.py::test_w1_wa",
"tests/dataset/convert_dataset.py::test_wigner"
] | [
"tests/kit/valid_index.py::test__1_5__7",
"tests/kit/valid_index.py::test__4_2_12__1_25_1",
"tests/kit/valid_index.py::test__s__23",
"tests/kit/valid_index.py::test__s_s__51"
] | [] | MIT License | 2,041 | 4,079 | [
"WrightTools/_dataset.py",
"WrightTools/data/_axis.py",
"WrightTools/data/_data.py",
"WrightTools/kit/_array.py",
"WrightTools/units.py"
] |
nginxinc__crossplane-23 | 8709d938119f967ce938dd5163b233ce5439d30d | 2018-01-18 01:31:29 | 8709d938119f967ce938dd5163b233ce5439d30d | diff --git a/crossplane/analyzer.py b/crossplane/analyzer.py
index f5a5ffb..ec89dab 100644
--- a/crossplane/analyzer.py
+++ b/crossplane/analyzer.py
@@ -1920,7 +1920,7 @@ def analyze(fname, stmt, term, ctx=()):
reason = '"%s" directive is not allowed here' % directive
raise NgxParserDirectiveContextError(reason, fname, line)
- valid_flag = lambda x: x in ('on', 'off')
+ valid_flag = lambda x: x.lower() in ('on', 'off')
# do this in reverse because we only throw errors at the end if no masks
# are valid, and typically the first bit mask is what the parser expects
@@ -1942,6 +1942,8 @@ def analyze(fname, stmt, term, ctx=()):
(mask & NGX_CONF_1MORE and n_args >= 1) or
(mask & NGX_CONF_2MORE and n_args >= 2)):
return
+ elif mask & NGX_CONF_FLAG and n_args == 1 and not valid_flag(args[0]):
+ reason = 'invalid value "%s" in "%%s" directive, it must be "on" or "off"' % args[0]
else:
reason = 'invalid number of arguments in "%s" directive'
| NGINX_CONF_FLAG directives should support uppercase ON or OFF as args
`crossplane parse` throws an error for this config but `nginx` does not:
```nginx
events {
accept_mutex OFF;
}
``` | nginxinc/crossplane | diff --git a/tests/test_analyze.py b/tests/test_analyze.py
index ccb4091..8c36815 100644
--- a/tests/test_analyze.py
+++ b/tests/test_analyze.py
@@ -31,3 +31,30 @@ def test_state_directive():
raise Exception("bad context for 'state' passed: " + repr(ctx))
except crossplane.errors.NgxParserDirectiveContextError:
continue
+
+
+def test_flag_directive_args():
+ fname = '/path/to/nginx.conf'
+ ctx = ('events',)
+
+ # an NGINX_CONF_FLAG directive
+ stmt = {
+ 'directive': 'accept_mutex',
+ 'line': 2 # this is arbitrary
+ }
+
+ good_args = [['on'], ['off'], ['On'], ['Off'], ['ON'], ['OFF']]
+
+ for args in good_args:
+ stmt['args'] = args
+ crossplane.analyzer.analyze(fname, stmt, term=';', ctx=ctx)
+
+ bad_args = [['1'], ['0'], ['true'], ['okay'], ['']]
+
+ for args in bad_args:
+ stmt['args'] = args
+ try:
+ crossplane.analyzer.analyze(fname, stmt, term=';', ctx=ctx)
+ raise Exception('bad args for flag directive: ' + repr(args))
+ except crossplane.errors.NgxParserDirectiveArgumentsError as e:
+ assert e.strerror.endswith('it must be "on" or "off"')
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 0.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
-e git+https://github.com/nginxinc/crossplane.git@8709d938119f967ce938dd5163b233ce5439d30d#egg=crossplane
importlib-metadata==4.8.3
iniconfig==1.1.1
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: crossplane
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/crossplane
| [
"tests/test_analyze.py::test_flag_directive_args"
] | [] | [
"tests/test_analyze.py::test_state_directive"
] | [] | Apache License 2.0 | 2,054 | 320 | [
"crossplane/analyzer.py"
] |
|
nathandines__SPF2IP-3 | 7e3593a6f322c39a02c1c0f4a108b046ec6c1a20 | 2018-01-19 22:24:43 | 7e3593a6f322c39a02c1c0f4a108b046ec6c1a20 | diff --git a/SPF2IP.py b/SPF2IP.py
index e6210f3..b95903e 100644
--- a/SPF2IP.py
+++ b/SPF2IP.py
@@ -29,14 +29,22 @@ def dns_request_unicode(hostname,record_type,*args,**kwargs):
value = value.decode('utf-8')
output.append(value)
elif record_type == "MX":
- value = entry.exchange
- if type(value) is not unicode:
- value = value.__str__().encode('utf-8').decode('utf-8')
+ try:
+ value = entry.exchange.decode('utf-8')
+ except AttributeError as err:
+ if err.args[0] == "'Name' object has no attribute 'decode'":
+ value = unicode(entry.exchange)
+ else:
+ raise
output.append(value)
elif record_type == "TXT":
- value = ''.join([str(ent) for ent in entry.strings])
- if type(value) is not unicode:
- value = value.decode('utf-8')
+ value_array = []
+ for ent in entry.strings:
+ if type(ent) is not unicode:
+ value_array.append(ent.decode('utf-8'))
+ else:
+ value_array.append(ent)
+ value = ''.join(value_array)
output.append(value)
return output
diff --git a/setup.py b/setup.py
index 3b958d6..3561be0 100755
--- a/setup.py
+++ b/setup.py
@@ -11,7 +11,7 @@ with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
setup(
name='SPF2IP',
- version='1.0.4',
+ version='1.0.5',
description='Python module to get IP addresses from an SPF record',
long_description=long_description,
| Broken on 3.6
The current code always returns an empty answer on Python 3.6. This fixes it for me:
```
diff --git a/SPF2IP.py b/SPF2IP.py
index e6210f3..84683ff 100644
--- a/SPF2IP.py
+++ b/SPF2IP.py
@@ -34,7 +34,7 @@ def dns_request_unicode(hostname,record_type,*args,**kwargs):
value = value.__str__().encode('utf-8').decode('utf-8')
output.append(value)
elif record_type == "TXT":
- value = ''.join([str(ent) for ent in entry.strings])
+ value = ''.join([str(ent, encoding='ascii') for ent in entry.strings])
if type(value) is not unicode:
value = value.decode('utf-8')
output.append(value)
```
I only know python3 so I can't give good advice on making code that works on both 2 and 3. But a friend of mine's package has this function that you might find useful:
```
def to_native_str(value, encoding='utf-8'):
if isinstance(value, str):
return value
if six.PY3 and isinstance(value, six.binary_type): #pragma: no cover
return value.decode(encoding)
elif six.PY2 and isinstance(value, six.text_type): #pragma: no cover
return value.encode(encoding)
```
| nathandines/SPF2IP | diff --git a/test_SPF2IP.py b/test_SPF2IP.py
index 54caff5..6e88918 100644
--- a/test_SPF2IP.py
+++ b/test_SPF2IP.py
@@ -125,11 +125,13 @@ dns_records = {
class fakedns:
def __init__(self,value,record_type):
if record_type == 'TXT':
- self.strings = value
+ self.strings = []
+ for entry in value:
+ self.strings.append(entry.encode('utf-8'))
elif record_type == 'A' or record_type == 'AAAA':
- self.address = value
+ self.address = value.encode('utf-8')
elif record_type == 'MX':
- self.exchange = value
+ self.exchange = value.encode('utf-8')
def fake_dns_resolver(hostname,record_type):
try:
dns_records[hostname]
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 2
} | 1.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements-test.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | dnspython==2.7.0
exceptiongroup==1.2.2
iniconfig==2.1.0
ipaddress==1.0.23
mock==5.2.0
nose==1.3.7
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
-e git+https://github.com/nathandines/SPF2IP.git@7e3593a6f322c39a02c1c0f4a108b046ec6c1a20#egg=SPF2IP
tomli==2.2.1
| name: SPF2IP
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- dnspython==2.7.0
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- ipaddress==1.0.23
- mock==5.2.0
- nose==1.3.7
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- tomli==2.2.1
prefix: /opt/conda/envs/SPF2IP
| [
"test_SPF2IP.py::SPF2IPTestCases::test_included_list_is_string_list",
"test_SPF2IP.py::SPF2IPTestCases::test_included_loop",
"test_SPF2IP.py::SPF2IPTestCases::test_ip4_results",
"test_SPF2IP.py::SPF2IPTestCases::test_ip6_results",
"test_SPF2IP.py::SPF2IPTestCases::test_single_domain_ip4",
"test_SPF2IP.py::SPF2IPTestCases::test_single_domain_ip6",
"test_SPF2IP.py::SPF2IPTestCases::test_single_domain_with_a",
"test_SPF2IP.py::SPF2IPTestCases::test_single_domain_with_a_external",
"test_SPF2IP.py::SPF2IPTestCases::test_single_domain_with_a_external_slash",
"test_SPF2IP.py::SPF2IPTestCases::test_single_domain_with_a_slash",
"test_SPF2IP.py::SPF2IPTestCases::test_single_domain_with_aaaa",
"test_SPF2IP.py::SPF2IPTestCases::test_single_domain_with_aaaa_external",
"test_SPF2IP.py::SPF2IPTestCases::test_single_domain_with_aaaa_external_slash",
"test_SPF2IP.py::SPF2IPTestCases::test_single_domain_with_aaaa_slash",
"test_SPF2IP.py::SPF2IPTestCases::test_single_domain_with_mx_a",
"test_SPF2IP.py::SPF2IPTestCases::test_single_domain_with_mx_a_external",
"test_SPF2IP.py::SPF2IPTestCases::test_single_domain_with_mx_a_external_slash",
"test_SPF2IP.py::SPF2IPTestCases::test_single_domain_with_mx_a_slash",
"test_SPF2IP.py::SPF2IPTestCases::test_single_domain_with_mx_aaaa",
"test_SPF2IP.py::SPF2IPTestCases::test_single_domain_with_mx_aaaa_external",
"test_SPF2IP.py::SPF2IPTestCases::test_single_domain_with_mx_aaaa_external_longslash",
"test_SPF2IP.py::SPF2IPTestCases::test_single_domain_with_mx_aaaa_slash",
"test_SPF2IP.py::SPF2IPTestCases::test_spf_list_is_string_list_with_prefix",
"test_SPF2IP.py::SPF2IPTestCases::test_spf_list_split_spf"
] | [] | [
"test_SPF2IP.py::SPF2IPTestCases::test_dns_query_method_output",
"test_SPF2IP.py::SPF2IPTestCases::test_domain_without_spf_results",
"test_SPF2IP.py::SPF2IPTestCases::test_included_invalid_spf",
"test_SPF2IP.py::SPF2IPTestCases::test_included_without_includes",
"test_SPF2IP.py::SPF2IPTestCases::test_included_without_spf",
"test_SPF2IP.py::SPF2IPTestCases::test_nonexistent_domain_results",
"test_SPF2IP.py::SPF2IPTestCases::test_single_domain_empty",
"test_SPF2IP.py::SPF2IPTestCases::test_single_domain_with_mx_a_external_longslash",
"test_SPF2IP.py::SPF2IPTestCases::test_spf_list_invalid_spf",
"test_SPF2IP.py::SPF2IPTestCases::test_spf_list_without_spf"
] | [] | MIT License | 2,065 | 452 | [
"SPF2IP.py",
"setup.py"
] |
|
asottile__git-code-debt-91 | 44b090434a24fe945747a98fddde5a051f0c7b1d | 2018-01-22 04:17:54 | 9a8dc753514f2a38885101b14dc538aefaa8749f | diff --git a/git_code_debt/generate.py b/git_code_debt/generate.py
index 90d72da..283b26c 100644
--- a/git_code_debt/generate.py
+++ b/git_code_debt/generate.py
@@ -29,7 +29,7 @@ from git_code_debt.write_logic import insert_metric_values
from git_code_debt.write_logic import update_has_data
-def get_metrics(commit, diff, metric_parsers):
+def get_metrics(commit, diff, metric_parsers, exclude):
def get_all_metrics(file_diff_stats):
for metric_parser_cls in metric_parsers:
metric_parser = metric_parser_cls()
@@ -39,6 +39,10 @@ def get_metrics(commit, diff, metric_parsers):
yield metric
file_diff_stats = get_file_diff_stats_from_output(diff)
+ file_diff_stats = tuple(
+ x for x in file_diff_stats
+ if not exclude.search(x.path)
+ )
return tuple(get_all_metrics(file_diff_stats))
@@ -47,13 +51,13 @@ def increment_metric_values(metric_values, metrics):
metric_values[metric.name] += metric.value
-def _get_metrics_inner(m_args):
- compare_commit, commit, repo_parser, metric_parsers = m_args
+def _get_metrics_inner(mp_args):
+ compare_commit, commit, repo_parser, metric_parsers, exclude = mp_args
if compare_commit is None:
diff = repo_parser.get_original_commit(commit.sha)
else:
diff = repo_parser.get_commit_diff(compare_commit.sha, commit.sha)
- return get_metrics(commit, diff, metric_parsers)
+ return get_metrics(commit, diff, metric_parsers, exclude)
def mapper(jobs):
@@ -68,6 +72,7 @@ def load_data(
repo,
package_names,
skip_defaults,
+ exclude,
jobs,
):
metric_parsers = get_metric_parsers_from_args(package_names, skip_defaults)
@@ -102,6 +107,7 @@ def load_data(
commits,
itertools.repeat(repo_parser),
itertools.repeat(metric_parsers),
+ itertools.repeat(exclude),
)
do_map = mapper(jobs)
for commit, metrics in six.moves.zip(
@@ -176,6 +182,7 @@ def main(argv=None):
args.repo,
args.metric_package_names,
args.skip_default_metrics,
+ args.exclude,
parsed_args.jobs,
)
diff --git a/git_code_debt/generate_config.py b/git_code_debt/generate_config.py
index ad9fce6..302a8ca 100644
--- a/git_code_debt/generate_config.py
+++ b/git_code_debt/generate_config.py
@@ -2,6 +2,7 @@ from __future__ import absolute_import
from __future__ import unicode_literals
import collections
+import re
import jsonschema
@@ -17,6 +18,7 @@ GENERATE_OPTIONS_SCHEMA = {
'metric_package_names': {'type': 'array', 'items': {'type': 'string'}},
'repo': {'type': 'string'},
'database': {'type': 'string'},
+ 'exclude': {'type': 'string'},
},
}
@@ -28,6 +30,7 @@ class GenerateOptions(collections.namedtuple(
'metric_package_names',
'repo',
'database',
+ 'exclude',
),
)):
@classmethod
@@ -38,4 +41,5 @@ class GenerateOptions(collections.namedtuple(
metric_package_names=yaml_dict.get('metric_package_names', []),
repo=yaml_dict['repo'],
database=yaml_dict['database'],
+ exclude=re.compile(yaml_dict.get('exclude', '^$').encode()),
)
diff --git a/git_code_debt/server/servlets/widget.py b/git_code_debt/server/servlets/widget.py
index a023861..6264a09 100644
--- a/git_code_debt/server/servlets/widget.py
+++ b/git_code_debt/server/servlets/widget.py
@@ -35,7 +35,7 @@ def data():
parsers = get_metric_parsers_from_args(
metric_config.metric_package_names, skip_defaults=False,
)
- metrics = get_metrics(Commit.blank, diff, parsers)
+ metrics = get_metrics(Commit.blank, diff, parsers, metric_config.exclude)
metrics = [
metric for metric in metrics
if metric.value and metric.name in metric_names
| Add `exclude` pattern
Add the ability to remove checked in files from the pattern which are not part of the codebase. For example:
```yaml
exclude: '^vendor/'
``` | asottile/git-code-debt | diff --git a/tests/generate_config_test.py b/tests/generate_config_test.py
index 889b074..3449297 100644
--- a/tests/generate_config_test.py
+++ b/tests/generate_config_test.py
@@ -1,6 +1,8 @@
from __future__ import absolute_import
from __future__ import unicode_literals
+import re
+
import jsonschema.exceptions
import pytest
@@ -18,12 +20,14 @@ def test_with_all_options_specified():
'metric_package_names': ['my_package'],
'repo': '.',
'database': 'database.db',
+ 'exclude': '^vendor/',
})
assert ret == GenerateOptions(
skip_default_metrics=True,
metric_package_names=['my_package'],
repo='.',
database='database.db',
+ exclude=re.compile(b'^vendor/'),
)
@@ -34,17 +38,5 @@ def test_minimal_defaults():
metric_package_names=[],
repo='./',
database='database.db',
- )
-
-
-def test_none_for_tempdir_allowed():
- ret = GenerateOptions.from_yaml({
- 'repo': 'repo',
- 'database': 'database.db',
- })
- assert ret == GenerateOptions(
- skip_default_metrics=False,
- metric_package_names=[],
- repo='repo',
- database='database.db',
+ exclude=re.compile(b'^$'),
)
diff --git a/tests/generate_test.py b/tests/generate_test.py
index 2f05015..62eb1c6 100644
--- a/tests/generate_test.py
+++ b/tests/generate_test.py
@@ -3,12 +3,11 @@ from __future__ import unicode_literals
import collections
import io
-import os
import os.path
+import re
import sqlite3
import pytest
-import yaml
from git_code_debt.discovery import get_metric_parsers_from_args
from git_code_debt.generate import _get_metrics_inner
@@ -43,7 +42,7 @@ def test_get_metrics_inner_first_commit(cloneable_with_commits):
with repo_parser.repo_checked_out():
metrics = _get_metrics_inner((
None, cloneable_with_commits.commits[0],
- repo_parser, [LinesOfCodeParser],
+ repo_parser, [LinesOfCodeParser], re.compile(b'^$'),
))
assert Metric(name='TotalLinesOfCode', value=0) in metrics
@@ -54,7 +53,7 @@ def test_get_metrics_inner_nth_commit(cloneable_with_commits):
metrics = _get_metrics_inner((
cloneable_with_commits.commits[-2],
cloneable_with_commits.commits[-1],
- repo_parser, [LinesOfCodeParser],
+ repo_parser, [LinesOfCodeParser], re.compile(b'^$'),
))
assert Metric(name='TotalLinesOfCode', value=2) in metrics
@@ -73,18 +72,6 @@ def test_generate_integration(sandbox, cloneable):
main(('-C', sandbox.gen_config(repo=cloneable)))
-def test_generate_integration_config_file(sandbox, cloneable, tempdir_factory):
- tmpdir = tempdir_factory.get()
- config_filename = os.path.join(tmpdir, 'generate_config.yaml')
- with io.open(config_filename, 'w') as config_file:
- yaml.dump(
- {'repo': cloneable, 'database': sandbox.db_path},
- stream=config_file,
- )
- with cwd(tmpdir):
- main([])
-
-
def test_main_database_does_not_exist(sandbox, cloneable):
new_db_path = os.path.join(sandbox.directory, 'new.db')
cfg = sandbox.gen_config(database=new_db_path, repo=cloneable)
@@ -157,6 +144,25 @@ def test_moves_handled_properly(sandbox, cloneable):
assert not main(('-C', sandbox.gen_config(repo=cloneable)))
+def test_exclude_pattern(sandbox, cloneable_with_commits):
+ cfg = sandbox.gen_config(
+ repo=cloneable_with_commits.path, exclude='\.tmpl$',
+ )
+ assert not main(('-C', cfg))
+ with sandbox.db() as db:
+ query = (
+ 'SELECT running_value\n'
+ 'FROM metric_data\n'
+ 'INNER JOIN metric_names ON\n'
+ ' metric_data.metric_id == metric_names.id\n'
+ 'WHERE sha = ? AND name = "TotalLinesOfCode"\n'
+ )
+ sha = cloneable_with_commits.commits[-1].sha
+ val, = db.execute(query, (sha,)).fetchone()
+ # 2 lines of code from test.py, 0 lines from foo.tmpl (2 lines)
+ assert val == 2
+
+
def test_get_options_from_config_no_config_file():
with pytest.raises(SystemExit):
get_options_from_config('i-dont-exist')
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 3
} | 0.9 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-env"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"requirements-dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==25.3.0
blinker==1.9.0
cfgv==3.4.0
click==8.1.8
coverage==7.8.0
cssselect==1.3.0
distlib==0.3.9
exceptiongroup==1.2.2
filelock==3.18.0
Flask==3.1.0
-e git+https://github.com/asottile/git-code-debt.git@44b090434a24fe945747a98fddde5a051f0c7b1d#egg=git_code_debt
identify==2.6.9
importlib_metadata==8.6.1
iniconfig==2.1.0
itsdangerous==2.2.0
Jinja2==3.1.6
jsonschema==4.23.0
jsonschema-specifications==2024.10.1
lxml==5.3.1
Mako==1.3.9
MarkupSafe==3.0.2
mock==5.2.0
nodeenv==1.9.1
packaging==24.2
platformdirs==4.3.7
pluggy==1.5.0
pre_commit==4.2.0
pyquery==2.0.1
pytest==8.3.5
pytest-env==1.1.5
PyYAML==6.0.2
referencing==0.36.2
rpds-py==0.24.0
six==1.17.0
tomli==2.2.1
typing_extensions==4.13.0
virtualenv==20.30.0
Werkzeug==3.1.3
zipp==3.21.0
| name: git-code-debt
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==25.3.0
- blinker==1.9.0
- cfgv==3.4.0
- click==8.1.8
- coverage==7.8.0
- cssselect==1.3.0
- distlib==0.3.9
- exceptiongroup==1.2.2
- filelock==3.18.0
- flask==3.1.0
- identify==2.6.9
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- itsdangerous==2.2.0
- jinja2==3.1.6
- jsonschema==4.23.0
- jsonschema-specifications==2024.10.1
- lxml==5.3.1
- mako==1.3.9
- markupsafe==3.0.2
- mock==5.2.0
- nodeenv==1.9.1
- packaging==24.2
- platformdirs==4.3.7
- pluggy==1.5.0
- pre-commit==4.2.0
- pyquery==2.0.1
- pytest==8.3.5
- pytest-env==1.1.5
- pyyaml==6.0.2
- referencing==0.36.2
- rpds-py==0.24.0
- six==1.17.0
- tomli==2.2.1
- typing-extensions==4.13.0
- virtualenv==20.30.0
- werkzeug==3.1.3
- zipp==3.21.0
prefix: /opt/conda/envs/git-code-debt
| [
"tests/generate_config_test.py::test_with_all_options_specified",
"tests/generate_config_test.py::test_minimal_defaults",
"tests/generate_test.py::test_get_metrics_inner_first_commit",
"tests/generate_test.py::test_get_metrics_inner_nth_commit"
] | [
"tests/generate_test.py::test_generate_integration",
"tests/generate_test.py::test_main_database_does_not_exist",
"tests/generate_test.py::test_generate_integration_previous_data",
"tests/generate_test.py::test_generate_new_data_created",
"tests/generate_test.py::test_regression_for_issue_10",
"tests/generate_test.py::test_moves_handled_properly",
"tests/generate_test.py::test_exclude_pattern"
] | [
"tests/generate_config_test.py::test_empty_config_invalid",
"tests/generate_test.py::test_increment_metrics_first_time",
"tests/generate_test.py::test_increment_metrics_already_there",
"tests/generate_test.py::test_mapper[1]",
"tests/generate_test.py::test_mapper[4]",
"tests/generate_test.py::test_get_options_from_config_no_config_file",
"tests/generate_test.py::test_create_schema",
"tests/generate_test.py::test_populate_metric_ids"
] | [] | MIT License | 2,074 | 1,014 | [
"git_code_debt/generate.py",
"git_code_debt/generate_config.py",
"git_code_debt/server/servlets/widget.py"
] |
|
firebase__firebase-admin-python-122 | d2d0060ec805f85a73b6b203d6df1d3c9e74cb8b | 2018-01-26 23:00:56 | d2d0060ec805f85a73b6b203d6df1d3c9e74cb8b | diff --git a/firebase_admin/__init__.py b/firebase_admin/__init__.py
index 0bf08cd..03471a6 100644
--- a/firebase_admin/__init__.py
+++ b/firebase_admin/__init__.py
@@ -218,15 +218,38 @@ class App(object):
self._options = _AppOptions(options)
self._lock = threading.RLock()
self._services = {}
- pid = self._options.get('projectId')
+ self._project_id = App._lookup_project_id(self._credential, self._options)
+
+ @classmethod
+ def _lookup_project_id(cls, credential, options):
+ """Looks up the Firebase project ID associated with an App.
+
+ This method first inspects the app options for a ``projectId`` entry. Then it attempts to
+ get the project ID from the credential used to initialize the app. If that also fails,
+ attempts to look up the ``GCLOUD_PROJECT`` environment variable.
+
+ Args:
+ credential: A Firebase credential instance.
+ options: A Firebase AppOptions instance.
+
+ Returns:
+ str: A project ID string or None.
+
+ Raises:
+ ValueError: If a non-string project ID value is specified.
+ """
+ pid = options.get('projectId')
if not pid:
try:
- pid = self._credential.project_id
+ pid = credential.project_id
except AttributeError:
pass
if not pid:
pid = os.environ.get('GCLOUD_PROJECT')
- self._project_id = pid
+ if pid is not None and not isinstance(pid, six.string_types):
+ raise ValueError(
+ 'Invalid project ID: "{0}". project ID must be a string.'.format(pid))
+ return pid
@property
def name(self):
diff --git a/firebase_admin/firestore.py b/firebase_admin/firestore.py
index 0191c00..1c32368 100644
--- a/firebase_admin/firestore.py
+++ b/firebase_admin/firestore.py
@@ -28,8 +28,6 @@ except ImportError:
raise ImportError('Failed to import the Cloud Firestore library for Python. Make sure '
'to install the "google-cloud-firestore" module.')
-import six
-
from firebase_admin import _utils
@@ -75,7 +73,4 @@ class _FirestoreClient(object):
'Project ID is required to access Firestore. Either set the projectId option, '
'or use service account credentials. Alternatively, set the GCLOUD_PROJECT '
'environment variable.')
- elif not isinstance(project, six.string_types):
- raise ValueError(
- 'Invalid project ID: "{0}". project ID must be a string.'.format(project))
return _FirestoreClient(credentials, project)
diff --git a/firebase_admin/instance_id.py b/firebase_admin/instance_id.py
index 5e4f5d4..70ace55 100644
--- a/firebase_admin/instance_id.py
+++ b/firebase_admin/instance_id.py
@@ -79,9 +79,6 @@ class _InstanceIdService(object):
'Project ID is required to access Instance ID service. Either set the projectId '
'option, or use service account credentials. Alternatively, set the '
'GCLOUD_PROJECT environment variable.')
- elif not isinstance(project_id, six.string_types):
- raise ValueError(
- 'Invalid project ID: "{0}". project ID must be a string.'.format(project_id))
self._project_id = project_id
self._client = _http_client.JsonHttpClient(
credential=app.credential.get_credential(), base_url=_IID_SERVICE_URL)
| Validate Project ID String Globally
We currently check if project_id is a string in each of the service modules. This can be done in one place -- namely when the project_id is first read in the `App.__init__()` method. | firebase/firebase-admin-python | diff --git a/tests/test_app.py b/tests/test_app.py
index e4450eb..aaa3f0a 100644
--- a/tests/test_app.py
+++ b/tests/test_app.py
@@ -317,6 +317,11 @@ class TestFirebaseApp(object):
if project_id:
os.environ[GCLOUD_PROJECT] = project_id
+ def test_non_string_project_id(self):
+ options = {'projectId': {'key': 'not a string'}}
+ with pytest.raises(ValueError):
+ firebase_admin.initialize_app(CREDENTIAL, options=options)
+
def test_app_get(self, init_app):
assert init_app is firebase_admin.get_app(init_app.name)
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 3
} | 2.8 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | astroid==1.4.9
cachetools==5.5.2
certifi==2025.1.31
chardet==5.2.0
charset-normalizer==3.4.1
colorama==0.4.6
coverage==7.8.0
distlib==0.3.9
exceptiongroup==1.2.2
filelock==3.18.0
-e git+https://github.com/firebase/firebase-admin-python.git@d2d0060ec805f85a73b6b203d6df1d3c9e74cb8b#egg=firebase_admin
google-api-core==2.24.2
google-auth==2.38.0
google-cloud-core==2.4.3
google-cloud-firestore==2.20.1
google-cloud-storage==3.1.0
google-crc32c==1.7.1
google-resumable-media==2.7.2
googleapis-common-protos==1.69.2
grpcio==1.71.0
grpcio-status==1.71.0
idna==3.10
iniconfig==2.1.0
isort==6.0.1
lazy-object-proxy==1.10.0
mccabe==0.7.0
packaging==24.2
platformdirs==4.3.7
pluggy==1.5.0
proto-plus==1.26.1
protobuf==5.29.4
pyasn1==0.6.1
pyasn1_modules==0.4.2
pylint==1.6.4
pyproject-api==1.9.0
pytest==8.3.5
pytest-cov==6.0.0
requests==2.32.3
rsa==4.9
six==1.17.0
tomli==2.2.1
tox==4.25.0
typing_extensions==4.13.0
urllib3==2.3.0
virtualenv==20.29.3
wrapt==1.17.2
| name: firebase-admin-python
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- astroid==1.4.9
- cachetools==5.5.2
- certifi==2025.1.31
- chardet==5.2.0
- charset-normalizer==3.4.1
- colorama==0.4.6
- coverage==7.8.0
- distlib==0.3.9
- exceptiongroup==1.2.2
- filelock==3.18.0
- google-api-core==2.24.2
- google-auth==2.38.0
- google-cloud-core==2.4.3
- google-cloud-firestore==2.20.1
- google-cloud-storage==3.1.0
- google-crc32c==1.7.1
- google-resumable-media==2.7.2
- googleapis-common-protos==1.69.2
- grpcio==1.71.0
- grpcio-status==1.71.0
- idna==3.10
- iniconfig==2.1.0
- isort==6.0.1
- lazy-object-proxy==1.10.0
- mccabe==0.7.0
- packaging==24.2
- platformdirs==4.3.7
- pluggy==1.5.0
- proto-plus==1.26.1
- protobuf==5.29.4
- pyasn1==0.6.1
- pyasn1-modules==0.4.2
- pylint==1.6.4
- pyproject-api==1.9.0
- pytest==8.3.5
- pytest-cov==6.0.0
- requests==2.32.3
- rsa==4.9
- six==1.17.0
- tomli==2.2.1
- tox==4.25.0
- typing-extensions==4.13.0
- urllib3==2.3.0
- virtualenv==20.29.3
- wrapt==1.17.2
prefix: /opt/conda/envs/firebase-admin-python
| [
"tests/test_app.py::TestFirebaseApp::test_non_string_project_id"
] | [] | [
"tests/test_app.py::TestFirebaseApp::test_default_app_init[cert]",
"tests/test_app.py::TestFirebaseApp::test_default_app_init[refreshtoken]",
"tests/test_app.py::TestFirebaseApp::test_default_app_init[explicit-appdefault]",
"tests/test_app.py::TestFirebaseApp::test_default_app_init[implicit-appdefault]",
"tests/test_app.py::TestFirebaseApp::test_non_default_app_init[cert]",
"tests/test_app.py::TestFirebaseApp::test_non_default_app_init[refreshtoken]",
"tests/test_app.py::TestFirebaseApp::test_non_default_app_init[explicit-appdefault]",
"tests/test_app.py::TestFirebaseApp::test_non_default_app_init[implicit-appdefault]",
"tests/test_app.py::TestFirebaseApp::test_app_init_with_invalid_credential[]",
"tests/test_app.py::TestFirebaseApp::test_app_init_with_invalid_credential[foo]",
"tests/test_app.py::TestFirebaseApp::test_app_init_with_invalid_credential[0]",
"tests/test_app.py::TestFirebaseApp::test_app_init_with_invalid_credential[1]",
"tests/test_app.py::TestFirebaseApp::test_app_init_with_invalid_credential[cred4]",
"tests/test_app.py::TestFirebaseApp::test_app_init_with_invalid_credential[cred5]",
"tests/test_app.py::TestFirebaseApp::test_app_init_with_invalid_credential[cred6]",
"tests/test_app.py::TestFirebaseApp::test_app_init_with_invalid_credential[True]",
"tests/test_app.py::TestFirebaseApp::test_app_init_with_invalid_credential[False]",
"tests/test_app.py::TestFirebaseApp::test_app_init_with_invalid_options[]",
"tests/test_app.py::TestFirebaseApp::test_app_init_with_invalid_options[0]",
"tests/test_app.py::TestFirebaseApp::test_app_init_with_invalid_options[1]",
"tests/test_app.py::TestFirebaseApp::test_app_init_with_invalid_options[options3]",
"tests/test_app.py::TestFirebaseApp::test_app_init_with_invalid_options[options4]",
"tests/test_app.py::TestFirebaseApp::test_app_init_with_invalid_options[True]",
"tests/test_app.py::TestFirebaseApp::test_app_init_with_invalid_options[False]",
"tests/test_app.py::TestFirebaseApp::test_app_init_with_invalid_name[None]",
"tests/test_app.py::TestFirebaseApp::test_app_init_with_invalid_name[]",
"tests/test_app.py::TestFirebaseApp::test_app_init_with_invalid_name[0]",
"tests/test_app.py::TestFirebaseApp::test_app_init_with_invalid_name[1]",
"tests/test_app.py::TestFirebaseApp::test_app_init_with_invalid_name[name4]",
"tests/test_app.py::TestFirebaseApp::test_app_init_with_invalid_name[name5]",
"tests/test_app.py::TestFirebaseApp::test_app_init_with_invalid_name[name6]",
"tests/test_app.py::TestFirebaseApp::test_app_init_with_invalid_name[True]",
"tests/test_app.py::TestFirebaseApp::test_app_init_with_invalid_name[False]",
"tests/test_app.py::TestFirebaseApp::test_app_init_with_invalid_config_file[firebase_config_empty.json]",
"tests/test_app.py::TestFirebaseApp::test_app_init_with_invalid_config_file[firebase_config_invalid.json]",
"tests/test_app.py::TestFirebaseApp::test_app_init_with_invalid_config_file[no_such_file]",
"tests/test_app.py::TestFirebaseApp::test_app_init_with_invalid_config_string",
"tests/test_app.py::TestFirebaseApp::test_app_init_with_default_config[Environment",
"tests/test_app.py::TestFirebaseApp::test_app_init_with_default_config[Invalid",
"tests/test_app.py::TestFirebaseApp::test_project_id_from_options[cert]",
"tests/test_app.py::TestFirebaseApp::test_project_id_from_options[refreshtoken]",
"tests/test_app.py::TestFirebaseApp::test_project_id_from_options[explicit-appdefault]",
"tests/test_app.py::TestFirebaseApp::test_project_id_from_options[implicit-appdefault]",
"tests/test_app.py::TestFirebaseApp::test_project_id_from_credentials",
"tests/test_app.py::TestFirebaseApp::test_project_id_from_environment",
"tests/test_app.py::TestFirebaseApp::test_no_project_id",
"tests/test_app.py::TestFirebaseApp::test_app_get[DefaultApp]",
"tests/test_app.py::TestFirebaseApp::test_app_get[CustomApp]",
"tests/test_app.py::TestFirebaseApp::test_non_existing_app_get[DefaultApp]",
"tests/test_app.py::TestFirebaseApp::test_non_existing_app_get[CustomApp]",
"tests/test_app.py::TestFirebaseApp::test_app_get_with_invalid_name[None]",
"tests/test_app.py::TestFirebaseApp::test_app_get_with_invalid_name[]",
"tests/test_app.py::TestFirebaseApp::test_app_get_with_invalid_name[0]",
"tests/test_app.py::TestFirebaseApp::test_app_get_with_invalid_name[1]",
"tests/test_app.py::TestFirebaseApp::test_app_get_with_invalid_name[name4]",
"tests/test_app.py::TestFirebaseApp::test_app_get_with_invalid_name[name5]",
"tests/test_app.py::TestFirebaseApp::test_app_get_with_invalid_name[name6]",
"tests/test_app.py::TestFirebaseApp::test_app_get_with_invalid_name[True]",
"tests/test_app.py::TestFirebaseApp::test_app_get_with_invalid_name[False]",
"tests/test_app.py::TestFirebaseApp::test_invalid_app_delete[None]",
"tests/test_app.py::TestFirebaseApp::test_invalid_app_delete[]",
"tests/test_app.py::TestFirebaseApp::test_invalid_app_delete[0]",
"tests/test_app.py::TestFirebaseApp::test_invalid_app_delete[1]",
"tests/test_app.py::TestFirebaseApp::test_invalid_app_delete[app4]",
"tests/test_app.py::TestFirebaseApp::test_invalid_app_delete[app5]",
"tests/test_app.py::TestFirebaseApp::test_invalid_app_delete[app6]",
"tests/test_app.py::TestFirebaseApp::test_invalid_app_delete[True]",
"tests/test_app.py::TestFirebaseApp::test_invalid_app_delete[False]",
"tests/test_app.py::TestFirebaseApp::test_invalid_app_delete[app9]",
"tests/test_app.py::TestFirebaseApp::test_app_delete[DefaultApp]",
"tests/test_app.py::TestFirebaseApp::test_app_delete[CustomApp]",
"tests/test_app.py::TestFirebaseApp::test_app_services[DefaultApp]",
"tests/test_app.py::TestFirebaseApp::test_app_services[CustomApp]",
"tests/test_app.py::TestFirebaseApp::test_app_services_invalid_arg[0]",
"tests/test_app.py::TestFirebaseApp::test_app_services_invalid_arg[1]",
"tests/test_app.py::TestFirebaseApp::test_app_services_invalid_arg[True]",
"tests/test_app.py::TestFirebaseApp::test_app_services_invalid_arg[False]",
"tests/test_app.py::TestFirebaseApp::test_app_services_invalid_arg[str]",
"tests/test_app.py::TestFirebaseApp::test_app_services_invalid_arg[arg5]",
"tests/test_app.py::TestFirebaseApp::test_app_services_invalid_arg[arg6]",
"tests/test_app.py::TestFirebaseApp::test_app_services_invalid_arg[arg7]",
"tests/test_app.py::TestFirebaseApp::test_app_services_invalid_app[DefaultApp]",
"tests/test_app.py::TestFirebaseApp::test_app_services_invalid_app[CustomApp]"
] | [] | Apache License 2.0 | 2,090 | 818 | [
"firebase_admin/__init__.py",
"firebase_admin/firestore.py",
"firebase_admin/instance_id.py"
] |
|
geopandas__geopandas-656 | f70a66918df086bd0fca37f03e160224f400fe14 | 2018-01-29 03:56:04 | caefd7562a5cfd80cc86b37796a22f4bfa3aa9d2 | codecov[bot]: # [Codecov](https://codecov.io/gh/geopandas/geopandas/pull/656?src=pr&el=h1) Report
> Merging [#656](https://codecov.io/gh/geopandas/geopandas/pull/656?src=pr&el=desc) into [master](https://codecov.io/gh/geopandas/geopandas/commit/36c8bcda34e4f46315eda812b11a0d9db32d01f2?src=pr&el=desc) will **not change** coverage.
> The diff coverage is `100%`.
[](https://codecov.io/gh/geopandas/geopandas/pull/656?src=pr&el=tree)
```diff
@@ Coverage Diff @@
## master #656 +/- ##
=======================================
Coverage 94.83% 94.83%
=======================================
Files 14 14
Lines 1065 1065
=======================================
Hits 1010 1010
Misses 55 55
```
| [Impacted Files](https://codecov.io/gh/geopandas/geopandas/pull/656?src=pr&el=tree) | Coverage Δ | |
|---|---|---|
| [geopandas/plotting.py](https://codecov.io/gh/geopandas/geopandas/pull/656/diff?src=pr&el=tree#diff-Z2VvcGFuZGFzL3Bsb3R0aW5nLnB5) | `93.93% <100%> (ø)` | :arrow_up: |
------
[Continue to review full report at Codecov](https://codecov.io/gh/geopandas/geopandas/pull/656?src=pr&el=continue).
> **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta)
> `Δ = absolute <relative> (impact)`, `ø = not affected`, `? = missing data`
> Powered by [Codecov](https://codecov.io/gh/geopandas/geopandas/pull/656?src=pr&el=footer). Last update [36c8bcd...ed1caa5](https://codecov.io/gh/geopandas/geopandas/pull/656?src=pr&el=lastupdated). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments).
jdmcbr: @martinfleis @jorisvandenbossche I got this at least to a merge-able state since I saw it came up recently. It seems like something along these lines would still be useful, though maybe a better way than what I did here.
martinfleis: I fully support this PR. Keeping these plotting methods part of our public API was probably never intended and we do not approach them as such anyway.
martinfleis: I have merged in changes from master and added test for DeprecationWarning. Once green we are ready to merge as there were no objections against and it is not even breaking change (for now). | diff --git a/geopandas/plotting.py b/geopandas/plotting.py
index f915a569..6b0b096d 100644
--- a/geopandas/plotting.py
+++ b/geopandas/plotting.py
@@ -6,6 +6,21 @@ import pandas as pd
import geopandas
+def deprecated(new):
+ """Helper to provide deprecation warning."""
+
+ def old(*args, **kwargs):
+ warnings.warn(
+ "{} is intended for internal ".format(new.__name__[1:])
+ + "use only, and will be deprecated.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ new(*args, **kwargs)
+
+ return old
+
+
def _flatten_multi_geoms(geoms, prefix="Multi"):
"""
Returns Series like geoms and index, except that any Multi geometries
@@ -17,7 +32,6 @@ def _flatten_multi_geoms(geoms, prefix="Multi"):
Returns
-------
-
components : list of geometry
component_index : index array
@@ -40,7 +54,7 @@ def _flatten_multi_geoms(geoms, prefix="Multi"):
return components, np.array(component_index)
-def plot_polygon_collection(
+def _plot_polygon_collection(
ax, geoms, values=None, color=None, cmap=None, vmin=None, vmax=None, **kwargs
):
"""
@@ -48,32 +62,25 @@ def plot_polygon_collection(
Parameters
----------
-
ax : matplotlib.axes.Axes
where shapes will be plotted
-
geoms : a sequence of `N` Polygons and/or MultiPolygons (can be mixed)
values : a sequence of `N` values, optional
Values will be mapped to colors using vmin/vmax/cmap. They should
have 1:1 correspondence with the geometries (not their components).
Otherwise follows `color` / `facecolor` kwargs.
-
edgecolor : single color or sequence of `N` colors
Color for the edge of the polygons
-
facecolor : single color or sequence of `N` colors
Color to fill the polygons. Cannot be used together with `values`.
-
color : single color or sequence of `N` colors
Sets both `edgecolor` and `facecolor`
-
**kwargs
Additional keyword arguments passed to the collection
Returns
-------
-
collection : matplotlib.collections.Collection that was plotted
"""
@@ -130,7 +137,10 @@ def plot_polygon_collection(
return collection
-def plot_linestring_collection(
+plot_polygon_collection = deprecated(_plot_polygon_collection)
+
+
+def _plot_linestring_collection(
ax, geoms, values=None, color=None, cmap=None, vmin=None, vmax=None, **kwargs
):
"""
@@ -138,25 +148,19 @@ def plot_linestring_collection(
Parameters
----------
-
ax : matplotlib.axes.Axes
where shapes will be plotted
-
geoms : a sequence of `N` LineStrings and/or MultiLineStrings (can be
mixed)
-
values : a sequence of `N` values, optional
Values will be mapped to colors using vmin/vmax/cmap. They should
have 1:1 correspondence with the geometries (not their components).
-
color : single color or sequence of `N` colors
Cannot be used together with `values`.
Returns
-------
-
collection : matplotlib.collections.Collection that was plotted
-
"""
from matplotlib.collections import LineCollection
from matplotlib.colors import is_color_like
@@ -194,7 +198,10 @@ def plot_linestring_collection(
return collection
-def plot_point_collection(
+plot_linestring_collection = deprecated(_plot_linestring_collection)
+
+
+def _plot_point_collection(
ax,
geoms,
values=None,
@@ -264,6 +271,9 @@ def plot_point_collection(
return collection
+plot_point_collection = deprecated(_plot_point_collection)
+
+
def plot_series(s, cmap=None, color=None, ax=None, figsize=None, **style_kwds):
"""
Plot a GeoSeries.
@@ -369,7 +379,7 @@ def plot_series(s, cmap=None, color=None, ax=None, figsize=None, **style_kwds):
facecolor = color
values_ = values[poly_idx] if cmap else None
- plot_polygon_collection(
+ _plot_polygon_collection(
ax, polys, values_, facecolor=facecolor, cmap=cmap, **style_kwds
)
@@ -377,7 +387,7 @@ def plot_series(s, cmap=None, color=None, ax=None, figsize=None, **style_kwds):
lines = expl_series[line_idx]
if not lines.empty:
values_ = values[line_idx] if cmap else None
- plot_linestring_collection(
+ _plot_linestring_collection(
ax, lines, values_, color=color, cmap=cmap, **style_kwds
)
@@ -385,7 +395,9 @@ def plot_series(s, cmap=None, color=None, ax=None, figsize=None, **style_kwds):
points = expl_series[point_idx]
if not points.empty:
values_ = values[point_idx] if cmap else None
- plot_point_collection(ax, points, values_, color=color, cmap=cmap, **style_kwds)
+ _plot_point_collection(
+ ax, points, values_, color=color, cmap=cmap, **style_kwds
+ )
plt.draw()
return ax
@@ -616,7 +628,7 @@ def plot_dataframe(
polys = expl_series[poly_idx & np.invert(nan_idx)]
subset = values[poly_idx & np.invert(nan_idx)]
if not polys.empty:
- plot_polygon_collection(
+ _plot_polygon_collection(
ax, polys, subset, vmin=mn, vmax=mx, cmap=cmap, **style_kwds
)
@@ -624,7 +636,7 @@ def plot_dataframe(
lines = expl_series[line_idx & np.invert(nan_idx)]
subset = values[line_idx & np.invert(nan_idx)]
if not lines.empty:
- plot_linestring_collection(
+ _plot_linestring_collection(
ax, lines, subset, vmin=mn, vmax=mx, cmap=cmap, **style_kwds
)
@@ -635,7 +647,7 @@ def plot_dataframe(
if isinstance(markersize, np.ndarray):
markersize = np.take(markersize, multiindex, axis=0)
markersize = markersize[point_idx & np.invert(nan_idx)]
- plot_point_collection(
+ _plot_point_collection(
ax,
points,
subset,
@@ -746,7 +758,6 @@ def _mapclassify_choro(values, scheme, **classification_kwds):
binning
Binning objects that holds the Series with values replaced with
class identifier and the bins.
-
"""
try:
import mapclassify.classifiers as classifiers
| geopandas.plotting.plot_point_collection
geopandas.plotting.plot_point_collection(ax, shapely.geometry.Point(1,2))
x = geoms.x.values
y = geoms.y.values
, line 193, in plot_point_collection
x = geoms.x.values
AttributeError: 'float' object has no attribute 'values'
geopandas.plotting.plot_point_collection(ax, [shapely.geometry.Point(1,2), shapely.geometry.Point(3,3)])
line 193, in plot_point_collection
x = geoms.x.values
AttributeError: 'list' object has no attribute 'x' | geopandas/geopandas | diff --git a/geopandas/tests/test_plotting.py b/geopandas/tests/test_plotting.py
index 306e3b6c..5ffacf1c 100644
--- a/geopandas/tests/test_plotting.py
+++ b/geopandas/tests/test_plotting.py
@@ -744,40 +744,40 @@ class TestPlotCollections:
# failing with matplotlib 1.4.3 (edge stays black even when specified)
pytest.importorskip("matplotlib", "1.5.0")
- from geopandas.plotting import plot_point_collection
+ from geopandas.plotting import _plot_point_collection, plot_point_collection
from matplotlib.collections import PathCollection
fig, ax = plt.subplots()
- coll = plot_point_collection(ax, self.points)
+ coll = _plot_point_collection(ax, self.points)
assert isinstance(coll, PathCollection)
ax.cla()
# default: single default matplotlib color
- coll = plot_point_collection(ax, self.points)
+ coll = _plot_point_collection(ax, self.points)
_check_colors(self.N, coll.get_facecolors(), [MPL_DFT_COLOR] * self.N)
# edgecolor depends on matplotlib version
# _check_colors(self.N, coll.get_edgecolors(), [MPL_DFT_COLOR]*self.N)
ax.cla()
# specify single other color
- coll = plot_point_collection(ax, self.points, color="g")
+ coll = _plot_point_collection(ax, self.points, color="g")
_check_colors(self.N, coll.get_facecolors(), ["g"] * self.N)
_check_colors(self.N, coll.get_edgecolors(), ["g"] * self.N)
ax.cla()
# specify edgecolor/facecolor
- coll = plot_point_collection(ax, self.points, facecolor="g", edgecolor="r")
+ coll = _plot_point_collection(ax, self.points, facecolor="g", edgecolor="r")
_check_colors(self.N, coll.get_facecolors(), ["g"] * self.N)
_check_colors(self.N, coll.get_edgecolors(), ["r"] * self.N)
ax.cla()
# list of colors
- coll = plot_point_collection(ax, self.points, color=["r", "g", "b"])
+ coll = _plot_point_collection(ax, self.points, color=["r", "g", "b"])
_check_colors(self.N, coll.get_facecolors(), ["r", "g", "b"])
_check_colors(self.N, coll.get_edgecolors(), ["r", "g", "b"])
ax.cla()
- coll = plot_point_collection(
+ coll = _plot_point_collection(
ax,
self.points,
color=[(0.5, 0.5, 0.5, 0.5), (0.1, 0.2, 0.3, 0.5), (0.4, 0.5, 0.6, 0.5)],
@@ -796,14 +796,18 @@ class TestPlotCollections:
# not a color
with pytest.raises(TypeError):
- plot_point_collection(ax, self.points, color="not color")
+ _plot_point_collection(ax, self.points, color="not color")
+
+ # check DeprecationWarning
+ with pytest.warns(DeprecationWarning):
+ plot_point_collection(ax, self.points)
def test_points_values(self):
- from geopandas.plotting import plot_point_collection
+ from geopandas.plotting import _plot_point_collection
# default colormap
fig, ax = plt.subplots()
- coll = plot_point_collection(ax, self.points, self.values)
+ coll = _plot_point_collection(ax, self.points, self.values)
fig.canvas.draw_idle()
cmap = plt.get_cmap()
expected_colors = cmap(np.arange(self.N) / (self.N - 1))
@@ -812,36 +816,39 @@ class TestPlotCollections:
# _check_colors(self.N, coll.get_edgecolors(), expected_colors)
def test_linestrings(self):
- from geopandas.plotting import plot_linestring_collection
+ from geopandas.plotting import (
+ _plot_linestring_collection,
+ plot_linestring_collection,
+ )
from matplotlib.collections import LineCollection
fig, ax = plt.subplots()
- coll = plot_linestring_collection(ax, self.lines)
+ coll = _plot_linestring_collection(ax, self.lines)
assert isinstance(coll, LineCollection)
ax.cla()
# default: single default matplotlib color
- coll = plot_linestring_collection(ax, self.lines)
+ coll = _plot_linestring_collection(ax, self.lines)
_check_colors(self.N, coll.get_color(), [MPL_DFT_COLOR] * self.N)
ax.cla()
# specify single other color
- coll = plot_linestring_collection(ax, self.lines, color="g")
+ coll = _plot_linestring_collection(ax, self.lines, color="g")
_check_colors(self.N, coll.get_colors(), ["g"] * self.N)
ax.cla()
# specify edgecolor / facecolor
- coll = plot_linestring_collection(ax, self.lines, facecolor="g", edgecolor="r")
+ coll = _plot_linestring_collection(ax, self.lines, facecolor="g", edgecolor="r")
_check_colors(self.N, coll.get_facecolors(), ["g"] * self.N)
_check_colors(self.N, coll.get_edgecolors(), ["r"] * self.N)
ax.cla()
# list of colors
- coll = plot_linestring_collection(ax, self.lines, color=["r", "g", "b"])
+ coll = _plot_linestring_collection(ax, self.lines, color=["r", "g", "b"])
_check_colors(self.N, coll.get_colors(), ["r", "g", "b"])
ax.cla()
- coll = plot_linestring_collection(
+ coll = _plot_linestring_collection(
ax,
self.lines,
color=[(0.5, 0.5, 0.5, 0.5), (0.1, 0.2, 0.3, 0.5), (0.4, 0.5, 0.6, 0.5)],
@@ -854,7 +861,7 @@ class TestPlotCollections:
ax.cla()
# pass through of kwargs
- coll = plot_linestring_collection(ax, self.lines, linestyle="--", linewidth=1)
+ coll = _plot_linestring_collection(ax, self.lines, linestyle="--", linewidth=1)
exp_ls = _style_to_linestring_onoffseq("dashed", 1)
res_ls = coll.get_linestyle()[0]
assert res_ls[0] == exp_ls[0]
@@ -863,15 +870,19 @@ class TestPlotCollections:
# not a color
with pytest.raises(TypeError):
- plot_linestring_collection(ax, self.lines, color="not color")
+ _plot_linestring_collection(ax, self.lines, color="not color")
+
+ # check DeprecationWarning
+ with pytest.warns(DeprecationWarning):
+ plot_linestring_collection(ax, self.lines)
def test_linestrings_values(self):
- from geopandas.plotting import plot_linestring_collection
+ from geopandas.plotting import _plot_linestring_collection
fig, ax = plt.subplots()
# default colormap
- coll = plot_linestring_collection(ax, self.lines, self.values)
+ coll = _plot_linestring_collection(ax, self.lines, self.values)
fig.canvas.draw_idle()
cmap = plt.get_cmap()
expected_colors = cmap(np.arange(self.N) / (self.N - 1))
@@ -879,7 +890,7 @@ class TestPlotCollections:
ax.cla()
# specify colormap
- coll = plot_linestring_collection(ax, self.lines, self.values, cmap="RdBu")
+ coll = _plot_linestring_collection(ax, self.lines, self.values, cmap="RdBu")
fig.canvas.draw_idle()
cmap = plt.get_cmap("RdBu")
expected_colors = cmap(np.arange(self.N) / (self.N - 1))
@@ -887,7 +898,7 @@ class TestPlotCollections:
ax.cla()
# specify vmin/vmax
- coll = plot_linestring_collection(ax, self.lines, self.values, vmin=3, vmax=5)
+ coll = _plot_linestring_collection(ax, self.lines, self.values, vmin=3, vmax=5)
fig.canvas.draw_idle()
cmap = plt.get_cmap()
expected_colors = cmap([0])
@@ -895,33 +906,33 @@ class TestPlotCollections:
ax.cla()
def test_polygons(self):
- from geopandas.plotting import plot_polygon_collection
+ from geopandas.plotting import _plot_polygon_collection, plot_polygon_collection
from matplotlib.collections import PatchCollection
fig, ax = plt.subplots()
- coll = plot_polygon_collection(ax, self.polygons)
+ coll = _plot_polygon_collection(ax, self.polygons)
assert isinstance(coll, PatchCollection)
ax.cla()
# default: single default matplotlib color
- coll = plot_polygon_collection(ax, self.polygons)
+ coll = _plot_polygon_collection(ax, self.polygons)
_check_colors(self.N, coll.get_facecolor(), [MPL_DFT_COLOR] * self.N)
_check_colors(self.N, coll.get_edgecolor(), ["k"] * self.N)
ax.cla()
# default: color sets both facecolor and edgecolor
- coll = plot_polygon_collection(ax, self.polygons, color="g")
+ coll = _plot_polygon_collection(ax, self.polygons, color="g")
_check_colors(self.N, coll.get_facecolor(), ["g"] * self.N)
_check_colors(self.N, coll.get_edgecolor(), ["g"] * self.N)
ax.cla()
# default: color can be passed as a list
- coll = plot_polygon_collection(ax, self.polygons, color=["g", "b", "r"])
+ coll = _plot_polygon_collection(ax, self.polygons, color=["g", "b", "r"])
_check_colors(self.N, coll.get_facecolor(), ["g", "b", "r"])
_check_colors(self.N, coll.get_edgecolor(), ["g", "b", "r"])
ax.cla()
- coll = plot_polygon_collection(
+ coll = _plot_polygon_collection(
ax,
self.polygons,
color=[(0.5, 0.5, 0.5, 0.5), (0.1, 0.2, 0.3, 0.5), (0.4, 0.5, 0.6, 0.5)],
@@ -939,28 +950,31 @@ class TestPlotCollections:
ax.cla()
# only setting facecolor keeps default for edgecolor
- coll = plot_polygon_collection(ax, self.polygons, facecolor="g")
+ coll = _plot_polygon_collection(ax, self.polygons, facecolor="g")
_check_colors(self.N, coll.get_facecolor(), ["g"] * self.N)
_check_colors(self.N, coll.get_edgecolor(), ["k"] * self.N)
ax.cla()
# custom facecolor and edgecolor
- coll = plot_polygon_collection(ax, self.polygons, facecolor="g", edgecolor="r")
+ coll = _plot_polygon_collection(ax, self.polygons, facecolor="g", edgecolor="r")
_check_colors(self.N, coll.get_facecolor(), ["g"] * self.N)
_check_colors(self.N, coll.get_edgecolor(), ["r"] * self.N)
ax.cla()
# not a color
with pytest.raises(TypeError):
- plot_polygon_collection(ax, self.polygons, color="not color")
+ _plot_polygon_collection(ax, self.polygons, color="not color")
+ # check DeprecationWarning
+ with pytest.warns(DeprecationWarning):
+ plot_polygon_collection(ax, self.polygons)
def test_polygons_values(self):
- from geopandas.plotting import plot_polygon_collection
+ from geopandas.plotting import _plot_polygon_collection
fig, ax = plt.subplots()
# default colormap, edge is still black by default
- coll = plot_polygon_collection(ax, self.polygons, self.values)
+ coll = _plot_polygon_collection(ax, self.polygons, self.values)
fig.canvas.draw_idle()
cmap = plt.get_cmap()
exp_colors = cmap(np.arange(self.N) / (self.N - 1))
@@ -970,7 +984,7 @@ class TestPlotCollections:
ax.cla()
# specify colormap
- coll = plot_polygon_collection(ax, self.polygons, self.values, cmap="RdBu")
+ coll = _plot_polygon_collection(ax, self.polygons, self.values, cmap="RdBu")
fig.canvas.draw_idle()
cmap = plt.get_cmap("RdBu")
exp_colors = cmap(np.arange(self.N) / (self.N - 1))
@@ -978,7 +992,7 @@ class TestPlotCollections:
ax.cla()
# specify vmin/vmax
- coll = plot_polygon_collection(ax, self.polygons, self.values, vmin=3, vmax=5)
+ coll = _plot_polygon_collection(ax, self.polygons, self.values, vmin=3, vmax=5)
fig.canvas.draw_idle()
cmap = plt.get_cmap()
exp_colors = cmap([0])
@@ -986,7 +1000,7 @@ class TestPlotCollections:
ax.cla()
# override edgecolor
- coll = plot_polygon_collection(ax, self.polygons, self.values, edgecolor="g")
+ coll = _plot_polygon_collection(ax, self.polygons, self.values, edgecolor="g")
fig.canvas.draw_idle()
cmap = plt.get_cmap()
exp_colors = cmap(np.arange(self.N) / (self.N - 1))
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 1
},
"num_modified_files": 1
} | 0.7 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"black",
"flake8",
"descartes",
"matplotlib",
"rtree"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==25.3.0
black==25.1.0
certifi==2025.1.31
click==8.1.8
click-plugins==1.1.1
cligj==0.7.2
contourpy==1.3.0
cycler==0.12.1
Cython==3.0.12
descartes==1.1.0
exceptiongroup==1.2.2
fiona==1.10.1
flake8==7.2.0
fonttools==4.56.0
-e git+https://github.com/geopandas/geopandas.git@f70a66918df086bd0fca37f03e160224f400fe14#egg=geopandas
importlib_metadata==8.6.1
importlib_resources==6.5.2
iniconfig==2.1.0
kiwisolver==1.4.7
matplotlib==3.9.4
mccabe==0.7.0
mypy-extensions==1.0.0
numpy==2.0.2
packaging==24.2
pandas==2.2.3
pathspec==0.12.1
pillow==11.1.0
platformdirs==4.3.7
pluggy==1.5.0
pycodestyle==2.13.0
pyflakes==3.3.2
pyparsing==3.2.3
pyproj==3.6.1
pytest==8.3.5
python-dateutil==2.9.0.post0
pytz==2025.2
rtree==1.4.0
shapely==2.0.7
six==1.17.0
tomli==2.2.1
typing_extensions==4.13.0
tzdata==2025.2
zipp==3.21.0
| name: geopandas
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==25.3.0
- black==25.1.0
- certifi==2025.1.31
- click==8.1.8
- click-plugins==1.1.1
- cligj==0.7.2
- contourpy==1.3.0
- cycler==0.12.1
- cython==3.0.12
- descartes==1.1.0
- exceptiongroup==1.2.2
- fiona==1.10.1
- flake8==7.2.0
- fonttools==4.56.0
- importlib-metadata==8.6.1
- importlib-resources==6.5.2
- iniconfig==2.1.0
- kiwisolver==1.4.7
- matplotlib==3.9.4
- mccabe==0.7.0
- mypy-extensions==1.0.0
- numpy==2.0.2
- packaging==24.2
- pandas==2.2.3
- pathspec==0.12.1
- pillow==11.1.0
- platformdirs==4.3.7
- pluggy==1.5.0
- pycodestyle==2.13.0
- pyflakes==3.3.2
- pyparsing==3.2.3
- pyproj==3.6.1
- pytest==8.3.5
- python-dateutil==2.9.0.post0
- pytz==2025.2
- rtree==1.4.0
- shapely==2.0.7
- six==1.17.0
- tomli==2.2.1
- typing-extensions==4.13.0
- tzdata==2025.2
- zipp==3.21.0
prefix: /opt/conda/envs/geopandas
| [
"geopandas/tests/test_plotting.py::TestPlotCollections::test_points",
"geopandas/tests/test_plotting.py::TestPlotCollections::test_points_values"
] | [
"geopandas/tests/test_plotting.py::TestPointPlotting::test_legend",
"geopandas/tests/test_plotting.py::TestPointPlotting::test_multipoints",
"geopandas/tests/test_plotting.py::TestLineStringPlotting::test_single_color",
"geopandas/tests/test_plotting.py::TestLineStringPlotting::test_style_kwargs",
"geopandas/tests/test_plotting.py::TestLineStringPlotting::test_subplots_norm",
"geopandas/tests/test_plotting.py::TestLineStringPlotting::test_multilinestrings",
"geopandas/tests/test_plotting.py::TestPolygonZPlotting::test_plot",
"geopandas/tests/test_plotting.py::TestGeometryCollectionPlotting::test_colors",
"geopandas/tests/test_plotting.py::TestGeometryCollectionPlotting::test_values",
"geopandas/tests/test_plotting.py::TestNonuniformGeometryPlotting::test_colors",
"geopandas/tests/test_plotting.py::TestNonuniformGeometryPlotting::test_style_kwargs",
"geopandas/tests/test_plotting.py::TestPlotCollections::test_linestrings",
"geopandas/tests/test_plotting.py::TestPlotCollections::test_linestrings_values",
"geopandas/tests/test_plotting.py::TestPlotCollections::test_polygons",
"geopandas/tests/test_plotting.py::TestPlotCollections::test_polygons_values",
"geopandas/tests/test_plotting.py::test_column_values"
] | [
"geopandas/tests/test_plotting.py::TestPointPlotting::test_figsize",
"geopandas/tests/test_plotting.py::TestPointPlotting::test_default_colors",
"geopandas/tests/test_plotting.py::TestPointPlotting::test_colormap",
"geopandas/tests/test_plotting.py::TestPointPlotting::test_single_color",
"geopandas/tests/test_plotting.py::TestPointPlotting::test_markersize",
"geopandas/tests/test_plotting.py::TestPointPlotting::test_style_kwargs",
"geopandas/tests/test_plotting.py::TestPointPlotting::test_subplots_norm",
"geopandas/tests/test_plotting.py::TestPointPlotting::test_empty_plot",
"geopandas/tests/test_plotting.py::TestPointPlotting::test_misssing",
"geopandas/tests/test_plotting.py::TestPointZPlotting::test_plot"
] | [] | BSD 3-Clause "New" or "Revised" License | 2,096 | 1,620 | [
"geopandas/plotting.py"
] |
automl__SMAC3-389 | 4e3816d77bc3d2b5406bb71b2fe93da2831ebb0f | 2018-01-31 23:40:47 | f710fa60dbf2c64e42ce14aa0eb529f92378560a | diff --git a/examples/branin/restore_state.py b/examples/branin/restore_state.py
index 12cb6b51d..55cce7426 100644
--- a/examples/branin/restore_state.py
+++ b/examples/branin/restore_state.py
@@ -23,12 +23,15 @@ def main():
'deterministic' : True,
'output_dir' : 'restore_me'}
original_scenario = Scenario(orig_scen_dict)
- smac = SMAC(scenario=original_scenario)
+ smac = SMAC(scenario=original_scenario,
+ run_id=1)
smac.optimize()
print("\n########## BUDGET EXHAUSTED! Restoring optimization: ##########\n")
- # Now the output is in the folder 'restore_me'
+ # Now the output is in the folder 'restore_me/run_1' (or whatever run_id has
+ # been passed to the SMAC-object above)
+ old_output_dir = os.path.join(original_scenario.output_dir, 'run_1')
#
# We could simply modify the scenario-object, stored in
# 'smac.solver.scenario' and start optimization again:
@@ -43,22 +46,18 @@ def main():
'output_dir' : 'restored'})
# We load the runhistory, ...
- rh_path = os.path.join(original_scenario.output_dir, "runhistory.json")
+ rh_path = os.path.join(old_output_dir, "runhistory.json")
runhistory = RunHistory(aggregate_func=None)
runhistory.load_json(rh_path, new_scenario.cs)
# ... stats, ...
- stats_path = os.path.join(original_scenario.output_dir, "stats.json")
+ stats_path = os.path.join(old_output_dir, "stats.json")
stats = Stats(new_scenario)
stats.load(stats_path)
# ... and trajectory.
- traj_path = os.path.join(original_scenario.output_dir, "traj_aclib2.json")
+ traj_path = os.path.join(old_output_dir, "traj_aclib2.json")
trajectory = TrajLogger.read_traj_aclib_format(
fn=traj_path, cs=new_scenario.cs)
incumbent = trajectory[-1]["incumbent"]
- # Because we changed the output_dir, we might want to copy the old
- # trajectory-file (runhistory and stats will be complete)
- new_traj_path = os.path.join(new_scenario.output_dir, "traj_aclib2.json")
- shutil.copy(traj_path, new_traj_path)
# Now we can initialize SMAC with the recovered objects and restore the
# state where we left off. By providing stats and a restore_incumbent, SMAC
@@ -66,7 +65,14 @@ def main():
smac = SMAC(scenario=new_scenario,
runhistory=runhistory,
stats=stats,
- restore_incumbent=incumbent)
+ restore_incumbent=incumbent,
+ run_id=1)
+ # Because we changed the output_dir, we might want to copy the old
+ # trajectory-file (runhistory and stats will be complete, but trajectory is
+ # written sequentially)
+ new_traj_path = os.path.join(new_scenario.output_dir, "run_1", "traj_aclib2.json")
+ shutil.copy(traj_path, new_traj_path)
+
smac.optimize()
if "__main__" == __name__:
diff --git a/smac/facade/smac_facade.py b/smac/facade/smac_facade.py
index 3368bac94..5b315dec3 100644
--- a/smac/facade/smac_facade.py
+++ b/smac/facade/smac_facade.py
@@ -128,15 +128,26 @@ class SMAC(object):
aggregate_func = average_cost
self.scenario = scenario
- self.output_dir = create_output_directory(scenario, run_id)
+ self.output_dir = ""
+ if not restore_incumbent:
+ self.output_dir = create_output_directory(scenario, run_id)
+ elif scenario.output_dir is not None:
+ # output-directory is created in CLI when restoring from a
+ # folder. calling the function again in the facade results in two
+ # folders being created: run_X and run_X.OLD. if we are
+ # restoring, the output-folder exists already and we omit creating it,
+ # but set the self-output_dir to the dir.
+ # necessary because we want to write traj to new output-dir in CLI.
+ self.output_dir = os.path.join(scenario.output_dir,
+ "run_%d" % (run_id))
if (
scenario.deterministic is True
and getattr(scenario, 'tuner_timeout', None) is None
and scenario.run_obj == 'quality'
):
self.logger.info('Optimizing a deterministic scenario for '
- 'qualitiy without a tuner timeout - will make '
- 'SMAC deterministi!')
+ 'quality without a tuner timeout - will make '
+ 'SMAC deterministic!')
scenario.intensification_percentage = 1e-10
scenario.write()
@@ -175,7 +186,7 @@ class SMAC(object):
# initial acquisition function
if acquisition_function is None:
acquisition_function = EI(model=model)
-
+
# inject model if necessary
if acquisition_function.model is None:
acquisition_function.model = model
diff --git a/smac/smac_cli.py b/smac/smac_cli.py
index b2382c0c7..4bab14f49 100644
--- a/smac/smac_cli.py
+++ b/smac/smac_cli.py
@@ -64,21 +64,19 @@ class SMACCLI(object):
stats = None
incumbent = None
- # Restore state (needs to be before scenario-creation!)
- if args_.restore_state:
- root_logger.debug("Restoring state from %s...", args_.restore_state)
- rh, stats, traj_list_aclib, traj_list_old = self.restore_state_before_scen(args_)
-
# Create scenario-object
scen = Scenario(args_.scenario_file, misc_args)
- # Restore state (continued, needs to be after scenario-creation!)
+ # Restore state
if args_.restore_state:
+ root_logger.debug("Restoring state from %s...", args_.restore_state)
+ rh, stats, traj_list_aclib, traj_list_old = self.restore_state(scen, args_)
+
scen.output_dir_for_this_run = create_output_directory(
scen, args_.seed, root_logger,
)
scen.write()
- stats, incumbent = self.restore_state_after_scen(scen, stats,
+ incumbent = self.restore_state_after_output_dir(scen, stats,
traj_list_aclib, traj_list_old)
if args_.warmstart_runhistory:
@@ -129,12 +127,9 @@ class SMACCLI(object):
except (TAEAbortException, FirstRunCrashedException) as err:
self.logger.error(err)
- def restore_state_before_scen(self, args_):
+ def restore_state(self, scen, args_):
"""Read in files for state-restoration: runhistory, stats, trajectory.
"""
- # Construct dummy-scenario for object-creation (mainly cs is needed)
- tmp_scen = InputReader().read_scenario_file(args_.scenario_file)
- tmp_scen = Scenario(tmp_scen, cmd_args={'output_dir':''})
# Check for folder and files
rh_path = os.path.join(args_.restore_state, "runhistory.json")
stats_path = os.path.join(args_.restore_state, "stats.json")
@@ -145,9 +140,9 @@ class SMACCLI(object):
raise FileNotFoundError("Could not find folder from which to restore.")
# Load runhistory and stats
rh = RunHistory(aggregate_func=None)
- rh.load_json(rh_path, tmp_scen.cs)
+ rh.load_json(rh_path, scen.cs)
self.logger.debug("Restored runhistory from %s", rh_path)
- stats = Stats(tmp_scen) # Need to inject actual scenario later for output_dir!
+ stats = Stats(scen)
stats.load(stats_path)
self.logger.debug("Restored stats from %s", stats_path)
with open(traj_path_aclib, 'r') as traj_fn:
@@ -156,13 +151,11 @@ class SMACCLI(object):
traj_list_old = traj_fn.readlines()
return rh, stats, traj_list_aclib, traj_list_old
- def restore_state_after_scen(self, scen, stats, traj_list_aclib,
- traj_list_old):
- """Finish processing files for state-restoration. The actual scenario
- needs to be injected into stats, as well as the trajectory dealt with
- (it is read in, but needs to be written to new output-folder after
- scenario is constructed."""
- stats.scenario = scen # inject actual scen for output_dir
+ def restore_state_after_output_dir(self, scen, stats, traj_list_aclib,
+ traj_list_old):
+ """Finish processing files for state-restoration. Trajectory
+ is read in, but needs to be written to new output-folder. Therefore, the
+ output-dir is created. This needs to be considered in the SMAC-facade."""
# write trajectory-list
traj_path_aclib = os.path.join(scen.output_dir, "traj_aclib2.json")
traj_path_old = os.path.join(scen.output_dir, "traj_old.csv")
@@ -171,8 +164,9 @@ class SMACCLI(object):
with open(traj_path_old, 'w') as traj_fn:
traj_fn.writelines(traj_list_old)
# read trajectory to retrieve incumbent
+ # TODO replace this with simple traj_path_aclib?
trajectory = TrajLogger.read_traj_aclib_format(fn=traj_path_aclib, cs=scen.cs)
incumbent = trajectory[-1]["incumbent"]
self.logger.debug("Restored incumbent %s from %s", incumbent,
traj_path_aclib)
- return stats, incumbent
+ return incumbent
| Path to scenario folder to restore state
Hi,
I was running the example on restoring the state for the branin function and I run into this error
```
INFO:smac.scenario.scenario.Scenario:Output to restored
Output directory= restore_me/run_1
['traj_old.csv', 'scenario.txt', 'runhistory.json', 'param_config_space.pcs', 'stats.json', 'traj_aclib2.json']
Traceback (most recent call last):
File "restore_state.py", line 77, in <module>
main()
File "restore_state.py", line 64, in main
shutil.copy(traj_path, new_traj_path)
File "/usr/lib/python3.5/shutil.py", line 235, in copy
copyfile(src, dst, follow_symlinks=follow_symlinks)
File "/usr/lib/python3.5/shutil.py", line 115, in copyfile
with open(dst, 'wb') as fdst:
FileNotFoundError: [Errno 2] No such file or directory: 'restored/traj_aclib2.json'
```
I figure the issue comes from this line:
https://github.com/automl/SMAC3/blob/56a5af9e3666ae54f7c126b62ae44852af65842d/examples/branin/restore_state.py#L46
The output scenario folder should redefined as `output_dir = os.path.join(original_scenario.output_dir, 'run_1')`, right?
| automl/SMAC3 | diff --git a/test/test_cli/test_restore_state.py b/test/test_cli/test_restore_state.py
index 8bcc3f066..1459b5fa9 100644
--- a/test/test_cli/test_restore_state.py
+++ b/test/test_cli/test_restore_state.py
@@ -40,6 +40,7 @@ class TestSMACCLI(unittest.TestCase):
for output_dir in self.output_dirs:
if output_dir:
shutil.rmtree(output_dir, ignore_errors=True)
+ #pass
os.chdir(self.current_dir)
@attr('slow')
@@ -100,7 +101,12 @@ class TestSMACCLI(unittest.TestCase):
# Increase limit and run for 10 (so 5 more) by using restore_state
testargs = ["python", "scripts/smac", "--restore_state",
self.output_one, "--scenario_file",
- self.scenario_one, "--verbose", "DEBUG"]
+ self.scenario_two, "--verbose", "DEBUG"]
with mock.patch.object(sys, 'argv', testargs):
self.smaccli.main_cli()
+ self.assertTrue(os.path.exists(self.output_one))
+ self.assertFalse(os.path.exists(self.output_one + '.OLD'))
+ self.assertTrue(os.path.exists(self.output_two))
+ self.assertFalse(os.path.exists(self.output_two + '.OLD'))
+
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 3
} | 0.8 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y build-essential swig"
],
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
attrs==22.2.0
Babel==2.11.0
certifi==2021.5.30
charset-normalizer==2.0.12
ConfigSpace==0.4.19
Cython==3.0.12
docutils==0.18.1
filelock==3.4.1
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
iniconfig==1.1.1
Jinja2==3.0.3
joblib==1.1.1
MarkupSafe==2.0.1
nose==1.3.7
numpy==1.19.5
packaging==21.3
pluggy==1.0.0
psutil==7.0.0
py==1.11.0
Pygments==2.14.0
pynisher==0.6.4
pyparsing==3.1.4
pyrfr==0.8.2
pytest==7.0.1
pytz==2025.2
requests==2.27.1
scikit-learn==0.24.2
scipy==1.5.4
six==1.17.0
-e git+https://github.com/automl/SMAC3.git@4e3816d77bc3d2b5406bb71b2fe93da2831ebb0f#egg=smac
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinx-rtd-theme==2.0.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jquery==4.1
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
threadpoolctl==3.1.0
tomli==1.2.3
typing==3.7.4.3
typing_extensions==4.1.1
urllib3==1.26.20
zipp==3.6.0
| name: SMAC3
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- attrs==22.2.0
- babel==2.11.0
- charset-normalizer==2.0.12
- configspace==0.4.19
- cython==3.0.12
- docutils==0.18.1
- filelock==3.4.1
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- jinja2==3.0.3
- joblib==1.1.1
- markupsafe==2.0.1
- nose==1.3.7
- numpy==1.19.5
- packaging==21.3
- pluggy==1.0.0
- psutil==7.0.0
- py==1.11.0
- pygments==2.14.0
- pynisher==0.6.4
- pyparsing==3.1.4
- pyrfr==0.8.2
- pytest==7.0.1
- pytz==2025.2
- requests==2.27.1
- scikit-learn==0.24.2
- scipy==1.5.4
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinx-rtd-theme==2.0.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jquery==4.1
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- threadpoolctl==3.1.0
- tomli==1.2.3
- typing==3.7.4.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- zipp==3.6.0
prefix: /opt/conda/envs/SMAC3
| [
"test/test_cli/test_restore_state.py::TestSMACCLI::test_same_dir"
] | [] | [
"test/test_cli/test_restore_state.py::TestSMACCLI::test_illegal_input",
"test/test_cli/test_restore_state.py::TestSMACCLI::test_missing_dir",
"test/test_cli/test_restore_state.py::TestSMACCLI::test_run_and_restore"
] | [] | BSD 3-Clause License | 2,105 | 2,322 | [
"examples/branin/restore_state.py",
"smac/facade/smac_facade.py",
"smac/smac_cli.py"
] |
|
burnash__gspread-502 | c65b3893d02d8e3896cfb7dd2d20eb2f0bdcdeb4 | 2018-02-03 23:44:43 | 20f113c9b49081f768ac689aa3475ad3301d7af2 | danthelion: @burnash What do you think about this implementation? Details are in the referenced issue.
burnash: @danthelion thank you for the issue and the PR.
As I said in the referenced issue I agree. I'm sorry for the delay in responding. Since you submitted the PR the code of gspread has significantly changed. If you have time, could you rebase your changes to resolve the conflicts? Also please don't forget to add a test to cover this fix.
danthelion: Sure, I'll get on it as soon as I can. 👍 | diff --git a/gspread/models.py b/gspread/models.py
index db592c7..30b36ea 100644
--- a/gspread/models.py
+++ b/gspread/models.py
@@ -545,11 +545,11 @@ class Worksheet(object):
except KeyError:
return []
- def get_all_records(self, empty2zero=False, head=1, default_blank=""):
- """Returns a list of dictionaries, all of them having the contents
- of the spreadsheet with the head row as keys and each of these
- dictionaries holding the contents of subsequent rows of cells
- as values.
+ def get_all_records(self, empty2zero=False, head=1, default_blank="", allow_underscores_in_numeric_literals=False):
+ """Returns a list of dictionaries, all of them having:
+ - the contents of the spreadsheet's with the head row as keys,
+ And each of these dictionaries holding
+ - the contents of subsequent rows of cells as values.
Cell values are numericised (strings that can be read as ints
or floats are converted).
@@ -564,13 +564,17 @@ class Worksheet(object):
converted to something else except empty string
or zero.
:type default_blank: str
+ :param allow_underscores_in_numeric_literals: (optional) Allow underscores
+ in numeric literals,
+ as introduced in PEP 515
+ :type allow_underscores_in_numeric_literals: bool
"""
idx = head - 1
data = self.get_all_values()
keys = data[idx]
- values = [numericise_all(row, empty2zero, default_blank)
+ values = [numericise_all(row, empty2zero, default_blank, allow_underscores_in_numeric_literals)
for row in data[idx + 1:]]
return [dict(zip(keys, row)) for row in values]
diff --git a/gspread/utils.py b/gspread/utils.py
index 0854611..a966820 100644
--- a/gspread/utils.py
+++ b/gspread/utils.py
@@ -29,7 +29,7 @@ def finditem(func, seq):
return next((item for item in seq if func(item)))
-def numericise(value, empty2zero=False, default_blank=""):
+def numericise(value, empty2zero=False, default_blank="", allow_underscores_in_numeric_literals=False):
"""Returns a value that depends on the input string:
- Float if input can be converted to Float
- Integer if input can be converted to integer
@@ -42,6 +42,10 @@ def numericise(value, empty2zero=False, default_blank=""):
'faa'
>>> numericise("3")
3
+ >>> numericise("3_2", allow_underscores_in_numeric_literals=False)
+ '3_2'
+ >>> numericise("3_2", allow_underscores_in_numeric_literals=True)
+ '32'
>>> numericise("3.1")
3.1
>>> numericise("", empty2zero=True)
@@ -58,6 +62,8 @@ def numericise(value, empty2zero=False, default_blank=""):
>>>
"""
if value is not None:
+ if "_" in value and not allow_underscores_in_numeric_literals:
+ return value
try:
value = int(value)
except ValueError:
@@ -73,9 +79,9 @@ def numericise(value, empty2zero=False, default_blank=""):
return value
-def numericise_all(input, empty2zero=False, default_blank=""):
+def numericise_all(input, empty2zero=False, default_blank="", allow_underscores_in_numeric_literals=False):
"""Returns a list of numericised values from strings"""
- return [numericise(s, empty2zero, default_blank) for s in input]
+ return [numericise(s, empty2zero, default_blank, allow_underscores_in_numeric_literals) for s in input]
def rowcol_to_a1(row, col):
| Ambiguity in the `numericise` function depending on Python version.
https://github.com/burnash/gspread/blob/217b43073e1abe9ceb1d65d2d0719d56f0a14642/gspread/utils.py#L58
I have run into some issues when fetching data from spreadsheets using the `get_all_records()` method when using Python 3.6+.
If a worksheet has data in the format `12_34`, which should be interpreted as a string (or numeric if _explicitly_ desired) the `numericise` function will convert it to an `int` because of https://docs.python.org/3/whatsnew/3.6.html#whatsnew36-pep515 introduced in Python3.6, using Python3.5.4 it parses it as the string `12_34`, as expected.
I don't think following PEP515 should be the default behaviour in this case.
Example
Python3.6
```
>>> from gspread.utils import numericise
>>> numericise('18_29')
1829
```
Python3.5.4
```
>>> from gspread.utils import numericise
>>> numericise('18_29')
'18_29'
``` | burnash/gspread | diff --git a/tests/test.py b/tests/test.py
index 431c0f7..ee42246 100644
--- a/tests/test.py
+++ b/tests/test.py
@@ -163,6 +163,20 @@ class UtilsTest(unittest.TestCase):
gid = 'ogsrar0'
self.assertEqual(utils.wid_to_gid(gid), '1015761654')
+ def test_numericise(self):
+ self.assertEqual(utils.numericise('faa'), 'faa')
+ self.assertEqual(utils.numericise('3'), 3)
+ self.assertEqual(utils.numericise('3_2'), '3_2')
+ self.assertEqual(utils.numericise('3_2', allow_underscores_in_numeric_literals=False), '3_2')
+ self.assertEqual(utils.numericise('3_2', allow_underscores_in_numeric_literals=True), 32)
+ self.assertEqual(utils.numericise('3.1'), 3.1)
+ self.assertEqual(utils.numericise('', empty2zero=True), 0)
+ self.assertEqual(utils.numericise('', empty2zero=False), '')
+ self.assertEqual(utils.numericise('', default_blank=None), None)
+ self.assertEqual(utils.numericise('', default_blank='foo'), 'foo')
+ self.assertEqual(utils.numericise(''), '')
+ self.assertEqual(utils.numericise(None), None)
+
class GspreadTest(BetamaxGspreadTest):
def _sequence_generator(self):
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 2
} | 3.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"requests[security]",
"oauth2client",
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"test-requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | betamax==0.8.1
-e git+https://github.com/burnash/betamax-json-body-serializer.git@0945268b69272cf90c55fdfd962f1801295ff30b#egg=betamax_json_body_serializer
certifi==2025.1.31
charset-normalizer==3.4.1
exceptiongroup==1.2.2
-e git+https://github.com/burnash/gspread.git@c65b3893d02d8e3896cfb7dd2d20eb2f0bdcdeb4#egg=gspread
httplib2==0.22.0
idna==3.10
iniconfig==2.1.0
nose==1.3.7
oauth2client==4.1.3
packaging==24.2
pluggy==1.5.0
pyasn1==0.6.1
pyasn1_modules==0.4.2
pyparsing==3.2.3
pytest==8.3.5
requests==2.32.3
rsa==4.9
six==1.17.0
tomli==2.2.1
urllib3==2.3.0
| name: gspread
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- betamax==0.8.1
- certifi==2025.1.31
- charset-normalizer==3.4.1
- exceptiongroup==1.2.2
- httplib2==0.22.0
- idna==3.10
- iniconfig==2.1.0
- nose==1.3.7
- oauth2client==4.1.3
- packaging==24.2
- pluggy==1.5.0
- pyasn1==0.6.1
- pyasn1-modules==0.4.2
- pyparsing==3.2.3
- pytest==8.3.5
- requests==2.32.3
- rsa==4.9
- six==1.17.0
- tomli==2.2.1
- urllib3==2.3.0
prefix: /opt/conda/envs/gspread
| [
"tests/test.py::UtilsTest::test_numericise"
] | [] | [
"tests/test.py::UtilsTest::test_a1_to_rowcol",
"tests/test.py::UtilsTest::test_addr_converters",
"tests/test.py::UtilsTest::test_extract_id_from_url",
"tests/test.py::UtilsTest::test_get_gid",
"tests/test.py::UtilsTest::test_no_extract_id_from_url",
"tests/test.py::UtilsTest::test_rowcol_to_a1",
"tests/test.py::ClientTest::test_create",
"tests/test.py::ClientTest::test_import_csv",
"tests/test.py::ClientTest::test_no_found_exeption",
"tests/test.py::ClientTest::test_openall",
"tests/test.py::SpreadsheetTest::test_add_del_worksheet",
"tests/test.py::SpreadsheetTest::test_get_worksheet",
"tests/test.py::SpreadsheetTest::test_properties",
"tests/test.py::SpreadsheetTest::test_sheet1",
"tests/test.py::SpreadsheetTest::test_worksheet",
"tests/test.py::SpreadsheetTest::test_worksheet_iteration",
"tests/test.py::WorksheetTest::test_acell",
"tests/test.py::WorksheetTest::test_append_row",
"tests/test.py::WorksheetTest::test_cell",
"tests/test.py::WorksheetTest::test_clear",
"tests/test.py::WorksheetTest::test_delete_row",
"tests/test.py::WorksheetTest::test_find",
"tests/test.py::WorksheetTest::test_findall",
"tests/test.py::WorksheetTest::test_get_all_records",
"tests/test.py::WorksheetTest::test_get_all_records_different_header",
"tests/test.py::WorksheetTest::test_get_all_values",
"tests/test.py::WorksheetTest::test_insert_row",
"tests/test.py::WorksheetTest::test_range",
"tests/test.py::WorksheetTest::test_resize",
"tests/test.py::WorksheetTest::test_update_acell",
"tests/test.py::WorksheetTest::test_update_cell",
"tests/test.py::WorksheetTest::test_update_cell_multiline",
"tests/test.py::WorksheetTest::test_update_cell_unicode",
"tests/test.py::WorksheetTest::test_update_cells",
"tests/test.py::WorksheetTest::test_update_cells_noncontiguous",
"tests/test.py::WorksheetTest::test_update_cells_unicode",
"tests/test.py::CellTest::test_numeric_value",
"tests/test.py::CellTest::test_properties"
] | [] | MIT License | 2,117 | 924 | [
"gspread/models.py",
"gspread/utils.py"
] |
dpkp__kafka-python-1367 | 618c5051493693c1305aa9f08e8a0583d5fcf0e3 | 2018-02-05 22:03:47 | 618c5051493693c1305aa9f08e8a0583d5fcf0e3 | diff --git a/kafka/consumer/fetcher.py b/kafka/consumer/fetcher.py
index f9fcb37..c9bbb97 100644
--- a/kafka/consumer/fetcher.py
+++ b/kafka/consumer/fetcher.py
@@ -835,12 +835,21 @@ class Fetcher(six.Iterator):
return parsed_records
- class PartitionRecords(six.Iterator):
+ class PartitionRecords(object):
def __init__(self, fetch_offset, tp, messages):
self.fetch_offset = fetch_offset
self.topic_partition = tp
self.messages = messages
- self.message_idx = 0
+ # When fetching an offset that is in the middle of a
+ # compressed batch, we will get all messages in the batch.
+ # But we want to start 'take' at the fetch_offset
+ for i, msg in enumerate(messages):
+ if msg.offset == fetch_offset:
+ self.message_idx = i
+ break
+ else:
+ self.message_idx = 0
+ self.messages = None
# For truthiness evaluation we need to define __len__ or __nonzero__
def __len__(self):
| Seek method returning incorrect messages on compressed topic when using max_poll_records
While using seek method of `kafka.consumer.group.seek' for a given partition, offset, we are seeing the inconsistent behavior for the messages returned with the subsequent poll method.
The issue is easily reproducible for the given topic (compacted).
Part of Workflow:
```
from kafka.consumer.group import KafkaConsumer
topic_partition = TopicPartition(topic, 0)
consumer = KafkaConsumer(*consumer_config)
consumer.assign([topic_partition])
start_offset = 100 # Example value: highwatermark - 10
consumer.seek(partition=topic_partition, offset=start_offset)
messages = consumer.poll(timeout_ms=1000, max_records=1)[topic_partition]
message = messages[0]
print('Offset found:', message.offset, 'Expected offset:', start_offset)
Sample Output:
$ Offset found:80 Expected offset:100
```
Observation:
* If iterator interface is used instead of poll interface, the issue no longer exists. My guess is somewhere while polling for messages, the fetched offsets are not updated or fetched messages are not skipped. It looks like iterator method is not using fetched_records api that's why it works fine.
* At times it does give correct messages (especially when given offset is closer to highwatermark)
Please let me know if any other details are required.
| dpkp/kafka-python | diff --git a/test/test_fetcher.py b/test/test_fetcher.py
index 429071a..4547222 100644
--- a/test/test_fetcher.py
+++ b/test/test_fetcher.py
@@ -498,3 +498,43 @@ def test__parse_fetched_data__out_of_range(fetcher, topic, mocker):
partition_record = fetcher._parse_fetched_data(completed_fetch)
assert partition_record is None
assert fetcher._subscriptions.assignment[tp].awaiting_reset is True
+
+
+def test_partition_records_offset():
+ """Test that compressed messagesets are handle correctly
+ when fetch offset is in the middle of the message list
+ """
+ batch_start = 120
+ batch_end = 130
+ fetch_offset = 123
+ tp = TopicPartition('foo', 0)
+ messages = [ConsumerRecord(tp.topic, tp.partition, i,
+ None, None, 'key', 'value', 'checksum', 0, 0)
+ for i in range(batch_start, batch_end)]
+ records = Fetcher.PartitionRecords(fetch_offset, None, messages)
+ assert len(records) > 0
+ msgs = records.take(1)
+ assert msgs[0].offset == 123
+ assert records.fetch_offset == 124
+ msgs = records.take(2)
+ assert len(msgs) == 2
+ assert len(records) > 0
+ records.discard()
+ assert len(records) == 0
+
+
+def test_partition_records_empty():
+ records = Fetcher.PartitionRecords(0, None, [])
+ assert len(records) == 0
+
+
+def test_partition_records_no_fetch_offset():
+ batch_start = 0
+ batch_end = 100
+ fetch_offset = 123
+ tp = TopicPartition('foo', 0)
+ messages = [ConsumerRecord(tp.topic, tp.partition, i,
+ None, None, 'key', 'value', 'checksum', 0, 0)
+ for i in range(batch_start, batch_end)]
+ records = Fetcher.PartitionRecords(fetch_offset, None, messages)
+ assert len(records) == 0
| {
"commit_name": "merge_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | 1.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-catchlog",
"pytest-sugar",
"pytest-mock",
"mock",
"python-snappy",
"lz4",
"xxhash"
],
"pre_install": [
"apt-get update",
"apt-get install -y libsnappy-dev"
],
"python": "3.6",
"reqs_path": [
"docs/requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
attrs==22.2.0
Babel==2.11.0
certifi==2021.5.30
charset-normalizer==2.0.12
coverage==6.2
cramjam==2.5.0
docutils==0.18.1
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
iniconfig==1.1.1
Jinja2==3.0.3
-e git+https://github.com/dpkp/kafka-python.git@618c5051493693c1305aa9f08e8a0583d5fcf0e3#egg=kafka_python
lz4==3.1.10
MarkupSafe==2.0.1
mock==5.2.0
packaging==21.3
pluggy==1.0.0
pockets==0.9.1
py==1.11.0
Pygments==2.14.0
pyparsing==3.1.4
pytest==7.0.1
pytest-catchlog==1.2.2
pytest-cov==4.0.0
pytest-mock==3.6.1
pytest-sugar==0.9.6
python-snappy==0.7.3
pytz==2025.2
requests==2.27.1
six==1.17.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinx-rtd-theme==2.0.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jquery==4.1
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-napoleon==0.7
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
termcolor==1.1.0
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
xxhash==3.2.0
zipp==3.6.0
| name: kafka-python
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- attrs==22.2.0
- babel==2.11.0
- charset-normalizer==2.0.12
- coverage==6.2
- cramjam==2.5.0
- docutils==0.18.1
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- jinja2==3.0.3
- lz4==3.1.10
- markupsafe==2.0.1
- mock==5.2.0
- packaging==21.3
- pluggy==1.0.0
- pockets==0.9.1
- py==1.11.0
- pygments==2.14.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-catchlog==1.2.2
- pytest-cov==4.0.0
- pytest-mock==3.6.1
- pytest-sugar==0.9.6
- python-snappy==0.7.3
- pytz==2025.2
- requests==2.27.1
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinx-rtd-theme==2.0.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jquery==4.1
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-napoleon==0.7
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- termcolor==1.1.0
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- xxhash==3.2.0
- zipp==3.6.0
prefix: /opt/conda/envs/kafka-python
| [
"test/test_fetcher.py::test_partition_records_offset",
"test/test_fetcher.py::test_partition_records_no_fetch_offset"
] | [] | [
"test/test_fetcher.py::test_send_fetches",
"test/test_fetcher.py::test_create_fetch_requests[api_version0-3]",
"test/test_fetcher.py::test_create_fetch_requests[api_version1-2]",
"test/test_fetcher.py::test_create_fetch_requests[api_version2-1]",
"test/test_fetcher.py::test_create_fetch_requests[api_version3-0]",
"test/test_fetcher.py::test_update_fetch_positions",
"test/test_fetcher.py::test__reset_offset",
"test/test_fetcher.py::test__send_offset_requests",
"test/test_fetcher.py::test__send_offset_requests_multiple_nodes",
"test/test_fetcher.py::test__handle_offset_response",
"test/test_fetcher.py::test_fetched_records",
"test/test_fetcher.py::test__handle_fetch_response[fetch_request0-fetch_response0-1]",
"test/test_fetcher.py::test__handle_fetch_response[fetch_request1-fetch_response1-2]",
"test/test_fetcher.py::test__handle_fetch_response[fetch_request2-fetch_response2-1]",
"test/test_fetcher.py::test__handle_fetch_response[fetch_request3-fetch_response3-1]",
"test/test_fetcher.py::test__handle_fetch_response[fetch_request4-fetch_response4-1]",
"test/test_fetcher.py::test__handle_fetch_response[fetch_request5-fetch_response5-1]",
"test/test_fetcher.py::test__unpack_message_set",
"test/test_fetcher.py::test__message_generator",
"test/test_fetcher.py::test__parse_fetched_data",
"test/test_fetcher.py::test__parse_fetched_data__paused",
"test/test_fetcher.py::test__parse_fetched_data__stale_offset",
"test/test_fetcher.py::test__parse_fetched_data__not_leader",
"test/test_fetcher.py::test__parse_fetched_data__unknown_tp",
"test/test_fetcher.py::test__parse_fetched_data__out_of_range",
"test/test_fetcher.py::test_partition_records_empty"
] | [] | Apache License 2.0 | 2,122 | 276 | [
"kafka/consumer/fetcher.py"
] |
|
dropbox__pyannotate-64 | b02080a3b340f5b5aa464007510e211ecd0529a3 | 2018-02-07 22:58:52 | 40edbfeed62a78cd683cd3eb56a7412ae40dd124 | diff --git a/pyannotate_runtime/collect_types.py b/pyannotate_runtime/collect_types.py
index ca88866..72d5bbf 100644
--- a/pyannotate_runtime/collect_types.py
+++ b/pyannotate_runtime/collect_types.py
@@ -41,11 +41,13 @@ from typing import (
Any,
Callable,
Dict,
+ Iterable,
Iterator,
List,
NamedTuple,
Optional,
Set,
+ Sized,
Tuple,
TypeVar,
Union,
@@ -54,6 +56,8 @@ from contextlib import contextmanager
# pylint: disable=invalid-name
+CO_GENERATOR = inspect.CO_GENERATOR # type: ignore
+
def _my_hash(arg_list):
# type: (List[Any]) -> int
@@ -84,8 +88,30 @@ class TypeWasIncomparable(object):
pass
+class FakeIterator(Iterable[Any], Sized):
+ """
+ Container for iterator values.
+
+ Note that FakeIterator([a, b, c]) is akin to list([a, b, c]); this
+ is turned into IteratorType by resolve_type().
+ """
+
+ def __init__(self, values):
+ # type: (List[Any]) -> None
+ self.values = values
+
+ def __iter__(self):
+ # type: () -> Iterator[Any]
+ for v in self.values:
+ yield v
+
+ def __len__(self):
+ # type: () -> int
+ return len(self.values)
+
+
_NONE_TYPE = type(None)
-InternalType = Union['DictType', 'ListType', 'TupleType', 'SetType', 'type']
+InternalType = Union['DictType', 'ListType', 'TupleType', 'SetType', 'IteratorType', 'type']
class DictType(object):
@@ -188,6 +214,39 @@ class SetType(object):
return not self.__eq__(other)
+class IteratorType(object):
+ """
+ Internal representation of Iterator type.
+ """
+
+ def __init__(self, val_type):
+ # type: (TentativeType) -> None
+ self.val_type = val_type
+
+ def __repr__(self):
+ # type: () -> str
+ if repr(self.val_type) == 'None':
+ # We didn't see any values, so we don't know what's inside
+ return 'Iterator'
+ else:
+ return 'Iterator[%s]' % (repr(self.val_type))
+
+ def __hash__(self):
+ # type: () -> int
+ return hash(self.val_type) if self.val_type else 0
+
+ def __eq__(self, other):
+ # type: (object) -> bool
+ if not isinstance(other, IteratorType):
+ return False
+
+ return self.val_type == other.val_type
+
+ def __ne__(self, other):
+ # type: (object) -> bool
+ return not self.__eq__(other)
+
+
class TupleType(object):
"""
Internal representation of Tuple type.
@@ -279,6 +338,9 @@ class TentativeType(object):
elif isinstance(type, ListType):
if EMPTY_LIST_TYPE in self.types_hashable:
self.types_hashable.remove(EMPTY_LIST_TYPE)
+ elif isinstance(type, IteratorType):
+ if EMPTY_ITERATOR_TYPE in self.types_hashable:
+ self.types_hashable.remove(EMPTY_ITERATOR_TYPE)
elif isinstance(type, DictType):
if EMPTY_DICT_TYPE in self.types_hashable:
self.types_hashable.remove(EMPTY_DICT_TYPE)
@@ -350,7 +412,7 @@ def name_from_type(type_):
"""
Helper function to get PEP-484 compatible string representation of our internal types.
"""
- if isinstance(type_, (DictType, ListType, TupleType, SetType)):
+ if isinstance(type_, (DictType, ListType, TupleType, SetType, IteratorType)):
return repr(type_)
else:
if type_.__name__ != 'NoneType':
@@ -369,6 +431,7 @@ def name_from_type(type_):
EMPTY_DICT_TYPE = DictType(TentativeType(), TentativeType())
EMPTY_LIST_TYPE = ListType(TentativeType())
EMPTY_SET_TYPE = SetType(TentativeType())
+EMPTY_ITERATOR_TYPE = IteratorType(TentativeType())
# TODO: Make this faster
@@ -450,6 +513,16 @@ def resolve_type(arg):
for sample_item in sample:
tentative_type.add(resolve_type(sample_item))
return SetType(tentative_type)
+ elif arg_type == FakeIterator:
+ assert isinstance(arg, FakeIterator) # this line helps mypy figure out types
+ sample = []
+ iterator = iter(arg)
+ for i in range(0, min(4, len(arg))):
+ sample.append(next(iterator))
+ tentative_type = TentativeType()
+ for sample_item in sample:
+ tentative_type.add(resolve_type(sample_item))
+ return IteratorType(tentative_type)
elif arg_type == tuple:
assert isinstance(arg, tuple) # this line helps mypy figure out types
sample = list(arg[:min(10, len(arg))])
@@ -715,8 +788,10 @@ _filter_filename = default_filter_filename # type: Callable[[Optional[str]], Op
if sys.version_info[0] == 2:
RETURN_VALUE_OPCODE = chr(opcode.opmap['RETURN_VALUE'])
+ YIELD_VALUE_OPCODE = chr(opcode.opmap['YIELD_VALUE'])
else:
RETURN_VALUE_OPCODE = opcode.opmap['RETURN_VALUE']
+ YIELD_VALUE_OPCODE = opcode.opmap['YIELD_VALUE']
def _trace_dispatch(frame, event, arg):
@@ -777,14 +852,29 @@ def _trace_dispatch(frame, event, arg):
resolved_types = prep_args(arg_info)
_task_queue.put(KeyAndTypes(function_key, resolved_types))
elif event == 'return':
- # This event is also triggered if a function raises an exception.
+ # This event is also triggered if a function yields or raises an exception.
# We can tell the difference by looking at the bytecode.
# (We don't get here for C functions so the bytecode always exists.)
- # TODO: Also recognize YIELD_VALUE opcode.
last_opcode = code.co_code[frame.f_lasti]
- if last_opcode != RETURN_VALUE_OPCODE:
- arg = NoReturnType()
- _task_queue.put(KeyAndReturn(function_key, resolve_type(arg)))
+ if last_opcode == RETURN_VALUE_OPCODE:
+ if code.co_flags & CO_GENERATOR:
+ # Return from a generator.
+ t = resolve_type(FakeIterator([]))
+ else:
+ t = resolve_type(arg)
+ elif last_opcode == YIELD_VALUE_OPCODE:
+ # Yield from a generator.
+ # TODO: Unify generators -- currently each YIELD is turned into
+ # a separate call, so a function yielding ints and strs will be
+ # typed as Union[Iterator[int], Iterator[str]] -- this should be
+ # Iterator[Union[int, str]].
+ t = resolve_type(FakeIterator([arg]))
+ else:
+ # This branch is also taken when returning from a generator.
+ # TODO: returning non-trivial values from generators, per PEP 380;
+ # and async def / await stuff.
+ t = NoReturnType
+ _task_queue.put(KeyAndReturn(function_key, t))
else:
sampling_counters[key] = None # We're not interested in this function.
| Detect yield and return opcodes
There are some [hacks in MonkeyType](https://github.com/Instagram/MonkeyType/blob/0119311745449560e30ef554ba449a99c1b6679d/monkeytype/tracing.py#L226) that detect yield and return opcodes -- the former to generate Generator/Iterator return annotations, the latter to distinguish between return and exceptions. | dropbox/pyannotate | diff --git a/pyannotate_runtime/tests/test_collect_types.py b/pyannotate_runtime/tests/test_collect_types.py
index 06ca1b2..9d98085 100644
--- a/pyannotate_runtime/tests/test_collect_types.py
+++ b/pyannotate_runtime/tests/test_collect_types.py
@@ -594,6 +594,43 @@ class TestCollectTypes(TestBaseClass):
self.assert_type_comments('func_with_unknown_module_types', ['(C) -> C'])
+ def test_yield_basic(self):
+ # type: () -> None
+ def gen(n, a):
+ for i in range(n):
+ yield a
+
+ with self.collecting_types():
+ list(gen(10, 'x'))
+
+ self.assert_type_comments('gen', ['(int, str) -> Iterator[str]'])
+
+ def test_yield_various(self):
+ # type: () -> None
+ def gen(n, a, b):
+ for i in range(n):
+ yield a
+ yield b
+
+ with self.collecting_types():
+ list(gen(10, 'x', 1))
+ list(gen(0, 0, 0))
+
+ # TODO: This should really return Iterator[Union[int, str]]
+ self.assert_type_comments('gen', ['(int, str, int) -> Iterator[int]',
+ '(int, str, int) -> Iterator[str]'])
+
+ def test_yield_empty(self):
+ # type: () -> None
+ def gen():
+ if False:
+ yield
+
+ with self.collecting_types():
+ list(gen())
+
+ self.assert_type_comments('gen', ['() -> Iterator'])
+
def foo(arg):
# type: (Any) -> Any
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 1
} | 1.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
importlib-metadata==4.8.3
iniconfig==1.1.1
mypy-extensions==1.0.0
packaging==21.3
pluggy==1.0.0
py==1.11.0
-e git+https://github.com/dropbox/pyannotate.git@b02080a3b340f5b5aa464007510e211ecd0529a3#egg=pyannotate
pyparsing==3.1.4
pytest==7.0.1
six==1.17.0
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: pyannotate
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- mypy-extensions==1.0.0
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- six==1.17.0
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/pyannotate
| [
"pyannotate_runtime/tests/test_collect_types.py::TestCollectTypes::test_yield_basic",
"pyannotate_runtime/tests/test_collect_types.py::TestCollectTypes::test_yield_empty",
"pyannotate_runtime/tests/test_collect_types.py::TestCollectTypes::test_yield_various"
] | [] | [
"pyannotate_runtime/tests/test_collect_types.py::TestCollectTypes::test_callee_star_args",
"pyannotate_runtime/tests/test_collect_types.py::TestCollectTypes::test_caller_star_args",
"pyannotate_runtime/tests/test_collect_types.py::TestCollectTypes::test_default_args",
"pyannotate_runtime/tests/test_collect_types.py::TestCollectTypes::test_fully_qualified_type_name_with_sub_package",
"pyannotate_runtime/tests/test_collect_types.py::TestCollectTypes::test_ignoring_c_calls",
"pyannotate_runtime/tests/test_collect_types.py::TestCollectTypes::test_keyword_args",
"pyannotate_runtime/tests/test_collect_types.py::TestCollectTypes::test_many_signatures",
"pyannotate_runtime/tests/test_collect_types.py::TestCollectTypes::test_no_crash_on_nested_dict_comps",
"pyannotate_runtime/tests/test_collect_types.py::TestCollectTypes::test_no_return",
"pyannotate_runtime/tests/test_collect_types.py::TestCollectTypes::test_only_return",
"pyannotate_runtime/tests/test_collect_types.py::TestCollectTypes::test_recursive_function",
"pyannotate_runtime/tests/test_collect_types.py::TestCollectTypes::test_recursive_function_2",
"pyannotate_runtime/tests/test_collect_types.py::TestCollectTypes::test_run_a_bunch_of_tests",
"pyannotate_runtime/tests/test_collect_types.py::TestCollectTypes::test_skip_lambda",
"pyannotate_runtime/tests/test_collect_types.py::TestCollectTypes::test_star_star_args",
"pyannotate_runtime/tests/test_collect_types.py::TestCollectTypes::test_two_signatures",
"pyannotate_runtime/tests/test_collect_types.py::TestCollectTypes::test_type_collection_on_another_thread",
"pyannotate_runtime/tests/test_collect_types.py::TestCollectTypes::test_type_collection_on_main_thread",
"pyannotate_runtime/tests/test_collect_types.py::TestCollectTypes::test_unknown_module_types",
"pyannotate_runtime/tests/test_collect_types.py::TestInitWithFilter::test_init_with_filter",
"pyannotate_runtime/tests/test_collect_types.py::TestInitWithFilter::test_init_with_none_filter"
] | [] | Apache License 2.0 | 2,131 | 1,725 | [
"pyannotate_runtime/collect_types.py"
] |
|
nipy__nipype-2432 | d7a8085d9230c4f43489ba93742ea1f6401f3ede | 2018-02-08 16:41:43 | 704b97dee7848283692bac38f04541c5af2a87b5 | diff --git a/nipype/pipeline/engine/base.py b/nipype/pipeline/engine/base.py
index 51449632b..0883023f6 100644
--- a/nipype/pipeline/engine/base.py
+++ b/nipype/pipeline/engine/base.py
@@ -84,9 +84,12 @@ class EngineBase(object):
A clone of node or workflow must have a new name
"""
if name == self.name:
- raise ValueError('Cloning requires a new name, "%s" is in use.' % name)
+ raise ValueError('Cloning requires a new name, "%s" is '
+ 'in use.' % name)
clone = deepcopy(self)
clone.name = name
+ if hasattr(clone, '_id'):
+ clone._id = name
return clone
def _check_outputs(self, parameter):
| workflow with iterables and cloned nodes fail when expanding iterables
### Summary
When running a workflow which includes a cloned node and iterables the workflow will fail when expanding the iterables because the id of the cloned node will be the same as the original one.
### Actual behavior
Will result in an error:
Traceback (most recent call last):
File "<ipython-input-55-177d6eaeef2c>", line 27, in <module>
workflow.run()
File "/data/eaxfjord/anaconda2/lib/python2.7/site-packages/nipype/pipeline/engine/workflows.py", line 592, in run
execgraph = generate_expanded_graph(deepcopy(flatgraph))
File "/data/eaxfjord/anaconda2/lib/python2.7/site-packages/nipype/pipeline/engine/utils.py", line 1042, in generate_expanded_graph
iterable_prefix, inode.synchronize)
File "/data/eaxfjord/anaconda2/lib/python2.7/site-packages/nipype/pipeline/engine/utils.py", line 733, in _merge_graphs
raise Exception(("Execution graph does not have a unique set of node "
Exception: Execution graph does not have a unique set of node names. Please rerun the workflow
### Expected behavior
Will execute normally without the errors.
### How to replicate the behavior
The following workflow will produce the error.
### Script/Workflow details
```python
from nipype.interfaces import utility as niu
from nipype.pipeline import engine as pe
def addstr(string):
string = ('%s+2' % string )
return string
subject_list = ['sub-001', 'sub-002']
inputnode = pe.Node(niu.IdentityInterface(fields = ['subject']),
name = 'inputnode')
inputnode.iterables = [('subject', subject_list)]
node_1 = pe.Node(niu.Function(input_names='string',
output_names= 'string',
function = addstr),name='node_1')
node_2 = node_1.clone('node_2')
workflow = pe.Workflow(name='my_workflow')
workflow.connect([(inputnode, node_1, [('subject','string')]),
(node_1, node_2, [('string','string')])])
workflow.run()
```
### Platform details:
/data/eaxfjord/anaconda2/lib/python2.7/site-packages/h5py/__init__.py:34: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
from ._conv import register_converters as _register_converters
{'nibabel_version': '2.2.1', 'sys_executable': '/data/eaxfjord/anaconda2/bin/python', 'networkx_version': '2.1', 'numpy_version': '1.14.0', 'sys_platform': 'linux2', 'sys_version': '2.7.13 |Anaconda custom (64-bit)| (default, Dec 20 2016, 23:09:15) \n[GCC 4.4.7 20120313 (Red Hat 4.4.7-1)]', 'commit_source': 'installation', 'commit_hash': '0a5948a0', 'pkg_path': '/data/eaxfjord/anaconda2/lib/python2.7/site-packages/nipype', 'nipype_version': '1.0.0', 'traits_version': '4.6.0', 'scipy_version': '1.0.0'}
1.0.0
### Execution environment
- My python environment outside container
| nipy/nipype | diff --git a/nipype/pipeline/engine/tests/test_base.py b/nipype/pipeline/engine/tests/test_base.py
index 54356fd6c..fd87aa687 100644
--- a/nipype/pipeline/engine/tests/test_base.py
+++ b/nipype/pipeline/engine/tests/test_base.py
@@ -6,6 +6,8 @@ from __future__ import print_function, unicode_literals
import pytest
from ..base import EngineBase
from ....interfaces import base as nib
+from ....interfaces import utility as niu
+from ... import engine as pe
class InputSpec(nib.TraitedSpec):
@@ -64,3 +66,24 @@ def test_clone():
with pytest.raises(ValueError):
base.clone('nodename')
+
+def test_clone_node_iterables(tmpdir):
+ tmpdir.chdir()
+
+ def addstr(string):
+ return ('%s + 2' % string)
+
+ subject_list = ['sub-001', 'sub-002']
+ inputnode = pe.Node(niu.IdentityInterface(fields=['subject']),
+ name='inputnode')
+ inputnode.iterables = [('subject', subject_list)]
+
+ node_1 = pe.Node(niu.Function(input_names='string',
+ output_names='string',
+ function=addstr), name='node_1')
+ node_2 = node_1.clone('node_2')
+
+ workflow = pe.Workflow(name='iter_clone_wf')
+ workflow.connect([(inputnode, node_1, [('subject', 'string')]),
+ (node_1, node_2, [('string', 'string')])])
+ workflow.run()
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 1.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
click==8.0.4
configparser==5.2.0
decorator==4.4.2
funcsigs==1.0.2
future==1.0.0
importlib-metadata==4.8.3
iniconfig==1.1.1
isodate==0.6.1
lxml==5.3.1
mock==5.2.0
networkx==2.5.1
nibabel==3.2.2
-e git+https://github.com/nipy/nipype.git@d7a8085d9230c4f43489ba93742ea1f6401f3ede#egg=nipype
numpy==1.19.5
packaging==21.3
pluggy==1.0.0
prov==1.5.0
py==1.11.0
pydot==1.4.2
pydotplus==2.0.2
pyparsing==3.1.4
pytest==7.0.1
python-dateutil==2.9.0.post0
rdflib==5.0.0
scipy==1.5.4
simplejson==3.20.1
six==1.17.0
tomli==1.2.3
traits==6.4.1
typing_extensions==4.1.1
zipp==3.6.0
| name: nipype
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- click==8.0.4
- configparser==5.2.0
- decorator==4.4.2
- funcsigs==1.0.2
- future==1.0.0
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- isodate==0.6.1
- lxml==5.3.1
- mock==5.2.0
- networkx==2.5.1
- nibabel==3.2.2
- numpy==1.19.5
- packaging==21.3
- pluggy==1.0.0
- prov==1.5.0
- py==1.11.0
- pydot==1.4.2
- pydotplus==2.0.2
- pyparsing==3.1.4
- pytest==7.0.1
- python-dateutil==2.9.0.post0
- rdflib==5.0.0
- scipy==1.5.4
- simplejson==3.20.1
- six==1.17.0
- tomli==1.2.3
- traits==6.4.1
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/nipype
| [
"nipype/pipeline/engine/tests/test_base.py::test_clone_node_iterables"
] | [] | [
"nipype/pipeline/engine/tests/test_base.py::test_create[valid1]",
"nipype/pipeline/engine/tests/test_base.py::test_create[valid_node]",
"nipype/pipeline/engine/tests/test_base.py::test_create[valid-node]",
"nipype/pipeline/engine/tests/test_base.py::test_create[ValidNode0]",
"nipype/pipeline/engine/tests/test_base.py::test_create_invalid[invalid*1]",
"nipype/pipeline/engine/tests/test_base.py::test_create_invalid[invalid.1]",
"nipype/pipeline/engine/tests/test_base.py::test_create_invalid[invalid@]",
"nipype/pipeline/engine/tests/test_base.py::test_create_invalid[in/valid]",
"nipype/pipeline/engine/tests/test_base.py::test_create_invalid[None]",
"nipype/pipeline/engine/tests/test_base.py::test_hierarchy",
"nipype/pipeline/engine/tests/test_base.py::test_clone"
] | [] | Apache License 2.0 | 2,134 | 203 | [
"nipype/pipeline/engine/base.py"
] |
|
adamjstewart__fiscalyear-5 | 77c5c0c82a62de36e77284e924f744bb1e770a31 | 2018-02-08 16:57:16 | a59cde7a881a85c5a65e523623e23668c2cb991c | codecov-io: # [Codecov](https://codecov.io/gh/adamjstewart/fiscalyear/pull/5?src=pr&el=h1) Report
> Merging [#5](https://codecov.io/gh/adamjstewart/fiscalyear/pull/5?src=pr&el=desc) into [master](https://codecov.io/gh/adamjstewart/fiscalyear/commit/77c5c0c82a62de36e77284e924f744bb1e770a31?src=pr&el=desc) will **decrease** coverage by `0.43%`.
> The diff coverage is `93.75%`.
[](https://codecov.io/gh/adamjstewart/fiscalyear/pull/5?src=pr&el=tree)
```diff
@@ Coverage Diff @@
## master #5 +/- ##
==========================================
- Coverage 100% 99.56% -0.44%
==========================================
Files 1 1
Lines 233 229 -4
==========================================
- Hits 233 228 -5
- Misses 0 1 +1
```
| [Impacted Files](https://codecov.io/gh/adamjstewart/fiscalyear/pull/5?src=pr&el=tree) | Coverage Δ | |
|---|---|---|
| [fiscalyear.py](https://codecov.io/gh/adamjstewart/fiscalyear/pull/5/diff?src=pr&el=tree#diff-ZmlzY2FseWVhci5weQ==) | `99.56% <93.75%> (-0.44%)` | :arrow_down: |
------
[Continue to review full report at Codecov](https://codecov.io/gh/adamjstewart/fiscalyear/pull/5?src=pr&el=continue).
> **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta)
> `Δ = absolute <relative> (impact)`, `ø = not affected`, `? = missing data`
> Powered by [Codecov](https://codecov.io/gh/adamjstewart/fiscalyear/pull/5?src=pr&el=footer). Last update [77c5c0c...d01eec3](https://codecov.io/gh/adamjstewart/fiscalyear/pull/5?src=pr&el=lastupdated). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments).
| diff --git a/fiscalyear.py b/fiscalyear.py
index c6f8ddc..f3a7640 100644
--- a/fiscalyear.py
+++ b/fiscalyear.py
@@ -24,10 +24,41 @@ START_MONTH = 10
START_DAY = 1
+def _validate_fiscal_calendar_params(start_year, start_month, start_day):
+ """Raise an Exception if the calendar parameters are invalid.
+
+ :param start_year: Relationship between the start of the fiscal year and
+ the calendar year. Possible values: ``'previous'`` or ``'same'``.
+ :type start_year: str
+ :param start_month: The first month of the fiscal year
+ :type start_month: int or str
+ :param start_day: The first day of the first month of the fiscal year
+ :type start_day: int or str
+ :raises TypeError: If ``start_year`` is not a ``str``.
+ :raises ValueError: If ``start_year`` is not ``'previous'`` or ``'same'``
+ :raises ValueError: If ``start_month`` or ``start_day`` is not an int or
+ int-like string
+ :raises ValueError: If ``start_month`` or ``start_day`` is out of range
+ """
+ if not isinstance(start_year, str):
+ raise TypeError("'start_year' must be a 'str', not: '%s'" % type(str))
+ if start_year not in ('previous', 'same'):
+ msg = "'start_year' must be either 'previous' or 'same', not: '%s'"
+ raise ValueError(msg % start_year)
+ _check_day(start_month, start_day)
+
+
+def setup_fiscal_calendar(start_year, start_month, start_day):
+ """Change the global calendar settings."""
+ _validate_fiscal_calendar_params(start_year, start_month, start_day)
+ global START_YEAR, START_MONTH, START_DAY
+ START_YEAR = start_year
+ START_MONTH = start_month
+ START_DAY = start_day
+
+
@contextlib.contextmanager
-def fiscal_calendar(start_year=None,
- start_month=None,
- start_day=None):
+def fiscal_calendar(start_year=None, start_month=None, start_day=None):
"""A context manager that lets you modify the start of the fiscal calendar
inside the scope of a with-statement.
@@ -38,43 +69,22 @@ def fiscal_calendar(start_year=None,
:type start_month: int or str
:param start_day: The first day of the first month of the fiscal year
:type start_day: int or str
- :raises AssertionError: If ``start_year`` is not ``'previous'`` or ``'same'``
- :raises TypeError: If ``start_month`` or ``start_day`` is not an int or int-like string
+ :raises ValueError: If ``start_year`` is not ``'previous'`` or ``'same'``
+ :raises TypeError: If ``start_month`` or ``start_day`` is not an int or
+ int-like string
:raises ValueError: If ``start_month`` or ``start_day`` is out of range
"""
- global START_YEAR
- global START_MONTH
- global START_DAY
-
- # Use default values if not changed
- if start_year is None:
- start_year = START_YEAR
- if start_month is None:
- start_month = START_MONTH
- if start_day is None:
- start_day = START_DAY
-
- assert isinstance(start_year, str)
- assert start_year == 'previous' or start_year == 'same'
- start_month = _check_month(start_month)
- start_day = _check_day(start_month, start_day)
-
- # Backup previous values
- old_start_year = START_YEAR
- old_start_month = START_MONTH
- old_start_day = START_DAY
+ # If arguments are omitted, use the currently active values.
+ start_year = START_YEAR if start_year is None else start_year
+ start_month = START_MONTH if start_month is None else start_month
+ start_day = START_DAY if start_day is None else start_day
# Temporarily change global variables
- START_YEAR = start_year
- START_MONTH = start_month
- START_DAY = start_day
-
+ previous_values = (START_YEAR, START_MONTH, START_DAY)
+ setup_fiscal_calendar(start_year, start_month, start_day)
yield
-
# Restore previous values
- START_YEAR = old_start_year
- START_MONTH = old_start_month
- START_DAY = old_start_day
+ setup_fiscal_calendar(*previous_values)
def _check_int(value):
@@ -225,11 +235,9 @@ class FiscalYear(object):
return self == item
elif isinstance(item, FiscalQuarter):
return self._fiscal_year == item.fiscal_year
- elif (isinstance(item, FiscalDateTime) or
- isinstance(item, datetime.datetime)):
+ elif isinstance(item, datetime.datetime):
return self.start <= item <= self.end
- elif (isinstance(item, FiscalDate) or
- isinstance(item, datetime.date)):
+ elif isinstance(item, datetime.date):
return self.start.date() <= item <= self.end.date()
else:
raise TypeError("can't compare '%s' to '%s'" % (
@@ -405,11 +413,9 @@ class FiscalQuarter(object):
"""
if isinstance(item, FiscalQuarter):
return self == item
- elif (isinstance(item, FiscalDateTime) or
- isinstance(item, datetime.datetime)):
+ elif isinstance(item, datetime.datetime):
return self.start <= item <= self.end
- elif (isinstance(item, FiscalDate) or
- isinstance(item, datetime.date)):
+ elif isinstance(item, datetime.date):
return self.start.date() <= item <= self.end.date()
else:
raise TypeError("can't compare '%s' to '%s'" % (
| Add a function for easily changing the global "START_*" parameters
I think it would make sense to have a function that would make changing the global parameters easier. E.g.
``` python
def setup_fiscal_year(start_year, start_month, start_day):
global START_YEAR, START_MONTH, START_DAY
START_YEAR = start_year
START_MONTH = start_month
START_DAY = start_day
def test_setup_fiscal_year():
# test defaults
day = fiscalyear.FiscalDate(2017, 12, 1)
assert day.fiscal_year == 2018
assert day.quarter == 1
# change fiscal year settings
fiscalyear.setup_fiscal_year("same", 1, 1)
assert day.fiscal_year == 2017
assert day.quarter == 4
# restore defaults and re-test
fiscalyear.setup_fiscal_year("previous", 10, 1)
assert day.fiscal_year == 2018
assert day.quarter == 1
```
This could also make it possible to change the Fiscal Year settings even if you don't import the whole module. E.g.
``` python
In [4]: from fiscalyear import FiscalQuarter, setup_fiscal_year
In [5]: quarter = FiscalQuarter(2018, 1)
In [6]: quarter.start
Out[6]: FiscalDateTime(2017, 10, 1, 0, 0)
In [7]: setup_fiscal_year('same', 1, 1)
In [8]: quarter.start
Out[8]: FiscalDateTime(2018, 1, 1, 0, 0)
``` | adamjstewart/fiscalyear | diff --git a/test_fiscalyear.py b/test_fiscalyear.py
index 690835f..5b2779f 100644
--- a/test_fiscalyear.py
+++ b/test_fiscalyear.py
@@ -9,8 +9,143 @@ import pytest
US_FEDERAL = ('previous', 10, 1)
UK_PERSONAL = ('same', 4, 6)
-# Default to U.S.
-fiscalyear.START_YEAR, fiscalyear.START_MONTH, fiscalyear.START_DAY = US_FEDERAL
+
+class TestCheckInt(object):
+ @pytest.mark.parametrize("value, exception", [
+ ('asdf', TypeError),
+ ("-999", TypeError),
+ # Technically speaking, _check_int should accept negative integers
+ # but this isn't a public function + datetime doesn't handle them
+ # anyway.
+ (float(), TypeError),
+ (object(), TypeError),
+ ])
+ def test_invalid_input(self, value, exception):
+ with pytest.raises(exception):
+ fiscalyear._check_int(value)
+
+ @pytest.mark.parametrize("value", [1, 2, 0, -1, -2, "1", "0", "999"])
+ def test_valid_input(self, value):
+ assert int(value) == fiscalyear._check_int(value)
+
+
+class TestCheckYear(object):
+ @pytest.mark.parametrize("value, exception", [
+ ('asdf', TypeError),
+ (float(), TypeError),
+ (object(), TypeError),
+ ("-1", TypeError),
+ (-1, ValueError),
+ (0, ValueError),
+ ("0", ValueError),
+ (10000, ValueError),
+ ("10000", ValueError),
+ ])
+ def test_invalid_input(self, value, exception):
+ with pytest.raises(exception):
+ fiscalyear._check_year(value)
+
+ @pytest.mark.parametrize("value", [1, 2, "1", "999"])
+ def test_valid_input(self, value):
+ assert int(value) == fiscalyear._check_year(value)
+
+
+class TestCheckDay(object):
+ @pytest.mark.parametrize("month, day, exception", [
+ (1, 'asdf', TypeError),
+ (1, "-999", TypeError),
+ (1, float(), TypeError),
+ (1, object(), TypeError),
+ (1, -1, ValueError),
+ (1, "-1", TypeError),
+ (1, 0, ValueError),
+ (1, "0", ValueError),
+ (1, 32, ValueError),
+ (1, 32, ValueError),
+ ])
+ def test_invalid_input(self, month, day, exception):
+ with pytest.raises(exception):
+ fiscalyear._check_day(month, day)
+
+ @pytest.mark.parametrize("month, day", [(1, 1), (1, 2), (1, "1"), (1, 31), (1, "31")])
+ def test_valid_input(self, month, day):
+ assert int(day) == fiscalyear._check_day(month, day)
+
+
+class TestCheckQuarter(object):
+ @pytest.mark.parametrize("value, exception", [
+ ('asdf', TypeError),
+ (float(), TypeError),
+ (object(), TypeError),
+ ("-1", TypeError),
+ (-1, ValueError),
+ (0, ValueError),
+ ("0", ValueError),
+ (5, ValueError),
+ ("5", ValueError),
+ ])
+ def test_invalid_input(self, value, exception):
+ with pytest.raises(exception):
+ fiscalyear._check_quarter(value)
+
+ @pytest.mark.parametrize("value", [1, 2, "1", "4"])
+ def test_valid_input(self, value):
+ assert int(value) == fiscalyear._check_quarter(value)
+
+
+class TestCalendarSettingsValidator(object):
+ @pytest.mark.parametrize("arguments, exception", [
+ (dict(start_year='asdf', start_month=12, start_day=1), ValueError),
+ (dict(start_year=float(1999), start_month=12, start_day=1), TypeError),
+ (dict(start_year=object(), start_month=12, start_day=1), TypeError),
+
+ (dict(start_year='same', start_month='asdf', start_day=1), TypeError),
+ (dict(start_year='same', start_month=float(12), start_day=1), TypeError),
+ (dict(start_year='same', start_month=object(), start_day=1), TypeError),
+ (dict(start_year='same', start_month=-1, start_day=1), ValueError),
+ (dict(start_year='same', start_month=0, start_day=1), ValueError),
+ (dict(start_year='same', start_month=13, start_day=1), ValueError),
+
+ (dict(start_year='same', start_month=12, start_day='asdf'), TypeError),
+ (dict(start_year='same', start_month=12, start_day=float(1)), TypeError),
+ (dict(start_year='same', start_month=12, start_day=object()), TypeError),
+ (dict(start_year='same', start_month=12, start_day=0), ValueError),
+ (dict(start_year='same', start_month=12, start_day=-1), ValueError),
+ (dict(start_year='same', start_month=12, start_day=32), ValueError),
+ ])
+ def test_invalid_input(self, arguments, exception):
+ with pytest.raises(exception):
+ fiscalyear._validate_fiscal_calendar_params(**arguments)
+
+ @pytest.mark.parametrize("arguments", [
+ dict(start_year='same', start_month=1, start_day=1),
+ dict(start_year='same', start_month=1, start_day=31),
+ dict(start_year='same', start_month=12, start_day=1),
+ dict(start_year='previous', start_month=1, start_day=1),
+ dict(start_year='previous', start_month=1, start_day=31),
+ dict(start_year='previous', start_month=12, start_day=1),
+ ])
+ def test_valid_input(self, arguments):
+ fiscalyear._validate_fiscal_calendar_params(**arguments)
+
+
+class TestSetupFiscalCalendar(object):
+
+ def test_setup_fiscal_calendar(self):
+ # Test defaults
+ day = fiscalyear.FiscalDate(2017, 12, 1)
+ assert day.fiscal_year == 2018
+ assert day.quarter == 1
+
+ # Change fiscal year settings
+ fiscalyear.setup_fiscal_calendar("same", 1, 1)
+ assert day.fiscal_year == 2017
+ assert day.quarter == 4
+
+ # Restore defaults and re-test
+ fiscalyear.setup_fiscal_calendar("previous", 10, 1)
+ assert day.fiscal_year == 2018
+ assert day.quarter == 1
class TestFiscalCalendar:
@@ -86,6 +221,10 @@ class TestFiscalCalendar:
assert fiscalyear.START_DAY == 1
def test_wrong_type(self):
+ with pytest.raises(TypeError):
+ with fiscalyear.fiscal_calendar(start_year=6.5):
+ pass
+
with pytest.raises(TypeError):
with fiscalyear.fiscal_calendar(start_month=6.5):
pass
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 3
},
"num_modified_files": 1
} | 0.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.16
babel==2.17.0
certifi==2025.1.31
charset-normalizer==3.4.1
codecov==2.1.13
coverage==7.8.0
docutils==0.21.2
exceptiongroup==1.2.2
-e git+https://github.com/adamjstewart/fiscalyear.git@77c5c0c82a62de36e77284e924f744bb1e770a31#egg=fiscalyear
idna==3.10
imagesize==1.4.1
importlib_metadata==8.6.1
iniconfig==2.1.0
Jinja2==3.1.6
MarkupSafe==3.0.2
packaging==24.2
pluggy==1.5.0
Pygments==2.19.1
pytest==8.3.5
pytest-cov==6.0.0
pytest-runner==6.0.1
requests==2.32.3
snowballstemmer==2.2.0
Sphinx==7.4.7
sphinx-rtd-theme==3.0.2
sphinxcontrib-applehelp==2.0.0
sphinxcontrib-devhelp==2.0.0
sphinxcontrib-htmlhelp==2.1.0
sphinxcontrib-jquery==4.1
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==2.0.0
sphinxcontrib-serializinghtml==2.0.0
tomli==2.2.1
urllib3==2.3.0
zipp==3.21.0
| name: fiscalyear
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.16
- babel==2.17.0
- certifi==2025.1.31
- charset-normalizer==3.4.1
- codecov==2.1.13
- coverage==7.8.0
- docutils==0.21.2
- exceptiongroup==1.2.2
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- jinja2==3.1.6
- markupsafe==3.0.2
- packaging==24.2
- pluggy==1.5.0
- pygments==2.19.1
- pytest==8.3.5
- pytest-cov==6.0.0
- pytest-runner==6.0.1
- requests==2.32.3
- snowballstemmer==2.2.0
- sphinx==7.4.7
- sphinx-rtd-theme==3.0.2
- sphinxcontrib-applehelp==2.0.0
- sphinxcontrib-devhelp==2.0.0
- sphinxcontrib-htmlhelp==2.1.0
- sphinxcontrib-jquery==4.1
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==2.0.0
- sphinxcontrib-serializinghtml==2.0.0
- tomli==2.2.1
- urllib3==2.3.0
- zipp==3.21.0
prefix: /opt/conda/envs/fiscalyear
| [
"test_fiscalyear.py::TestCalendarSettingsValidator::test_invalid_input[arguments0-ValueError]",
"test_fiscalyear.py::TestCalendarSettingsValidator::test_invalid_input[arguments1-TypeError]",
"test_fiscalyear.py::TestCalendarSettingsValidator::test_invalid_input[arguments2-TypeError]",
"test_fiscalyear.py::TestCalendarSettingsValidator::test_invalid_input[arguments3-TypeError]",
"test_fiscalyear.py::TestCalendarSettingsValidator::test_invalid_input[arguments4-TypeError]",
"test_fiscalyear.py::TestCalendarSettingsValidator::test_invalid_input[arguments5-TypeError]",
"test_fiscalyear.py::TestCalendarSettingsValidator::test_invalid_input[arguments6-ValueError]",
"test_fiscalyear.py::TestCalendarSettingsValidator::test_invalid_input[arguments7-ValueError]",
"test_fiscalyear.py::TestCalendarSettingsValidator::test_invalid_input[arguments8-ValueError]",
"test_fiscalyear.py::TestCalendarSettingsValidator::test_invalid_input[arguments9-TypeError]",
"test_fiscalyear.py::TestCalendarSettingsValidator::test_invalid_input[arguments10-TypeError]",
"test_fiscalyear.py::TestCalendarSettingsValidator::test_invalid_input[arguments11-TypeError]",
"test_fiscalyear.py::TestCalendarSettingsValidator::test_invalid_input[arguments12-ValueError]",
"test_fiscalyear.py::TestCalendarSettingsValidator::test_invalid_input[arguments13-ValueError]",
"test_fiscalyear.py::TestCalendarSettingsValidator::test_invalid_input[arguments14-ValueError]",
"test_fiscalyear.py::TestCalendarSettingsValidator::test_valid_input[arguments0]",
"test_fiscalyear.py::TestCalendarSettingsValidator::test_valid_input[arguments1]",
"test_fiscalyear.py::TestCalendarSettingsValidator::test_valid_input[arguments2]",
"test_fiscalyear.py::TestCalendarSettingsValidator::test_valid_input[arguments3]",
"test_fiscalyear.py::TestCalendarSettingsValidator::test_valid_input[arguments4]",
"test_fiscalyear.py::TestCalendarSettingsValidator::test_valid_input[arguments5]",
"test_fiscalyear.py::TestSetupFiscalCalendar::test_setup_fiscal_calendar",
"test_fiscalyear.py::TestFiscalCalendar::test_wrong_type"
] | [] | [
"test_fiscalyear.py::TestCheckInt::test_invalid_input[asdf-TypeError]",
"test_fiscalyear.py::TestCheckInt::test_invalid_input[-999-TypeError]",
"test_fiscalyear.py::TestCheckInt::test_invalid_input[0.0-TypeError]",
"test_fiscalyear.py::TestCheckInt::test_invalid_input[value3-TypeError]",
"test_fiscalyear.py::TestCheckInt::test_valid_input[1_0]",
"test_fiscalyear.py::TestCheckInt::test_valid_input[2]",
"test_fiscalyear.py::TestCheckInt::test_valid_input[0_0]",
"test_fiscalyear.py::TestCheckInt::test_valid_input[-1]",
"test_fiscalyear.py::TestCheckInt::test_valid_input[-2]",
"test_fiscalyear.py::TestCheckInt::test_valid_input[1_1]",
"test_fiscalyear.py::TestCheckInt::test_valid_input[0_1]",
"test_fiscalyear.py::TestCheckInt::test_valid_input[999]",
"test_fiscalyear.py::TestCheckYear::test_invalid_input[asdf-TypeError]",
"test_fiscalyear.py::TestCheckYear::test_invalid_input[0.0-TypeError]",
"test_fiscalyear.py::TestCheckYear::test_invalid_input[value2-TypeError]",
"test_fiscalyear.py::TestCheckYear::test_invalid_input[-1-TypeError]",
"test_fiscalyear.py::TestCheckYear::test_invalid_input[-1-ValueError]",
"test_fiscalyear.py::TestCheckYear::test_invalid_input[0-ValueError0]",
"test_fiscalyear.py::TestCheckYear::test_invalid_input[0-ValueError1]",
"test_fiscalyear.py::TestCheckYear::test_invalid_input[10000-ValueError0]",
"test_fiscalyear.py::TestCheckYear::test_invalid_input[10000-ValueError1]",
"test_fiscalyear.py::TestCheckYear::test_valid_input[1_0]",
"test_fiscalyear.py::TestCheckYear::test_valid_input[2]",
"test_fiscalyear.py::TestCheckYear::test_valid_input[1_1]",
"test_fiscalyear.py::TestCheckYear::test_valid_input[999]",
"test_fiscalyear.py::TestCheckDay::test_invalid_input[1-asdf-TypeError]",
"test_fiscalyear.py::TestCheckDay::test_invalid_input[1--999-TypeError]",
"test_fiscalyear.py::TestCheckDay::test_invalid_input[1-0.0-TypeError]",
"test_fiscalyear.py::TestCheckDay::test_invalid_input[1-day3-TypeError]",
"test_fiscalyear.py::TestCheckDay::test_invalid_input[1--1-ValueError]",
"test_fiscalyear.py::TestCheckDay::test_invalid_input[1--1-TypeError]",
"test_fiscalyear.py::TestCheckDay::test_invalid_input[1-0-ValueError0]",
"test_fiscalyear.py::TestCheckDay::test_invalid_input[1-0-ValueError1]",
"test_fiscalyear.py::TestCheckDay::test_invalid_input[1-32-ValueError0]",
"test_fiscalyear.py::TestCheckDay::test_invalid_input[1-32-ValueError1]",
"test_fiscalyear.py::TestCheckDay::test_valid_input[1-1_0]",
"test_fiscalyear.py::TestCheckDay::test_valid_input[1-2]",
"test_fiscalyear.py::TestCheckDay::test_valid_input[1-1_1]",
"test_fiscalyear.py::TestCheckDay::test_valid_input[1-31_0]",
"test_fiscalyear.py::TestCheckDay::test_valid_input[1-31_1]",
"test_fiscalyear.py::TestCheckQuarter::test_invalid_input[asdf-TypeError]",
"test_fiscalyear.py::TestCheckQuarter::test_invalid_input[0.0-TypeError]",
"test_fiscalyear.py::TestCheckQuarter::test_invalid_input[value2-TypeError]",
"test_fiscalyear.py::TestCheckQuarter::test_invalid_input[-1-TypeError]",
"test_fiscalyear.py::TestCheckQuarter::test_invalid_input[-1-ValueError]",
"test_fiscalyear.py::TestCheckQuarter::test_invalid_input[0-ValueError0]",
"test_fiscalyear.py::TestCheckQuarter::test_invalid_input[0-ValueError1]",
"test_fiscalyear.py::TestCheckQuarter::test_invalid_input[5-ValueError0]",
"test_fiscalyear.py::TestCheckQuarter::test_invalid_input[5-ValueError1]",
"test_fiscalyear.py::TestCheckQuarter::test_valid_input[1_0]",
"test_fiscalyear.py::TestCheckQuarter::test_valid_input[2]",
"test_fiscalyear.py::TestCheckQuarter::test_valid_input[1_1]",
"test_fiscalyear.py::TestCheckQuarter::test_valid_input[4]",
"test_fiscalyear.py::TestFiscalCalendar::test_start_year",
"test_fiscalyear.py::TestFiscalCalendar::test_start_month",
"test_fiscalyear.py::TestFiscalCalendar::test_start_day",
"test_fiscalyear.py::TestFiscalCalendar::test_complex",
"test_fiscalyear.py::TestFiscalCalendar::test_nested",
"test_fiscalyear.py::TestFiscalCalendar::test_out_of_range",
"test_fiscalyear.py::TestFiscalYear::test_basic",
"test_fiscalyear.py::TestFiscalYear::test_repr",
"test_fiscalyear.py::TestFiscalYear::test_str",
"test_fiscalyear.py::TestFiscalYear::test_from_string",
"test_fiscalyear.py::TestFiscalYear::test_wrong_type",
"test_fiscalyear.py::TestFiscalYear::test_out_of_range",
"test_fiscalyear.py::TestFiscalYear::test_prev_fiscal_year",
"test_fiscalyear.py::TestFiscalYear::test_next_fiscal_year",
"test_fiscalyear.py::TestFiscalYear::test_start",
"test_fiscalyear.py::TestFiscalYear::test_end",
"test_fiscalyear.py::TestFiscalYear::test_q1",
"test_fiscalyear.py::TestFiscalYear::test_q2",
"test_fiscalyear.py::TestFiscalYear::test_q3",
"test_fiscalyear.py::TestFiscalYear::test_q4",
"test_fiscalyear.py::TestFiscalYear::test_contains",
"test_fiscalyear.py::TestFiscalYear::test_less_than",
"test_fiscalyear.py::TestFiscalYear::test_less_than_equals",
"test_fiscalyear.py::TestFiscalYear::test_equals",
"test_fiscalyear.py::TestFiscalYear::test_not_equals",
"test_fiscalyear.py::TestFiscalYear::test_greater_than",
"test_fiscalyear.py::TestFiscalYear::test_greater_than_equals",
"test_fiscalyear.py::TestFiscalQuarter::test_basic",
"test_fiscalyear.py::TestFiscalQuarter::test_repr",
"test_fiscalyear.py::TestFiscalQuarter::test_str",
"test_fiscalyear.py::TestFiscalQuarter::test_from_string",
"test_fiscalyear.py::TestFiscalQuarter::test_wrong_type",
"test_fiscalyear.py::TestFiscalQuarter::test_out_of_range",
"test_fiscalyear.py::TestFiscalQuarter::test_prev_quarter",
"test_fiscalyear.py::TestFiscalQuarter::test_next_quarter",
"test_fiscalyear.py::TestFiscalQuarter::test_start",
"test_fiscalyear.py::TestFiscalQuarter::test_end",
"test_fiscalyear.py::TestFiscalQuarter::test_bad_start_year",
"test_fiscalyear.py::TestFiscalQuarter::test_q1_start",
"test_fiscalyear.py::TestFiscalQuarter::test_q1_end",
"test_fiscalyear.py::TestFiscalQuarter::test_q2_start",
"test_fiscalyear.py::TestFiscalQuarter::test_q2_end",
"test_fiscalyear.py::TestFiscalQuarter::test_q3_start",
"test_fiscalyear.py::TestFiscalQuarter::test_q3_end",
"test_fiscalyear.py::TestFiscalQuarter::test_q4_start",
"test_fiscalyear.py::TestFiscalQuarter::test_q4_end",
"test_fiscalyear.py::TestFiscalQuarter::test_contains",
"test_fiscalyear.py::TestFiscalQuarter::test_less_than",
"test_fiscalyear.py::TestFiscalQuarter::test_less_than_equals",
"test_fiscalyear.py::TestFiscalQuarter::test_equals",
"test_fiscalyear.py::TestFiscalQuarter::test_not_equals",
"test_fiscalyear.py::TestFiscalQuarter::test_greater_than",
"test_fiscalyear.py::TestFiscalQuarter::test_greater_than_equals",
"test_fiscalyear.py::TestFiscalDate::test_basic",
"test_fiscalyear.py::TestFiscalDate::test_fiscal_year",
"test_fiscalyear.py::TestFiscalDate::test_prev_fiscal_year",
"test_fiscalyear.py::TestFiscalDate::test_next_fiscal_year",
"test_fiscalyear.py::TestFiscalDate::test_prev_quarter",
"test_fiscalyear.py::TestFiscalDate::test_next_quarter",
"test_fiscalyear.py::TestFiscalDateTime::test_basic",
"test_fiscalyear.py::TestFiscalDateTime::test_fiscal_year",
"test_fiscalyear.py::TestFiscalDateTime::test_prev_fiscal_year",
"test_fiscalyear.py::TestFiscalDateTime::test_next_fiscal_year",
"test_fiscalyear.py::TestFiscalDateTime::test_prev_quarter",
"test_fiscalyear.py::TestFiscalDateTime::test_next_quarter"
] | [] | MIT License | 2,135 | 1,357 | [
"fiscalyear.py"
] |
pytorch__ignite-69 | a0235df55650ec5368c0cd9f84a3a34b92c37273 | 2018-02-09 11:11:39 | a0235df55650ec5368c0cd9f84a3a34b92c37273 | diff --git a/ignite/engine.py b/ignite/engine.py
index f81753a1..aa7b2357 100644
--- a/ignite/engine.py
+++ b/ignite/engine.py
@@ -111,15 +111,16 @@ class Engine(object):
try:
start_time = time.time()
for batch in dataset:
+ self.current_iteration += 1
self._fire_event(Events.ITERATION_STARTED)
step_result = self._process_function(batch)
if step_result is not None:
self.history.append(step_result)
- self.current_iteration += 1
self._fire_event(Events.ITERATION_COMPLETED)
if self.should_terminate:
break
+
time_taken = time.time() - start_time
hours, mins, secs = _to_hours_mins_secs(time_taken)
return hours, mins, secs
diff --git a/ignite/trainer.py b/ignite/trainer.py
index e06805af..f5f33c6c 100644
--- a/ignite/trainer.py
+++ b/ignite/trainer.py
@@ -67,12 +67,12 @@ class Trainer(Engine):
self._fire_event(Events.STARTED)
while self.current_epoch < max_epochs and not self.should_terminate:
+ self.current_epoch += 1
self._fire_event(Events.EPOCH_STARTED)
self._train_one_epoch(training_data)
if self.should_terminate:
break
self._fire_event(Events.EPOCH_COMPLETED)
- self.current_epoch += 1
self._fire_event(Events.COMPLETED)
time_taken = time.time() - start_time
| Start current_epoch + current_iteration from 1 instead of 0.
Also increment these counters *after* the `ITERATION_COMPLETE`/`EPOCH_COMPLETE` events.
See discussion in #59 for more details | pytorch/ignite | diff --git a/tests/ignite/test_evaluator.py b/tests/ignite/test_evaluator.py
index b0c54408..4450e93e 100644
--- a/tests/ignite/test_evaluator.py
+++ b/tests/ignite/test_evaluator.py
@@ -14,7 +14,7 @@ def test_current_validation_iteration_counter_increases_every_iteration():
class IterationCounter(object):
def __init__(self):
- self.current_iteration_count = 0
+ self.current_iteration_count = 1
self.total_count = 0
def __call__(self, evaluator):
@@ -23,7 +23,7 @@ def test_current_validation_iteration_counter_increases_every_iteration():
self.total_count += 1
def clear(self):
- self.current_iteration_count = 0
+ self.current_iteration_count = 1
iteration_counter = IterationCounter()
@@ -71,15 +71,15 @@ def test_terminate_stops_evaluator_when_called_during_iteration():
iteration_to_stop = 3 # i.e. part way through the 3rd validation run
evaluator = Evaluator(MagicMock(return_value=1))
- def end_of_iteration_handler(evaluator):
+ def start_of_iteration_handler(evaluator):
if evaluator.current_iteration == iteration_to_stop:
evaluator.terminate()
- evaluator.add_event_handler(Events.ITERATION_STARTED, end_of_iteration_handler)
+ evaluator.add_event_handler(Events.ITERATION_STARTED, start_of_iteration_handler)
evaluator.run([None] * num_iterations)
- # should complete the iteration when terminate called
- assert evaluator.current_iteration == iteration_to_stop + 1
+ # should complete the iteration when terminate called but not increment counter
+ assert evaluator.current_iteration == iteration_to_stop
def test_create_supervised():
diff --git a/tests/ignite/test_trainer.py b/tests/ignite/test_trainer.py
index b7b96177..8c3117c1 100644
--- a/tests/ignite/test_trainer.py
+++ b/tests/ignite/test_trainer.py
@@ -60,7 +60,7 @@ def test_current_epoch_counter_increases_every_epoch():
class EpochCounter(object):
def __init__(self):
- self.current_epoch_count = 0
+ self.current_epoch_count = 1
def __call__(self, trainer):
assert trainer.current_epoch == self.current_epoch_count
@@ -80,7 +80,7 @@ def test_current_iteration_counter_increases_every_iteration():
class IterationCounter(object):
def __init__(self):
- self.current_iteration_count = 0
+ self.current_iteration_count = 1
def __call__(self, trainer):
assert trainer.current_iteration == self.current_iteration_count
@@ -115,7 +115,7 @@ def test_terminate_at_end_of_epoch_stops_training():
trainer.run([1], max_epochs=max_epochs)
- assert trainer.current_epoch == last_epoch_to_run + 1 # counter is incremented at end of loop
+ assert trainer.current_epoch == last_epoch_to_run
assert trainer.should_terminate
@@ -139,24 +139,23 @@ def test_terminate_at_start_of_epoch_stops_training_after_completing_iteration()
assert trainer.current_epoch == epoch_to_terminate_on
assert trainer.should_terminate
# completes first iteration
- assert trainer.current_iteration == (epoch_to_terminate_on * len(batches_per_epoch)) + 1
+ assert trainer.current_iteration == ((epoch_to_terminate_on - 1) * len(batches_per_epoch)) + 1
def test_terminate_stops_training_mid_epoch():
num_iterations_per_epoch = 10
- iteration_to_stop = num_iterations_per_epoch + 3 # i.e. part way through the 2nd epoch
+ iteration_to_stop = num_iterations_per_epoch + 3 # i.e. part way through the 3rd epoch
trainer = Trainer(MagicMock(return_value=1))
- def end_of_iteration_handler(trainer):
+ def start_of_iteration_handler(trainer):
if trainer.current_iteration == iteration_to_stop:
trainer.terminate()
- trainer.add_event_handler(Events.ITERATION_STARTED, end_of_iteration_handler)
+ trainer.add_event_handler(Events.ITERATION_STARTED, start_of_iteration_handler)
trainer.run(training_data=[None] * num_iterations_per_epoch, max_epochs=3)
- assert (trainer.current_iteration == iteration_to_stop +
- 1) # completes the iteration when terminate called
- assert trainer.current_epoch == np.ceil(
- iteration_to_stop / num_iterations_per_epoch) - 1 # it starts from 0
+ # completes the iteration but doesn't increment counter (this happens just before a new iteration starts)
+ assert (trainer.current_iteration == iteration_to_stop)
+ assert trainer.current_epoch == np.ceil(iteration_to_stop / num_iterations_per_epoch) # it starts from 0
def _create_mock_data_loader(epochs, batches_per_epoch):
@@ -199,7 +198,7 @@ def test_training_iteration_events_are_fired():
assert mock_manager.mock_calls == expected_calls
-def test_create_supervised():
+def test_create_supervised_trainer():
model = Linear(1, 1)
model.weight.data.zero_()
model.bias.data.zero_()
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 2
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"numpy",
"mock",
"pytest",
"codecov",
"pytest-cov",
"tqdm",
"scikit-learn",
"visdom",
"torchvision",
"tensorboardX",
"gym"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
charset-normalizer==2.0.12
cloudpickle==2.2.1
codecov==2.1.13
coverage==6.2
dataclasses==0.8
decorator==4.4.2
enum34==1.1.10
gym==0.26.2
gym-notices==0.0.8
idna==3.10
-e git+https://github.com/pytorch/ignite.git@a0235df55650ec5368c0cd9f84a3a34b92c37273#egg=ignite
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
importlib-resources==5.4.0
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
joblib==1.1.1
jsonpatch==1.32
jsonpointer==2.3
mock==5.2.0
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
networkx==2.5.1
numpy==1.19.5
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
Pillow==8.4.0
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
protobuf==4.21.0
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
pytest-cov==4.0.0
requests==2.27.1
scikit-learn==0.24.2
scipy==1.5.4
six==1.17.0
tensorboardX==2.6.2.2
threadpoolctl==3.1.0
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
tomli==1.2.3
torch==1.10.1
torchvision==0.11.2
tornado==6.1
tqdm==4.64.1
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
urllib3==1.26.20
visdom==0.2.4
websocket-client==1.3.1
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: ignite
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- charset-normalizer==2.0.12
- cloudpickle==2.2.1
- codecov==2.1.13
- coverage==6.2
- dataclasses==0.8
- decorator==4.4.2
- enum34==1.1.10
- gym==0.26.2
- gym-notices==0.0.8
- idna==3.10
- importlib-resources==5.4.0
- joblib==1.1.1
- jsonpatch==1.32
- jsonpointer==2.3
- mock==5.2.0
- networkx==2.5.1
- numpy==1.19.5
- pillow==8.4.0
- protobuf==4.21.0
- pytest-cov==4.0.0
- requests==2.27.1
- scikit-learn==0.24.2
- scipy==1.5.4
- six==1.17.0
- tensorboardx==2.6.2.2
- threadpoolctl==3.1.0
- tomli==1.2.3
- torch==1.10.1
- torchvision==0.11.2
- tornado==6.1
- tqdm==4.64.1
- urllib3==1.26.20
- visdom==0.2.4
- websocket-client==1.3.1
prefix: /opt/conda/envs/ignite
| [
"tests/ignite/test_evaluator.py::test_current_validation_iteration_counter_increases_every_iteration",
"tests/ignite/test_evaluator.py::test_terminate_stops_evaluator_when_called_during_iteration",
"tests/ignite/test_trainer.py::test_current_epoch_counter_increases_every_epoch",
"tests/ignite/test_trainer.py::test_current_iteration_counter_increases_every_iteration",
"tests/ignite/test_trainer.py::test_terminate_at_end_of_epoch_stops_training",
"tests/ignite/test_trainer.py::test_terminate_at_start_of_epoch_stops_training_after_completing_iteration",
"tests/ignite/test_trainer.py::test_terminate_stops_training_mid_epoch"
] | [
"tests/ignite/test_trainer.py::test_create_supervised_trainer"
] | [
"tests/ignite/test_evaluator.py::test_evaluation_iteration_events_are_fired",
"tests/ignite/test_evaluator.py::test_create_supervised",
"tests/ignite/test_trainer.py::test_exception_handler_called_on_error",
"tests/ignite/test_trainer.py::test_stopping_criterion_is_max_epochs",
"tests/ignite/test_trainer.py::test_training_iteration_events_are_fired"
] | [] | BSD 3-Clause "New" or "Revised" License | 2,136 | 385 | [
"ignite/engine.py",
"ignite/trainer.py"
] |
|
Azure__WALinuxAgent-1039 | 53a429b06b67031d30351b45e798ec204484b8ef | 2018-02-12 05:42:23 | 6e9b985c1d7d564253a1c344bab01b45093103cd | diff --git a/azurelinuxagent/common/event.py b/azurelinuxagent/common/event.py
index 84a439f5..7fc084d9 100644
--- a/azurelinuxagent/common/event.py
+++ b/azurelinuxagent/common/event.py
@@ -254,7 +254,11 @@ __event_logger__ = EventLogger()
def elapsed_milliseconds(utc_start):
- d = datetime.utcnow() - utc_start
+ now = datetime.utcnow()
+ if now < utc_start:
+ return 0
+
+ d = now - utc_start
return int(((d.days * 24 * 60 * 60 + d.seconds) * 1000) + \
(d.microseconds / 1000.0))
| The ProcessGoalState Duration is Too Large
The Duration telemetry value exceeds the size of an Int64, which does not make sense. This has been seen in at least two different agent versions (2.1.3 and 2.2.21). | Azure/WALinuxAgent | diff --git a/tests/common/test_event.py b/tests/common/test_event.py
index 01bcd7b9..4d9afeff 100644
--- a/tests/common/test_event.py
+++ b/tests/common/test_event.py
@@ -17,14 +17,10 @@
from __future__ import print_function
-from datetime import datetime
-
-import azurelinuxagent.common.event as event
-import azurelinuxagent.common.logger as logger
+from datetime import datetime, timedelta
from azurelinuxagent.common.event import add_event, \
- mark_event_status, should_emit_event, \
- WALAEventOperation
+ WALAEventOperation, elapsed_milliseconds
from azurelinuxagent.common.future import ustr
from azurelinuxagent.common.version import CURRENT_VERSION
@@ -217,3 +213,7 @@ class TestEvent(AgentTestCase):
with open(last_event) as last_fh:
last_event_text = last_fh.read()
self.assertTrue('last event' in last_event_text)
+
+ def test_elapsed_milliseconds(self):
+ utc_start = datetime.utcnow() + timedelta(days=1)
+ self.assertEqual(0, elapsed_milliseconds(utc_start))
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 1
} | 2.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"pyasn1"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.7",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///croot/attrs_1668696182826/work
certifi @ file:///croot/certifi_1671487769961/work/certifi
flit_core @ file:///opt/conda/conda-bld/flit-core_1644941570762/work/source/flit_core
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1648562407465/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
packaging @ file:///croot/packaging_1671697413597/work
pluggy @ file:///tmp/build/80754af9/pluggy_1648042572264/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyasn1==0.5.1
pytest==7.1.2
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
typing_extensions @ file:///croot/typing_extensions_1669924550328/work
-e git+https://github.com/Azure/WALinuxAgent.git@53a429b06b67031d30351b45e798ec204484b8ef#egg=WALinuxAgent
zipp @ file:///croot/zipp_1672387121353/work
| name: WALinuxAgent
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=22.1.0=py37h06a4308_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2022.12.7=py37h06a4308_0
- flit-core=3.6.0=pyhd3eb1b0_0
- importlib-metadata=4.11.3=py37h06a4308_0
- importlib_metadata=4.11.3=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=22.0=py37h06a4308_0
- pip=22.3.1=py37h06a4308_0
- pluggy=1.0.0=py37h06a4308_1
- py=1.11.0=pyhd3eb1b0_0
- pytest=7.1.2=py37h06a4308_0
- python=3.7.16=h7a1cb2a_0
- readline=8.2=h5eee18b_0
- setuptools=65.6.3=py37h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py37h06a4308_0
- typing_extensions=4.4.0=py37h06a4308_0
- wheel=0.38.4=py37h06a4308_0
- xz=5.6.4=h5eee18b_1
- zipp=3.11.0=py37h06a4308_0
- zlib=1.2.13=h5eee18b_1
- pip:
- pyasn1==0.5.1
prefix: /opt/conda/envs/WALinuxAgent
| [
"tests/common/test_event.py::TestEvent::test_elapsed_milliseconds"
] | [] | [
"tests/common/test_event.py::TestEvent::test_event_status_defaults_to_success",
"tests/common/test_event.py::TestEvent::test_event_status_event_marked",
"tests/common/test_event.py::TestEvent::test_event_status_preserves_state",
"tests/common/test_event.py::TestEvent::test_event_status_records_status",
"tests/common/test_event.py::TestEvent::test_periodic_does_not_emit_if_previously_sent",
"tests/common/test_event.py::TestEvent::test_periodic_emits_after_elapsed_delta",
"tests/common/test_event.py::TestEvent::test_periodic_emits_if_forced",
"tests/common/test_event.py::TestEvent::test_periodic_emits_if_not_previously_sent",
"tests/common/test_event.py::TestEvent::test_periodic_forwards_args",
"tests/common/test_event.py::TestEvent::test_save_event",
"tests/common/test_event.py::TestEvent::test_save_event_cleanup",
"tests/common/test_event.py::TestEvent::test_save_event_rollover",
"tests/common/test_event.py::TestEvent::test_should_emit_event_handles_known_operations",
"tests/common/test_event.py::TestEvent::test_should_emit_event_ignores_unknown_operations"
] | [] | Apache License 2.0 | 2,146 | 186 | [
"azurelinuxagent/common/event.py"
] |
|
ELIFE-ASU__Neet-93 | fbc1f937c407974c04c847d44517fea5ea76176f | 2018-02-13 20:59:01 | fbc1f937c407974c04c847d44517fea5ea76176f | diff --git a/neet/boolean/logicnetwork.py b/neet/boolean/logicnetwork.py
index 69f44fc..9f1123b 100644
--- a/neet/boolean/logicnetwork.py
+++ b/neet/boolean/logicnetwork.py
@@ -2,6 +2,7 @@
# Use of this source code is governed by a MIT
# license that can be found in the LICENSE file.
import re
+from neet.python3 import *
from neet.statespace import StateSpace
from neet.exceptions import FormatError
@@ -88,7 +89,7 @@ class LogicNetwork(object):
raise ValueError("Invalid table format")
conditions = set()
for condition in row[1]:
- conditions.add(''.join([str(int(s)) for s in condition]))
+ conditions.add(''.join([str(long(s)) for s in condition]))
self.table.append((row[0], conditions))
if reduced:
@@ -105,15 +106,15 @@ class LogicNetwork(object):
self._encoded_table = []
for indices, conditions in self.table:
# Encode the mask.
- mask_code = 0
+ mask_code = long(0)
for idx in indices:
mask_code += 2 ** idx # Low order, low index.
# Encode each condition of truth table.
encoded_sub_table = set()
for condition in conditions:
- encoded_condition = 0
+ encoded_condition = long(0)
for idx, state in zip(indices, condition):
- encoded_condition += 2 ** idx if int(state) else 0
+ encoded_condition += 2 ** idx if long(state) else 0
encoded_sub_table.add(encoded_condition)
self._encoded_table.append((mask_code, encoded_sub_table))
@@ -134,7 +135,7 @@ class LogicNetwork(object):
for state in sub_table[1]:
# State excluding source.
state_sans_source = state[:i] + state[i + 1:]
- if int(state[i]) == 1:
+ if long(state[i]) == 1:
counter[state_sans_source] = counter.get(
state_sans_source, 0) + 1
else:
diff --git a/neet/python3.py b/neet/python3.py
new file mode 100644
index 0000000..b3b118d
--- /dev/null
+++ b/neet/python3.py
@@ -0,0 +1,9 @@
+# Copyright 2018 ELIFE. All rights reserved.
+# Use of this source code is governed by a MIT
+# license that can be found in the LICENSE file.
+
+import sys
+if sys.version_info > (3,):
+ long = int
+ unicode = str
+
diff --git a/neet/statespace.py b/neet/statespace.py
index fc8230b..609c2c8 100644
--- a/neet/statespace.py
+++ b/neet/statespace.py
@@ -1,6 +1,7 @@
# Copyright 2016-2017 ELIFE. All rights reserved.
# Use of this source code is governed by a MIT
# license that can be found in the LICENSE file.
+from .python3 import *
import numpy as np
class StateSpace(object):
@@ -201,7 +202,7 @@ class StateSpace(object):
:returns: a unique integer encoding of the state
:raises ValueError: if ``state`` has an incorrect length
"""
- encoded, place = 0, 1
+ encoded, place = long(0), long(1)
base = self.__base
if self.is_uniform:
@@ -213,7 +214,7 @@ class StateSpace(object):
encoded += place * x
place *= b
- return encoded
+ return long(encoded)
def encode(self, state):
"""
| logic network update function fails with large number of nodes
Above about 62 nodes, the update function for logic networks fails due to an issue with encoding states of the network. | ELIFE-ASU/Neet | diff --git a/test/test_asynchronous.py b/test/test_asynchronous.py
index 2dce250..5b6b7b1 100644
--- a/test/test_asynchronous.py
+++ b/test/test_asynchronous.py
@@ -2,6 +2,7 @@
# Use of this source code is governed by a MIT
# license that can be found in the LICENSE file.
import unittest
+from neet.python3 import *
from neet.asynchronous import transitions
from neet.automata import ECA
from neet.boolean.examples import s_pombe, s_cerevisiae, c_elegans
@@ -68,7 +69,7 @@ class TestAsync(unittest.TestCase):
for net in [s_pombe, s_cerevisiae, c_elegans]:
for states, _ in transitions(net, encoded=True):
for state in states:
- self.assertIsInstance(state, int)
+ self.assertIsInstance(state, (int, long))
for states, _ in transitions(net, encoded=False):
for state in states:
self.assertIsInstance(state, list)
@@ -77,7 +78,7 @@ class TestAsync(unittest.TestCase):
for size in [5, 8, 10]:
for states, _ in transitions(net, size, encoded=True):
for state in states:
- self.assertIsInstance(state, int)
+ self.assertIsInstance(state, (int, long))
for states, _ in transitions(net, size, encoded=False):
for state in states:
self.assertIsInstance(state, list)
diff --git a/test/test_statespace.py b/test/test_statespace.py
index 813597d..80b77f3 100644
--- a/test/test_statespace.py
+++ b/test/test_statespace.py
@@ -2,10 +2,10 @@
# Use of this source code is governed by a MIT
# license that can be found in the LICENSE file.
import unittest
+from neet.python3 import *
from neet.statespace import StateSpace
import numpy as np
-
class TestStateSpace(unittest.TestCase):
def test_invalid_spec_type(self):
with self.assertRaises(TypeError):
@@ -277,3 +277,17 @@ class TestStateSpace(unittest.TestCase):
self.assertFalse([0, 2, 1] not in StateSpace([2, 3, 2]))
self.assertTrue([0, 1] not in StateSpace([2, 2, 3]))
self.assertTrue([1, 1, 6] not in StateSpace([2, 3, 4]))
+
+ def test_long_encoding(self):
+ state_space = StateSpace(10)
+ code = state_space.encode(np.ones(10, dtype=int))
+ print(type(code))
+ self.assertIsInstance(code, long)
+
+ state_space = StateSpace(68)
+ code = state_space.encode(np.ones(68, dtype=int))
+ self.assertIsInstance(code, long)
+
+ state_space = StateSpace(100)
+ code = state_space.encode(np.ones(100, dtype=int))
+ self.assertIsInstance(code, long)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_added_files",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 2
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.5",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
decorator==4.4.2
importlib-metadata==4.8.3
iniconfig==1.1.1
-e git+https://github.com/ELIFE-ASU/Neet.git@fbc1f937c407974c04c847d44517fea5ea76176f#egg=neet
networkx==2.5.1
nose==1.3.7
numpy==1.19.5
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyinform==0.2.0
pyparsing==3.1.4
pytest==7.0.1
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: Neet
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- decorator==4.4.2
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- networkx==2.5.1
- nose==1.3.7
- numpy==1.19.5
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyinform==0.2.0
- pyparsing==3.1.4
- pytest==7.0.1
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/Neet
| [
"test/test_asynchronous.py::TestAsync::test_canary",
"test/test_asynchronous.py::TestAsync::test_transitions_encoded",
"test/test_asynchronous.py::TestAsync::test_transitions_not_network",
"test/test_asynchronous.py::TestAsync::test_transitions_require_update",
"test/test_asynchronous.py::TestAsync::test_transitions_sum_to_one",
"test/test_statespace.py::TestStateSpace::test_base_mismatch",
"test/test_statespace.py::TestStateSpace::test_check_states_uniform",
"test/test_statespace.py::TestStateSpace::test_check_states_varied",
"test/test_statespace.py::TestStateSpace::test_decode_encode_nonuniform",
"test/test_statespace.py::TestStateSpace::test_decode_encode_uniform",
"test/test_statespace.py::TestStateSpace::test_decoding_nonuniform",
"test/test_statespace.py::TestStateSpace::test_decoding_uniform",
"test/test_statespace.py::TestStateSpace::test_encode_decode_nonuniform",
"test/test_statespace.py::TestStateSpace::test_encode_decode_uniform",
"test/test_statespace.py::TestStateSpace::test_encoding_error",
"test/test_statespace.py::TestStateSpace::test_encoding_nonuniform",
"test/test_statespace.py::TestStateSpace::test_encoding_uniform",
"test/test_statespace.py::TestStateSpace::test_invalid_base_type",
"test/test_statespace.py::TestStateSpace::test_invalid_base_value",
"test/test_statespace.py::TestStateSpace::test_invalid_spec_type",
"test/test_statespace.py::TestStateSpace::test_invalid_spec_value",
"test/test_statespace.py::TestStateSpace::test_long_encoding",
"test/test_statespace.py::TestStateSpace::test_nonuniform_bases",
"test/test_statespace.py::TestStateSpace::test_states_boolean",
"test/test_statespace.py::TestStateSpace::test_states_boolean_list",
"test/test_statespace.py::TestStateSpace::test_states_count",
"test/test_statespace.py::TestStateSpace::test_states_nonboolean",
"test/test_statespace.py::TestStateSpace::test_states_nonboolean_list",
"test/test_statespace.py::TestStateSpace::test_uniform_bases"
] | [] | [] | [] | MIT License | 2,155 | 906 | [
"neet/boolean/logicnetwork.py",
"neet/statespace.py"
] |
|
ARMmbed__greentea-263 | 68508c5f4d7cf0635c75399d0ff7cfa896fdf2cc | 2018-02-15 17:29:56 | 68508c5f4d7cf0635c75399d0ff7cfa896fdf2cc | diff --git a/mbed_greentea/mbed_greentea_cli.py b/mbed_greentea/mbed_greentea_cli.py
index f6a13c4..446b965 100644
--- a/mbed_greentea/mbed_greentea_cli.py
+++ b/mbed_greentea/mbed_greentea_cli.py
@@ -23,6 +23,7 @@ import os
import sys
import random
import optparse
+import fnmatch
from time import time
try:
from Queue import Queue
@@ -119,18 +120,6 @@ def create_filtered_test_list(ctest_test_list, test_by_names, skip_test, test_sp
@return
"""
- def filter_names_by_prefix(test_case_name_list, prefix_name):
- """!
- @param test_case_name_list List of all test cases
- @param prefix_name Prefix of test name we are looking for
- @result Set with names of test names starting with 'prefix_name'
- """
- result = list()
- for test_name in test_case_name_list:
- if test_name.startswith(prefix_name):
- result.append(test_name)
- return sorted(result)
-
filtered_ctest_test_list = ctest_test_list
test_list = None
invalid_test_names = []
@@ -143,17 +132,15 @@ def create_filtered_test_list(ctest_test_list, test_by_names, skip_test, test_sp
gt_logger.gt_log("test case filter (specified with -n option)")
for test_name in set(test_list):
- if test_name.endswith('*'):
- # This 'star-sufix' filter allows users to filter tests with fixed prefixes
- # Example: -n 'TESTS-mbed_drivers* will filter all test cases with name starting with 'TESTS-mbed_drivers'
- for test_name_filtered in filter_names_by_prefix(ctest_test_list.keys(), test_name[:-1]):
- gt_logger.gt_log_tab("test filtered in '%s'"% gt_logger.gt_bright(test_name_filtered))
- filtered_ctest_test_list[test_name_filtered] = ctest_test_list[test_name_filtered]
- elif test_name not in ctest_test_list:
- invalid_test_names.append(test_name)
+ gt_logger.gt_log_tab(test_name)
+ matches = [test for test in ctest_test_list.keys() if fnmatch.fnmatch(test, test_name)]
+ gt_logger.gt_log_tab(str(ctest_test_list))
+ if matches:
+ for match in matches:
+ gt_logger.gt_log_tab("test filtered in '%s'"% gt_logger.gt_bright(match))
+ filtered_ctest_test_list[match] = ctest_test_list[match]
else:
- gt_logger.gt_log_tab("test filtered in '%s'"% gt_logger.gt_bright(test_name))
- filtered_ctest_test_list[test_name] = ctest_test_list[test_name]
+ invalid_test_names.append(test_name)
if skip_test:
test_list = skip_test.split(',')
| Test names are not correctly globbed
Test names only respect a wildcard that is placed at the end of the string. Ex. "mbed-os-*".
However, it does not respect the wildcard anywhere else. Ex. "*-timer"
The build tools accept these wildcards, so greentea should as well. This is the line responsible: https://github.com/ARMmbed/greentea/blob/32b95b44be653c3db527c02e1c5e1ffdc7d37f6f/mbed_greentea/mbed_greentea_cli.py#L146
Should be switched to `fnmatch`.
(This is mostly a note to myself to fix it) | ARMmbed/greentea | diff --git a/test/mbed_gt_cli.py b/test/mbed_gt_cli.py
index 0646c20..8f4a1eb 100644
--- a/test/mbed_gt_cli.py
+++ b/test/mbed_gt_cli.py
@@ -21,6 +21,36 @@ import sys
import unittest
from mbed_greentea import mbed_greentea_cli
+from mbed_greentea.tests_spec import TestSpec
+
+test_spec_def = {
+ "builds": {
+ "K64F-ARM": {
+ "platform": "K64F",
+ "toolchain": "ARM",
+ "base_path": "./.build/K64F/ARM",
+ "baud_rate": 115200,
+ "tests": {
+ "mbed-drivers-test-generic_tests":{
+ "binaries":[
+ {
+ "binary_type": "bootable",
+ "path": "./.build/K64F/ARM/mbed-drivers-test-generic_tests.bin"
+ }
+ ]
+ },
+ "mbed-drivers-test-c_strings":{
+ "binaries":[
+ {
+ "binary_type": "bootable",
+ "path": "./.build/K64F/ARM/mbed-drivers-test-c_strings.bin"
+ }
+ ]
+ }
+ }
+ }
+ }
+}
class GreenteaCliFunctionality(unittest.TestCase):
@@ -86,5 +116,36 @@ class GreenteaCliFunctionality(unittest.TestCase):
os.chdir(curr_dir)
shutil.rmtree(test1_dir)
+ def test_create_filtered_test_list(self):
+ test_spec = TestSpec()
+ test_spec.parse(test_spec_def)
+ test_build = test_spec.get_test_builds()[0]
+
+ test_list = mbed_greentea_cli.create_filtered_test_list(test_build.get_tests(),
+ 'mbed-drivers-test-generic_*',
+ None,
+ test_spec=test_spec)
+ self.assertEqual(set(test_list.keys()), set(['mbed-drivers-test-generic_tests']))
+
+ test_list = mbed_greentea_cli.create_filtered_test_list(test_build.get_tests(),
+ '*_strings',
+ None,
+ test_spec=test_spec)
+ self.assertEqual(set(test_list.keys()), set(['mbed-drivers-test-c_strings']))
+
+ test_list = mbed_greentea_cli.create_filtered_test_list(test_build.get_tests(),
+ 'mbed*s',
+ None,
+ test_spec=test_spec)
+ expected = set(['mbed-drivers-test-c_strings', 'mbed-drivers-test-generic_tests'])
+ self.assertEqual(set(test_list.keys()), expected)
+
+ test_list = mbed_greentea_cli.create_filtered_test_list(test_build.get_tests(),
+ '*-drivers-*',
+ None,
+ test_spec=test_spec)
+ expected = set(['mbed-drivers-test-c_strings', 'mbed-drivers-test-generic_tests'])
+ self.assertEqual(set(test_list.keys()), expected)
+
if __name__ == '__main__':
unittest.main()
diff --git a/test/mbed_gt_target_info.py b/test/mbed_gt_target_info.py
index e630e7b..a12ba09 100644
--- a/test/mbed_gt_target_info.py
+++ b/test/mbed_gt_target_info.py
@@ -416,7 +416,7 @@ mbed-gcc 1.1.0
with patch("mbed_greentea.mbed_target_info.walk") as _walk:
_walk.return_value = iter([("", ["foo"], []), ("foo", [], ["targets.json"])])
result = list(mbed_target_info._find_targets_json("bogus_path"))
- self.assertEqual(result, ["foo/targets.json"])
+ self.assertEqual(result, [os.path.join("foo", "targets.json")])
def test_find_targets_json_ignored(self):
with patch("mbed_greentea.mbed_target_info.walk") as _walk:
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | 1.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | appdirs==1.4.4
beautifulsoup4==4.13.3
certifi==2025.1.31
charset-normalizer==3.4.1
colorama==0.3.9
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
fasteners==0.19
future==1.0.0
idna==3.10
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
intelhex==2.3.0
junit-xml==1.9
lockfile==0.12.2
-e git+https://github.com/ARMmbed/greentea.git@68508c5f4d7cf0635c75399d0ff7cfa896fdf2cc#egg=mbed_greentea
mbed-host-tests==1.8.15
mbed-ls==1.8.15
mbed-os-tools==1.8.15
mock==5.2.0
packaging @ file:///croot/packaging_1734472117206/work
pluggy @ file:///croot/pluggy_1733169602837/work
prettytable==2.5.0
pyserial==3.5
pytest @ file:///croot/pytest_1738938843180/work
requests==2.32.3
six==1.17.0
soupsieve==2.6
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
typing_extensions==4.13.0
urllib3==2.3.0
wcwidth==0.2.13
| name: greentea
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- appdirs==1.4.4
- beautifulsoup4==4.13.3
- certifi==2025.1.31
- charset-normalizer==3.4.1
- colorama==0.3.9
- fasteners==0.19
- future==1.0.0
- idna==3.10
- intelhex==2.3.0
- junit-xml==1.9
- lockfile==0.12.2
- mbed-host-tests==1.8.15
- mbed-ls==1.8.15
- mbed-os-tools==1.8.15
- mock==5.2.0
- prettytable==2.5.0
- pyserial==3.5
- requests==2.32.3
- six==1.17.0
- soupsieve==2.6
- typing-extensions==4.13.0
- urllib3==2.3.0
- wcwidth==0.2.13
prefix: /opt/conda/envs/greentea
| [
"test/mbed_gt_cli.py::GreenteaCliFunctionality::test_create_filtered_test_list"
] | [] | [
"test/mbed_gt_cli.py::GreenteaCliFunctionality::test_get_greentea_version",
"test/mbed_gt_cli.py::GreenteaCliFunctionality::test_get_hello_string",
"test/mbed_gt_cli.py::GreenteaCliFunctionality::test_get_local_host_tests_dir_default_path",
"test/mbed_gt_cli.py::GreenteaCliFunctionality::test_get_local_host_tests_dir_invalid_path",
"test/mbed_gt_cli.py::GreenteaCliFunctionality::test_get_local_host_tests_dir_valid_path",
"test/mbed_gt_cli.py::GreenteaCliFunctionality::test_print_version",
"test/mbed_gt_target_info.py::GreenteaTargetInfo::test_find_targets_json",
"test/mbed_gt_target_info.py::GreenteaTargetInfo::test_find_targets_json_ignored",
"test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_mbed_target_from_current_dir_ok",
"test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_mbed_targets_from_yotta_local_module_invalid_path",
"test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_mbed_targets_from_yotta_local_module_invalid_target",
"test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_mbed_targets_from_yotta_local_module_valid",
"test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_platform_property_from_targets_empty_json",
"test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_platform_property_from_targets_in_json",
"test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_platform_property_from_targets_invalid_json",
"test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_platform_property_from_targets_no_file",
"test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_platform_property_from_targets_no_json",
"test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_platform_property_from_targets_no_value",
"test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_yotta_target_from_local_config_failed_open",
"test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_yotta_target_from_local_config_invalid_path",
"test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_yotta_target_from_local_config_valid_path",
"test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_add_target_info_mapping",
"test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_mbed_target_from_target_json",
"test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_mbed_target_from_target_json_missing_json_data",
"test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_mbed_target_from_target_json_missing_keywords",
"test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_mbed_target_from_target_json_missing_name",
"test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_mbed_target_from_target_json_missing_target",
"test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_mbed_target_from_target_json_multiple",
"test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_mbed_target_from_target_json_no_keywords",
"test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_mbed_target_from_target_json_no_name",
"test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_json_for_build_name",
"test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_search_cmd_output",
"test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_search_cmd_output_new_style",
"test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_search_cmd_output_new_style_text",
"test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_search_cmd_output_new_style_text_2",
"test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_search_cmd_output_text",
"test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_search_cmd_output_with_ssl_errors",
"test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_target_cmd_output",
"test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_target_cmd_output_fail",
"test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_target_cmd_output_mixed_chars",
"test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_target_cmd_output_mixed_nl",
"test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_target_cmd_output_mixed_nl_whitechars",
"test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_target_cmd_output_mixed_rcnl",
"test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_target_cmd_output_mixed_rcnl_whitechars",
"test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_target_cmd_output_mixed_version",
"test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_target_cmd_output_mixed_whitechars",
"test/mbed_gt_target_info.py::GreenteaTargetInfo::test_platform_property",
"test/mbed_gt_target_info.py::GreenteaTargetInfo::test_platform_property_from_default",
"test/mbed_gt_target_info.py::GreenteaTargetInfo::test_platform_property_from_default_missing",
"test/mbed_gt_target_info.py::GreenteaTargetInfo::test_platform_property_from_info_mapping",
"test/mbed_gt_target_info.py::GreenteaTargetInfo::test_platform_property_from_info_mapping_bad_platform",
"test/mbed_gt_target_info.py::GreenteaTargetInfo::test_platform_property_from_info_mapping_missing",
"test/mbed_gt_target_info.py::GreenteaTargetInfo::test_platform_property_from_targets_json_base_target",
"test/mbed_gt_target_info.py::GreenteaTargetInfo::test_platform_property_from_targets_json_empty",
"test/mbed_gt_target_info.py::GreenteaTargetInfo::test_platform_property_from_targets_json_inherits"
] | [] | Apache License 2.0 | 2,159 | 684 | [
"mbed_greentea/mbed_greentea_cli.py"
] |
|
ucfopen__canvasapi-149 | 65b75497665ca966c9046f2efde1273697c09e3b | 2018-02-15 17:48:04 | c69f6a9801ac275fdad46d97fa95c77c25d6f953 | diff --git a/canvasapi/account.py b/canvasapi/account.py
index 63f2ca2..3a03734 100644
--- a/canvasapi/account.py
+++ b/canvasapi/account.py
@@ -663,6 +663,7 @@ class Account(CanvasObject):
'GET',
'accounts/{}/terms'.format(self.id),
{'account_id': self.id},
+ _root='enrollment_terms',
_kwargs=combine_kwargs(**kwargs)
)
diff --git a/canvasapi/paginated_list.py b/canvasapi/paginated_list.py
index 8717902..56732fb 100644
--- a/canvasapi/paginated_list.py
+++ b/canvasapi/paginated_list.py
@@ -10,30 +10,31 @@ class PaginatedList(object):
def __init__(
self, content_class, requester, request_method, first_url, extra_attribs=None,
- **kwargs):
+ _root=None, **kwargs):
- self.__elements = list()
+ self._elements = list()
- self.__requester = requester
- self.__content_class = content_class
- self.__first_url = first_url
- self.__first_params = kwargs or {}
- self.__first_params['per_page'] = kwargs.get('per_page', 100)
- self.__next_url = first_url
- self.__next_params = self.__first_params
- self.__extra_attribs = extra_attribs or {}
- self.__request_method = request_method
+ self._requester = requester
+ self._content_class = content_class
+ self._first_url = first_url
+ self._first_params = kwargs or {}
+ self._first_params['per_page'] = kwargs.get('per_page', 100)
+ self._next_url = first_url
+ self._next_params = self._first_params
+ self._extra_attribs = extra_attribs or {}
+ self._request_method = request_method
+ self._root = _root
def __getitem__(self, index):
assert isinstance(index, (int, slice))
if isinstance(index, int):
- self.__get_up_to_index(index)
- return self.__elements[index]
+ self._get_up_to_index(index)
+ return self._elements[index]
else:
return self._Slice(self, index)
def __iter__(self):
- for element in self.__elements:
+ for element in self._elements:
yield element
while self._has_next():
new_elements = self._grow()
@@ -41,60 +42,68 @@ class PaginatedList(object):
yield element
def __repr__(self):
- return "<PaginatedList of type {}>".format(self.__content_class.__name__)
+ return "<PaginatedList of type {}>".format(self._content_class.__name__)
def _is_larger_than(self, index):
- return len(self.__elements) > index or self._has_next()
+ return len(self._elements) > index or self._has_next()
- def __get_up_to_index(self, index):
- while len(self.__elements) <= index and self._has_next():
+ def _get_up_to_index(self, index):
+ while len(self._elements) <= index and self._has_next():
self._grow()
def _grow(self):
new_elements = self._get_next_page()
- self.__elements += new_elements
+ self._elements += new_elements
return new_elements
def _has_next(self):
- return self.__next_url is not None
+ return self._next_url is not None
def _get_next_page(self):
- response = self.__requester.request(
- self.__request_method,
- self.__next_url,
- **self.__next_params
+ response = self._requester.request(
+ self._request_method,
+ self._next_url,
+ **self._next_params
)
data = response.json()
- self.__next_url = None
+ self._next_url = None
next_link = response.links.get('next')
- regex = r'{}(.*)'.format(re.escape(self.__requester.base_url))
+ regex = r'{}(.*)'.format(re.escape(self._requester.base_url))
- self.__next_url = re.search(regex, next_link['url']).group(1) if next_link else None
+ self._next_url = re.search(regex, next_link['url']).group(1) if next_link else None
- self.__next_params = {}
+ self._next_params = {}
content = []
+
+ if self._root:
+ try:
+ data = data[self._root]
+ except KeyError:
+ # TODO: Fix this message to make more sense to an end user.
+ raise ValueError("Invalid root value specified.")
+
for element in data:
if element is not None:
- element.update(self.__extra_attribs)
- content.append(self.__content_class(self.__requester, element))
+ element.update(self._extra_attribs)
+ content.append(self._content_class(self._requester, element))
return content
class _Slice(object):
def __init__(self, the_list, the_slice):
- self.__list = the_list
- self.__start = the_slice.start or 0
- self.__stop = the_slice.stop
- self.__step = the_slice.step or 1
+ self._list = the_list
+ self._start = the_slice.start or 0
+ self._stop = the_slice.stop
+ self._step = the_slice.step or 1
def __iter__(self):
- index = self.__start
- while not self.__finished(index):
- if self.__list._is_larger_than(index):
- yield self.__list[index]
- index += self.__step
-
- def __finished(self, index):
- return self.__stop is not None and index >= self.__stop
+ index = self._start
+ while not self._finished(index):
+ if self._list._is_larger_than(index):
+ yield self._list[index]
+ index += self._step
+
+ def _finished(self, index):
+ return self._stop is not None and index >= self._stop
| Unable to list_enrollment_terms() on 0.8.2
It seems Enrollment Terms in the Canvas API aren't structured like most other Paginated Lists. Specifically, in the example response at https://canvas.instructure.com/doc/api/enrollment_terms.html#method.terms_api.index (and my own testing), the list of term objects are nested under a top-level string key `enrollment_terms` in the JSON response. It seems like other Paginated Lists return a JSON payload where the list object of requested items is the top-level element. This difference seems to cause `paginated_list.py` to fail to parse the Enrollment Terms response in the loop over the JSON payload at https://github.com/ucfopen/canvasapi/blob/v0.8.2/canvasapi/paginated_list.py#L78-L81, since on the first iteration `element` will be the string `enrollment_terms`, which does not have an update() member function.
I'm sorry if I missed something obvious that lets me specify what I want the "root element" of the payload to be when parsing a Paginated List. I poked around a little bit but didn't see any way to mess with the data variable in _get_next_page(). | ucfopen/canvasapi | diff --git a/tests/fixtures/account.json b/tests/fixtures/account.json
index bf80693..e12a3e6 100644
--- a/tests/fixtures/account.json
+++ b/tests/fixtures/account.json
@@ -291,7 +291,8 @@
"list_enrollment_terms": {
"method": "GET",
"endpoint": "accounts/1/terms",
- "data": [
+ "data": {
+ "enrollment_terms": [
{
"id": 1,
"name": "Enrollment Term 1"
@@ -299,8 +300,8 @@
{
"id": 2,
"name": "Enrollment Term 2"
- }
- ],
+ }]
+ },
"status_code": 200
},
"list_groups_context": {
diff --git a/tests/test_paginated_list.py b/tests/test_paginated_list.py
index b7c2a1d..8fd6521 100644
--- a/tests/test_paginated_list.py
+++ b/tests/test_paginated_list.py
@@ -4,6 +4,7 @@ import unittest
import requests_mock
from canvasapi import Canvas
+from canvasapi.enrollment_term import EnrollmentTerm
from canvasapi.paginated_list import PaginatedList
from canvasapi.user import User
from tests import settings
@@ -205,3 +206,30 @@ class TestPaginatedList(unittest.TestCase):
'six_objects_three_pages'
)
self.assertEqual(pag_list.__repr__(), '<PaginatedList of type User>')
+
+ def test_root_element_incorrect(self, m):
+ register_uris({'account': ['list_enrollment_terms']}, m)
+
+ pag_list = PaginatedList(
+ EnrollmentTerm,
+ self.requester,
+ 'GET',
+ 'accounts/1/terms',
+ _root='wrong'
+ )
+
+ with self.assertRaises(ValueError):
+ pag_list[0]
+
+ def test_root_element(self, m):
+ register_uris({'account': ['list_enrollment_terms']}, m)
+
+ pag_list = PaginatedList(
+ EnrollmentTerm,
+ self.requester,
+ 'GET',
+ 'accounts/1/terms',
+ _root='enrollment_terms'
+ )
+
+ self.assertIsInstance(pag_list[0], EnrollmentTerm)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 2
} | 0.8 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"flake8",
"coverage"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements.txt",
"dev_requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
attrs==22.2.0
Babel==2.11.0
-e git+https://github.com/ucfopen/canvasapi.git@65b75497665ca966c9046f2efde1273697c09e3b#egg=canvasapi
certifi==2021.5.30
charset-normalizer==2.0.12
coverage==6.2
docutils==0.17.1
flake8==5.0.4
idna==3.10
imagesize==1.4.1
importlib-metadata==4.2.0
iniconfig==1.1.1
Jinja2==3.0.3
MarkupSafe==2.0.1
mccabe==0.7.0
packaging==21.3
pluggy==1.0.0
py==1.11.0
pycodestyle==2.9.1
pyflakes==2.5.0
Pygments==2.14.0
pyparsing==3.1.4
pytest==7.0.1
pytest-cov==4.0.0
pytz==2025.2
requests==2.27.1
requests-mock==1.12.1
six==1.17.0
snowballstemmer==2.2.0
Sphinx==4.3.2
sphinx-rtd-theme==1.3.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jquery==4.1
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
zipp==3.6.0
| name: canvasapi
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- attrs==22.2.0
- babel==2.11.0
- charset-normalizer==2.0.12
- coverage==6.2
- docutils==0.17.1
- flake8==5.0.4
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.2.0
- iniconfig==1.1.1
- jinja2==3.0.3
- markupsafe==2.0.1
- mccabe==0.7.0
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pycodestyle==2.9.1
- pyflakes==2.5.0
- pygments==2.14.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-cov==4.0.0
- pytz==2025.2
- requests==2.27.1
- requests-mock==1.12.1
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==4.3.2
- sphinx-rtd-theme==1.3.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jquery==4.1
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- zipp==3.6.0
prefix: /opt/conda/envs/canvasapi
| [
"tests/test_paginated_list.py::TestPaginatedList::test_root_element",
"tests/test_paginated_list.py::TestPaginatedList::test_root_element_incorrect"
] | [] | [
"tests/test_paginated_list.py::TestPaginatedList::test_getitem_first",
"tests/test_paginated_list.py::TestPaginatedList::test_getitem_second_page",
"tests/test_paginated_list.py::TestPaginatedList::test_iterator",
"tests/test_paginated_list.py::TestPaginatedList::test_paginated_list_empty",
"tests/test_paginated_list.py::TestPaginatedList::test_paginated_list_four_two_pages",
"tests/test_paginated_list.py::TestPaginatedList::test_paginated_list_single",
"tests/test_paginated_list.py::TestPaginatedList::test_paginated_list_six_three_pages",
"tests/test_paginated_list.py::TestPaginatedList::test_paginated_list_two_one_page",
"tests/test_paginated_list.py::TestPaginatedList::test_repr",
"tests/test_paginated_list.py::TestPaginatedList::test_slice_beginning",
"tests/test_paginated_list.py::TestPaginatedList::test_slice_end",
"tests/test_paginated_list.py::TestPaginatedList::test_slice_middle"
] | [] | MIT License | 2,160 | 1,451 | [
"canvasapi/account.py",
"canvasapi/paginated_list.py"
] |
|
piotrmaslanka__satella-11 | 5fa0a67e4d35431f2f54740ba8fcfbd7f6d8bc59 | 2018-02-16 16:59:11 | aa8d49a9754c89eb3dcdafcf32d98797b3264908 | diff --git a/satella/coding/recast_exceptions.py b/satella/coding/recast_exceptions.py
index bf164db4..32e4e1c8 100644
--- a/satella/coding/recast_exceptions.py
+++ b/satella/coding/recast_exceptions.py
@@ -45,7 +45,8 @@ class rethrow_as(object):
"""
# You can also provide just two exceptions
- if len(pairs) == 2 and all(issubclass(p, BaseException) for p in pairs):
+ if len(pairs) == 2 and not isinstance(pairs[1], (tuple, list)) \
+ and all(issubclass(p, BaseException) for p in pairs):
self.mapping = {pairs[0]: pairs[1]}
else:
self.mapping = dict(pairs)
| Bad rethrow_as
```python
@rethrow_as((UnicodeDecodeError, ConfigurationMalformed),
(json.JSONDecodeError, ConfigurationMalformed))
@rethrow_as(ValueError, ConfigurationMalformed)
@rethrow_as(binascii.Error, ConfigurationMalformed)
@rethrow_as(TypeError, ConfigurationError)
def provide(self):
return json.loads(self.root, encoding=self.encoding)
```
breaks it by treating two first pairs in a wrong way | piotrmaslanka/satella | diff --git a/tests/test_coding/test_rethrow.py b/tests/test_coding/test_rethrow.py
index ce17f722..80dd8ec6 100644
--- a/tests/test_coding/test_rethrow.py
+++ b/tests/test_coding/test_rethrow.py
@@ -39,4 +39,17 @@ class TestStuff(unittest.TestCase):
def lol():
raise ValueError()
- self.assertRaises(NameError, lol)
\ No newline at end of file
+ self.assertRaises(NameError, lol)
+
+ def test_issue_10(self):
+
+ class WTFException1(Exception): pass
+ class WTFException2(Exception): pass
+
+ @rethrow_as((NameError, WTFException1),
+ (TypeError, WTFException2))
+ def provide(exc):
+ raise exc()
+
+ self.assertRaises(WTFException1, lambda: provide(NameError))
+ self.assertRaises(WTFException2, lambda: provide(TypeError))
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 3,
"test_score": 2
},
"num_modified_files": 1
} | 2.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"coverage"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | coverage==7.8.0
exceptiongroup==1.2.2
iniconfig==2.1.0
monotonic==1.6
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
-e git+https://github.com/piotrmaslanka/satella.git@5fa0a67e4d35431f2f54740ba8fcfbd7f6d8bc59#egg=satella
six==1.17.0
tomli==2.2.1
typing==3.7.4.3
| name: satella
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- coverage==7.8.0
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- monotonic==1.6
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- six==1.17.0
- tomli==2.2.1
- typing==3.7.4.3
prefix: /opt/conda/envs/satella
| [
"tests/test_coding/test_rethrow.py::TestStuff::test_issue_10"
] | [] | [
"tests/test_coding/test_rethrow.py::TestStuff::test_rethrow",
"tests/test_coding/test_rethrow.py::TestStuff::test_rethrow_2",
"tests/test_coding/test_rethrow.py::TestStuff::test_rethrow_3",
"tests/test_coding/test_rethrow.py::TestStuff::test_silencer",
"tests/test_coding/test_rethrow.py::TestStuff::test_silencer_2"
] | [] | MIT License | 2,165 | 195 | [
"satella/coding/recast_exceptions.py"
] |
|
MasoniteFramework__core-16 | 3f87388490aa65aa2906f9f3e6727d70f16c5509 | 2018-02-18 02:32:53 | 416a7a42f83d4212557f9f7edf4412fc4259eb13 | diff --git a/masonite/drivers/UploadDiskDriver.py b/masonite/drivers/UploadDiskDriver.py
index 410f732..e3721bc 100644
--- a/masonite/drivers/UploadDiskDriver.py
+++ b/masonite/drivers/UploadDiskDriver.py
@@ -12,6 +12,8 @@ class UploadDiskDriver(object):
if not location:
location = self.config.DRIVERS['disk']['location']
+ location += '/'
+
open(location + filename, 'wb').write(fileitem.file.read())
return location + filename
diff --git a/masonite/request.py b/masonite/request.py
index 8036a6f..7586018 100644
--- a/masonite/request.py
+++ b/masonite/request.py
@@ -196,11 +196,18 @@ class Request(object):
# if the url contains a parameter variable like @id:int
if '@' in url:
url = url.replace('@', '').replace(':int', '').replace(':string', '')
- print('url after @', url)
- compiled_url += '/' + str(self.param(url)) + '/'
+ compiled_url += str(self.param(url)) + '/'
else:
- compiled_url += url
- print('compiled_url:', compiled_url)
+ compiled_url += url + '/'
+
+ # The loop isn't perfect and may have an unwanted trailing slash
+ if compiled_url.endswith('/') and not self.redirect_url.endswith('/'):
+ compiled_url = compiled_url[:-1]
+
+ # The loop isn't perfect and may have 2 slashes next to eachother
+ if '//' in compiled_url:
+ compiled_url = compiled_url.replace('//', '/')
+
return compiled_url
def has_subdomain(self):
diff --git a/setup.py b/setup.py
index 8e9a335..668e018 100644
--- a/setup.py
+++ b/setup.py
@@ -10,7 +10,7 @@ setup(
'masonite.testsuite',
'masonite.queues',
],
- version='1.3.1',
+ version='1.3.2',
install_requires=[
'validator.py==1.2.5',
'cryptography==2.1.4',
| Redirect does not compile url correctly
a redirect url of '/url/here'
compiles into "urlhere" for some reason. check tests because I thought this was fixed | MasoniteFramework/core | diff --git a/tests/test_requests.py b/tests/test_requests.py
index d68c2d8..f9b640f 100644
--- a/tests/test_requests.py
+++ b/tests/test_requests.py
@@ -149,3 +149,58 @@ def test_request_has_subdomain_returns_bool():
request.environ['HTTP_HOST'] = 'test.localhost.com'
assert request.has_subdomain() is True
+
+def test_redirect_compiles_url():
+ app = App()
+ app.bind('Request', REQUEST)
+ request = app.make('Request').load_app(app)
+
+ request.redirect('test/url')
+
+ assert request.compile_route_to_url() == '/test/url'
+
+def test_redirect_compiles_url_with_multiple_slashes():
+ app = App()
+ app.bind('Request', REQUEST)
+ request = app.make('Request').load_app(app)
+
+ request.redirect('test/url/here')
+
+ assert request.compile_route_to_url() == '/test/url/here'
+
+def test_redirect_compiles_url_with_trailing_slash():
+ app = App()
+ app.bind('Request', REQUEST)
+ request = app.make('Request').load_app(app)
+
+ request.redirect('test/url/here/')
+
+ assert request.compile_route_to_url() == '/test/url/here/'
+
+def test_redirect_compiles_url_with_parameters():
+ app = App()
+ app.bind('Request', REQUEST)
+ request = app.make('Request').load_app(app)
+
+ request.redirect('test/@id').send({'id': '1'})
+
+ assert request.compile_route_to_url() == '/test/1'
+
+def test_redirect_compiles_url_with_multiple_parameters():
+ app = App()
+ app.bind('Request', REQUEST)
+ request = app.make('Request').load_app(app)
+
+ request.redirect('test/@id/@test').send({'id': '1', 'test': 'user'})
+
+ assert request.compile_route_to_url() == '/test/1/user'
+
+def test_redirect_compiles_url_with_http():
+ app = App()
+ app.bind('Request', REQUEST)
+ request = app.make('Request').load_app(app)
+
+ request.redirect('http://google.com')
+
+ assert request.compile_route_to_url() == 'http://google.com'
+
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 3,
"test_score": 2
},
"num_modified_files": 3
} | 1.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist"
],
"pre_install": [],
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | asn1crypto==1.5.1
attrs==22.2.0
bcrypt==3.1.4
boto3==1.5.24
botocore==1.8.50
certifi==2021.5.30
cffi==1.15.1
chardet==3.0.4
coverage==6.2
cryptography==2.1.4
docutils==0.18.1
execnet==1.9.0
idna==2.6
importlib-metadata==4.8.3
iniconfig==1.1.1
Jinja2==3.0.3
jmespath==0.10.0
libsass==0.22.0
MarkupSafe==2.0.1
-e git+https://github.com/MasoniteFramework/core.git@3f87388490aa65aa2906f9f3e6727d70f16c5509#egg=masonite
packaging==21.3
pluggy==1.0.0
py==1.11.0
pycparser==2.21
pyparsing==3.1.4
pytest==7.0.1
pytest-cov==4.0.0
pytest-xdist==3.0.2
python-dateutil==2.9.0.post0
python-dotenv==0.20.0
requests==2.18.4
requests-file==2.1.0
s3transfer==0.1.13
six==1.17.0
tldextract==2.2.0
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.22
validator.py==1.2.5
whitenoise==5.3.0
zipp==3.6.0
| name: core
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- asn1crypto==1.5.1
- attrs==22.2.0
- bcrypt==3.1.4
- boto3==1.5.24
- botocore==1.8.50
- cffi==1.15.1
- chardet==3.0.4
- coverage==6.2
- cryptography==2.1.4
- docutils==0.18.1
- execnet==1.9.0
- idna==2.6
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- jinja2==3.0.3
- jmespath==0.10.0
- libsass==0.22.0
- markupsafe==2.0.1
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pycparser==2.21
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-cov==4.0.0
- pytest-xdist==3.0.2
- python-dateutil==2.9.0.post0
- python-dotenv==0.20.0
- requests==2.18.4
- requests-file==2.1.0
- s3transfer==0.1.13
- six==1.17.0
- tldextract==2.2.0
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.22
- validator-py==1.2.5
- whitenoise==5.3.0
- zipp==3.6.0
prefix: /opt/conda/envs/core
| [
"tests/test_requests.py::test_redirect_compiles_url",
"tests/test_requests.py::test_redirect_compiles_url_with_multiple_slashes",
"tests/test_requests.py::test_redirect_compiles_url_with_trailing_slash",
"tests/test_requests.py::test_redirect_compiles_url_with_parameters",
"tests/test_requests.py::test_redirect_compiles_url_with_multiple_parameters"
] | [] | [
"tests/test_requests.py::test_request_is_callable",
"tests/test_requests.py::test_request_input_should_return_input_on_get_request",
"tests/test_requests.py::test_request_all_should_return_params",
"tests/test_requests.py::test_request_has_should_return_bool",
"tests/test_requests.py::test_request_set_params_should_return_self",
"tests/test_requests.py::test_request_param_returns_parameter_set_or_false",
"tests/test_requests.py::test_request_appends_cookie",
"tests/test_requests.py::test_request_sets_and_gets_cookies",
"tests/test_requests.py::test_redirect_returns_request",
"tests/test_requests.py::test_redirectTo_returns_request",
"tests/test_requests.py::test_request_no_input_returns_false",
"tests/test_requests.py::test_request_get_cookies_returns_cookies",
"tests/test_requests.py::test_request_set_user_sets_object",
"tests/test_requests.py::test_request_loads_app",
"tests/test_requests.py::test_request_gets_input_from_container",
"tests/test_requests.py::test_redirections_reset",
"tests/test_requests.py::test_request_has_subdomain_returns_bool",
"tests/test_requests.py::test_redirect_compiles_url_with_http"
] | [] | MIT License | 2,173 | 540 | [
"masonite/drivers/UploadDiskDriver.py",
"masonite/request.py",
"setup.py"
] |
|
dropbox__stone-72 | aee647260f103dcb78a3b6af43d6000a9cbd8eaa | 2018-02-20 21:13:54 | aee647260f103dcb78a3b6af43d6000a9cbd8eaa | diff --git a/stone/backends/python_type_stubs.py b/stone/backends/python_type_stubs.py
index 6a0c6f0..c087c79 100644
--- a/stone/backends/python_type_stubs.py
+++ b/stone/backends/python_type_stubs.py
@@ -45,6 +45,7 @@ def emit_pass_if_nothing_emitted(codegen):
ending_lineno = codegen.lineno
if starting_lineno == ending_lineno:
codegen.emit("pass")
+ codegen.emit()
class ImportTracker(object):
def __init__(self):
@@ -135,6 +136,7 @@ class PythonTypeStubsBackend(CodeBackend):
for alias in namespace.linearize_aliases():
self._generate_alias_definition(namespace, alias)
+ self._generate_routes(namespace)
self._generate_imports_needed_for_typing()
def _generate_imports_for_referenced_namespaces(self, namespace):
@@ -152,8 +154,17 @@ class PythonTypeStubsBackend(CodeBackend):
with self.indent():
self._generate_struct_class_init(ns, data_type)
self._generate_struct_class_properties(ns, data_type)
+
+ self._generate_validator_for(data_type)
self.emit()
+ def _generate_validator_for(self, data_type):
+ # type: (DataType) -> None
+ cls_name = class_name_for_data_type(data_type)
+ self.emit("{}_validator = ... # type: stone_validators.Validator".format(
+ cls_name
+ ))
+
def _generate_union_class(self, ns, data_type):
# type: (ApiNamespace, Union) -> None
self.emit(self._class_declaration_for_type(ns, data_type))
@@ -162,6 +173,8 @@ class PythonTypeStubsBackend(CodeBackend):
self._generate_union_class_is_set(data_type)
self._generate_union_class_variant_creators(ns, data_type)
self._generate_union_class_get_helpers(ns, data_type)
+
+ self._generate_validator_for(data_type)
self.emit()
def _generate_union_class_vars(self, ns, data_type):
@@ -356,6 +369,22 @@ class PythonTypeStubsBackend(CodeBackend):
return map_stone_type_to_python_type(ns, data_type,
override_dict=self._pep_484_type_mapping_callbacks)
+ def _generate_routes(
+ self,
+ namespace, # type: ApiNamespace
+ ):
+ # type: (...) -> None
+ for route in namespace.routes:
+ var_name = fmt_func(route.name)
+ self.emit(
+ "{var_name} = ... # type: bb.Route".format(
+ var_name=var_name
+ )
+ )
+
+ if namespace.routes:
+ self.emit()
+
def _generate_imports_needed_for_typing(self):
# type: () -> None
if self.import_tracker.cur_namespace_typing_imports:
diff --git a/stone/ir/api.py b/stone/ir/api.py
index 2ee40e6..703961e 100644
--- a/stone/ir/api.py
+++ b/stone/ir/api.py
@@ -104,7 +104,7 @@ class ApiNamespace(object):
self.name = name
self.doc = None # type: typing.Optional[six.text_type]
self.routes = [] # type: typing.List[ApiRoute]
- self.route_by_name = {} # type: typing.Dict[str, ApiRoute]
+ self.route_by_name = {} # type: typing.Dict[typing.Text, ApiRoute]
self.data_types = [] # type: typing.List[UserDefined]
self.data_type_by_name = {} # type: typing.Dict[str, UserDefined]
self.aliases = [] # type: typing.List[Alias]
@@ -315,7 +315,7 @@ class ApiRoute(object):
def __init__(self,
name,
ast_node):
- # type: (str, AstRouteDef) -> None
+ # type: (typing.Text, AstRouteDef) -> None
"""
:param str name: Designated name of the endpoint.
:param ast_node: Raw route definition from the parser.
| python_type_stubs generator should output _validator and route objects.
This was an oversight on my original implementation of `python_type_stubs` - after integrating it into our codebase I've seen a few mypy errors along the lines of
```
has no attribute 'SomeStoneType_validator
```
and
```
Module has no attribute "name_of_a_route"
```
I might work on it this weekend, just wanted to track as an issue | dropbox/stone | diff --git a/test/test_python_type_stubs.py b/test/test_python_type_stubs.py
index dd64bc5..e4423fb 100644
--- a/test/test_python_type_stubs.py
+++ b/test/test_python_type_stubs.py
@@ -1,6 +1,7 @@
from __future__ import absolute_import, division, print_function, unicode_literals
import textwrap
+
MYPY = False
if MYPY:
import typing # noqa: F401 # pylint: disable=import-error,unused-import,useless-suppression
@@ -30,6 +31,7 @@ from stone.ir import (
UnionField,
Void,
Float64)
+from stone.ir.api import ApiRoute
from stone.backends.python_type_stubs import PythonTypeStubsBackend
from test.backend_test_util import _mock_emit
@@ -167,6 +169,22 @@ def _make_namespace_with_empty_union():
return ns
+def _make_namespace_with_route():
+ # type: (...) -> ApiNamespace
+ ns = ApiNamespace("_make_namespace_with_route()")
+ mock_ast_node = Mock()
+ route_one = ApiRoute(
+ name="route_one",
+ ast_node=mock_ast_node,
+ )
+ route_two = ApiRoute(
+ name="route_two",
+ ast_node=mock_ast_node,
+ )
+ ns.add_route(route_one)
+ ns.add_route(route_two)
+ return ns
+
def _api():
api = Api(version="1.0")
return api
@@ -219,7 +237,8 @@ class TestPythonTypeStubs(unittest.TestCase):
@f1.deleter
def f1(self) -> None: ...
-
+
+ Struct1_validator = ... # type: stone_validators.Validator
class Struct2(object):
def __init__(self,
@@ -256,6 +275,7 @@ class TestPythonTypeStubs(unittest.TestCase):
@f4.deleter
def f4(self) -> None: ...
+ Struct2_validator = ... # type: stone_validators.Validator
from typing import (
@@ -298,6 +318,7 @@ class TestPythonTypeStubs(unittest.TestCase):
@nullable_list.deleter
def nullable_list(self) -> None: ...
+ NestedTypes_validator = ... # type: stone_validators.Validator
from typing import (
@@ -322,6 +343,7 @@ class TestPythonTypeStubs(unittest.TestCase):
def is_last(self) -> bool: ...
+ Union_validator = ... # type: stone_validators.Validator
class Shape(bb.Union):
point = ... # type: Shape
@@ -335,7 +357,8 @@ class TestPythonTypeStubs(unittest.TestCase):
def get_circle(self) -> float: ...
-
+ Shape_validator = ... # type: stone_validators.Validator
+
""").format(headers=_headers)
self.assertEqual(result, expected)
@@ -348,6 +371,21 @@ class TestPythonTypeStubs(unittest.TestCase):
class EmptyUnion(bb.Union):
pass
+
+ EmptyUnion_validator = ... # type: stone_validators.Validator
+
+ """).format(headers=_headers)
+ self.assertEqual(result, expected)
+
+ def test__generate_routes(self):
+ # type: () -> None
+ ns = _make_namespace_with_route()
+ result = self._evaluate_namespace(ns)
+ expected = textwrap.dedent("""\
+ {headers}
+
+ route_one = ... # type: bb.Route
+ route_two = ... # type: bb.Route
""").format(headers=_headers)
self.assertEqual(result, expected)
@@ -372,6 +410,7 @@ class TestPythonTypeStubs(unittest.TestCase):
@f1.deleter
def f1(self) -> None: ...
+ Struct1_validator = ... # type: stone_validators.Validator
AliasToStruct1 = Struct1
""").format(headers=_headers)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 2
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"mock"
],
"pre_install": [],
"python": "3.6",
"reqs_path": [
"test/requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
importlib-metadata==4.8.3
iniconfig==1.1.1
mock==5.2.0
packaging==21.3
pluggy==1.0.0
ply==3.11
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
six==1.17.0
-e git+https://github.com/dropbox/stone.git@aee647260f103dcb78a3b6af43d6000a9cbd8eaa#egg=stone
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: stone
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- mock==5.2.0
- packaging==21.3
- pluggy==1.0.0
- ply==3.11
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- six==1.17.0
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/stone
| [
"test/test_python_type_stubs.py::TestPythonTypeStubs::test__generate_base_namespace_module__with_alias",
"test/test_python_type_stubs.py::TestPythonTypeStubs::test__generate_base_namespace_module__with_many_structs",
"test/test_python_type_stubs.py::TestPythonTypeStubs::test__generate_base_namespace_module__with_nested_types",
"test/test_python_type_stubs.py::TestPythonTypeStubs::test__generate_base_namespace_module_with_empty_union__generates_pass",
"test/test_python_type_stubs.py::TestPythonTypeStubs::test__generate_base_namespace_module_with_union__generates_stuff",
"test/test_python_type_stubs.py::TestPythonTypeStubs::test__generate_routes"
] | [] | [] | [] | MIT License | 2,187 | 968 | [
"stone/backends/python_type_stubs.py",
"stone/ir/api.py"
] |
|
level12__keg-elements-71 | 7838f08ef0e8fec7e2e74c8dcad21f748e98a697 | 2018-02-21 02:54:26 | 2b0ee8adedd6e21ffa426a5abf3adb9c79706abe | diff --git a/keg_elements/forms/validators.py b/keg_elements/forms/validators.py
index f65db1d..d473d7a 100644
--- a/keg_elements/forms/validators.py
+++ b/keg_elements/forms/validators.py
@@ -47,8 +47,18 @@ class ValidateUnique(object):
def __init__(self, object_html_link=None):
self.object_html_link = object_html_link
+ def get_obj(self, form):
+ if hasattr(form, 'obj'):
+ return form.obj
+ if hasattr(form, '_obj'):
+ return form._obj
+
+ raise AttributeError(
+ 'Form must provide either `obj` or `_obj` property for uniqueness validation.'
+ )
+
def __call__(self, form, field):
- obj = getattr(form, 'obj')
+ obj = self.get_obj(form)
other = form.get_object_by_field(field)
both_exist = None not in (obj, other)
@@ -57,7 +67,7 @@ class ValidateUnique(object):
if (both_exist and not same_record) or another_exists_with_value:
link = (' to {}.'.format(self.object_html_link(other))
- if hasattr(self, 'object_html_link') else '.')
+ if self.object_html_link is not None else '.')
msg = jinja2.Markup('This value must be unique but is already assigned'
'{}'.format(link))
raise ValidationError(msg)
| `ValidateUnique` form validator can integrate better
Right now `ValidateUnique` requires that the Form have a `obj` member. This must be added manually to the form since it's not included by default on a WTForm form. However, WTForms do have a `_obj` member which has the same purpose. We could make `ValidateUnique` easier to use by utilizing this member. | level12/keg-elements | diff --git a/keg_elements/tests/test_forms/test_validators.py b/keg_elements/tests/test_forms/test_validators.py
index 103894d..e390e5d 100644
--- a/keg_elements/tests/test_forms/test_validators.py
+++ b/keg_elements/tests/test_forms/test_validators.py
@@ -1,24 +1,67 @@
+import jinja2
import pytest
import wtforms
+from werkzeug.datastructures import MultiDict
+
import keg_elements.forms.validators as validators
+from keg_elements.forms import ModelForm
-class Form():
- def __init__(self, obj):
- self.obj = obj
+class UniqueForm(ModelForm):
+ uq_field = wtforms.fields.StringField('thing', validators=[
+ validators.ValidateUnique(object_html_link=lambda field: 'link')])
def get_object_by_field(self, field):
- return 1
+ return 1 if field.data == '1' else None
class TestUniqueValidator(object):
- def test_unique_validator(self):
- field = wtforms.fields.StringField('thing')
- validator = validators.ValidateUnique(object_html_link=lambda field: 'link')
+ def test_validation_passes(self):
+ form = UniqueForm(MultiDict({'uq_field': '1'}), obj=1)
+ assert form.uq_field.validate(form) is True
+
+ form = UniqueForm(MultiDict({'uq_field': '2'}), obj=1)
+ assert form.uq_field.validate(form) is True
+
+ def test_validation_fails(self):
+ form = UniqueForm(MultiDict({'uq_field': '1'}))
+ assert form.uq_field.validate(form) is False
+ assert form.uq_field.errors == [
+ jinja2.Markup('This value must be unique but is already assigned to link.')
+ ]
+
+ form = UniqueForm(MultiDict({'uq_field': '1'}), obj=2)
+ assert form.uq_field.validate(form) is False
+ assert form.uq_field.errors == [
+ jinja2.Markup('This value must be unique but is already assigned to link.')
+ ]
+
+ def test_no_object_link_provided(self):
+ class Form(ModelForm):
+ uq_field = wtforms.fields.StringField('thing', validators=[validators.ValidateUnique()])
+
+ def get_object_by_field(self, field):
+ return 1 if field.data == '1' else None
+
+ form = Form(MultiDict({'uq_field': '1'}))
+ assert form.uq_field.validate(form) is False
+ assert form.uq_field.errors == [
+ jinja2.Markup('This value must be unique but is already assigned.')
+ ]
+
+ def test_get_obj(self):
+ class Form:
+ def __init__(self, **kwargs):
+ self.__dict__.update(kwargs)
+
+ validator = validators.ValidateUnique()
+
+ assert validator.get_obj(Form(obj='foo')) == 'foo'
+ assert validator.get_obj(Form(_obj='foo')) == 'foo'
- assert validator(Form(1), field) is True
+ with pytest.raises(AttributeError) as exc:
+ validator.get_obj(Form())
- with pytest.raises(wtforms.ValidationError,
- message=('This valuue must be unique bit is already '
- 'assigned link.')):
- assert validator(Form(2), field)
+ assert str(exc.value) == (
+ 'Form must provide either `obj` or `_obj` property for uniqueness validation.'
+ )
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 1
} | 0.5 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"requirements/runtime.txt",
"requirements/testing.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | appdirs==1.4.4
arrow==1.3.0
beautifulsoup4==4.13.3
BlazeUtils==0.7.0
blinker==1.9.0
cffi==1.17.1
click==8.1.8
coverage==7.8.0
cryptography==44.0.2
cssselect==1.3.0
exceptiongroup==1.2.2
flake8==7.2.0
Flask==3.1.0
Flask-SQLAlchemy==3.1.1
Flask-WebTest==0.1.6
Flask-WTF==1.2.2
greenlet==3.1.1
importlib_metadata==8.6.1
infinity==1.5
iniconfig==2.1.0
intervals==0.9.2
itsdangerous==2.2.0
Jinja2==3.1.6
Keg==0.11.1
-e git+https://github.com/level12/keg-elements.git@7838f08ef0e8fec7e2e74c8dcad21f748e98a697#egg=KegElements
lxml==5.3.1
MarkupSafe==3.0.2
mccabe==0.7.0
packaging==24.2
pluggy==1.5.0
pycodestyle==2.13.0
pycparser==2.22
pyflakes==3.3.2
pyquery==2.0.1
pytest==8.3.5
pytest-cov==6.0.0
python-dateutil==2.9.0.post0
python-json-logger==3.3.0
pytz==2025.2
six==1.17.0
soupsieve==2.6
SQLAlchemy==2.0.40
SQLAlchemy-Utils==0.41.2
tomli==2.2.1
types-python-dateutil==2.9.0.20241206
typing_extensions==4.13.0
validators==0.34.0
waitress==3.0.2
WebOb==1.8.9
WebTest==3.0.4
Werkzeug==3.1.3
wrapt==1.17.2
WTForms==3.2.1
WTForms-Alchemy==0.19.0
WTForms-Components==0.11.0
zipp==3.21.0
| name: keg-elements
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- appdirs==1.4.4
- arrow==1.3.0
- beautifulsoup4==4.13.3
- blazeutils==0.7.0
- blinker==1.9.0
- cffi==1.17.1
- click==8.1.8
- coverage==7.8.0
- cryptography==44.0.2
- cssselect==1.3.0
- exceptiongroup==1.2.2
- flake8==7.2.0
- flask==3.1.0
- flask-sqlalchemy==3.1.1
- flask-webtest==0.1.6
- flask-wtf==1.2.2
- greenlet==3.1.1
- importlib-metadata==8.6.1
- infinity==1.5
- iniconfig==2.1.0
- intervals==0.9.2
- itsdangerous==2.2.0
- jinja2==3.1.6
- keg==0.11.1
- lxml==5.3.1
- markupsafe==3.0.2
- mccabe==0.7.0
- packaging==24.2
- pluggy==1.5.0
- pycodestyle==2.13.0
- pycparser==2.22
- pyflakes==3.3.2
- pyquery==2.0.1
- pytest==8.3.5
- pytest-cov==6.0.0
- python-dateutil==2.9.0.post0
- python-json-logger==3.3.0
- pytz==2025.2
- six==1.17.0
- soupsieve==2.6
- sqlalchemy==2.0.40
- sqlalchemy-utils==0.41.2
- tomli==2.2.1
- types-python-dateutil==2.9.0.20241206
- typing-extensions==4.13.0
- validators==0.34.0
- waitress==3.0.2
- webob==1.8.9
- webtest==3.0.4
- werkzeug==3.1.3
- wrapt==1.17.2
- wtforms==3.2.1
- wtforms-alchemy==0.19.0
- wtforms-components==0.11.0
- zipp==3.21.0
prefix: /opt/conda/envs/keg-elements
| [
"keg_elements/tests/test_forms/test_validators.py::TestUniqueValidator::test_get_obj"
] | [
"keg_elements/tests/test_forms/test_validators.py::TestUniqueValidator::test_validation_passes",
"keg_elements/tests/test_forms/test_validators.py::TestUniqueValidator::test_validation_fails",
"keg_elements/tests/test_forms/test_validators.py::TestUniqueValidator::test_no_object_link_provided"
] | [] | [] | BSD License | 2,189 | 334 | [
"keg_elements/forms/validators.py"
] |
|
jnothman__UpSetPlot-29 | a272986182e3f1de64ab7d20fd44c59f8b693017 | 2018-02-22 10:36:28 | a272986182e3f1de64ab7d20fd44c59f8b693017 | diff --git a/upsetplot/plotting.py b/upsetplot/plotting.py
index 89c8506..5203bf3 100644
--- a/upsetplot/plotting.py
+++ b/upsetplot/plotting.py
@@ -79,26 +79,29 @@ class UpSet:
with_lines : bool
Whether to show lines joining dots in the matrix, to mark multiple sets
being intersected.
- intersections_plot_size : float
+ element_size : float or None
+ Side length in pt. If None, size is estimated to fit figure
+ intersections_plot_elements : int
The intersections plot should be large enough to fit this many matrix
- dots.
- totals_plot_size : float
+ elements.
+ totals_plot_elements : int
The totals plot should be large enough to fit this many matrix
dots.
"""
def __init__(self, data, vert=True, sort_by='degree',
sort_sets_by='cardinality', forecolor='black',
- with_lines=True, intersection_plot_size=6,
- totals_plot_size=2):
+ with_lines=True, element_size=32,
+ intersection_plot_elements=6, totals_plot_elements=2):
self._vert = vert
if not vert:
raise NotImplementedError()
self._forecolor = forecolor
self._with_lines = with_lines
- self._totals_plot_size = totals_plot_size
- self._intersection_plot_size = intersection_plot_size
+ self._element_size = element_size
+ self._totals_plot_elements = totals_plot_elements
+ self._intersection_plot_elements = intersection_plot_elements
(self.intersections,
self.totals) = _process_data(data,
@@ -111,29 +114,42 @@ class UpSet:
n_cats = len(self.totals)
n_inters = len(self.intersections)
- text_space = self._calculate_text_ncols(fig)
- GS = matplotlib.gridspec.GridSpec
- gridspec = GS(n_cats + self._intersection_plot_size,
- n_inters + text_space + self._totals_plot_size,
- hspace=1)
- return {'intersections': gridspec[:-n_cats, -n_inters:],
- 'matrix': gridspec[-n_cats:, -n_inters:],
- 'totals': gridspec[-n_cats:, :self._totals_plot_size],
- 'gs': gridspec}
-
- def _calculate_text_ncols(self, fig):
if fig is None:
fig = plt.gcf()
+
+ # Determine text size to determine figure size / spacing
r = get_renderer(fig)
t = fig.text(0, 0, '\n'.join(self.totals.index.values))
textw = t.get_window_extent(renderer=r).width
- figw = fig.get_window_extent(renderer=r).width
- MAGIC_MARGIN = 20 # FIXME
- colw = (figw - textw - MAGIC_MARGIN) / (len(self.intersections) +
- self._totals_plot_size)
t.remove()
- return int(np.ceil(figw / colw - (len(self.intersections) +
- self._totals_plot_size)))
+
+ MAGIC_MARGIN = 10 # FIXME
+ figw = fig.get_window_extent(renderer=r).width
+ if self._element_size is None:
+ colw = (figw - textw - MAGIC_MARGIN) / (len(self.intersections) +
+ self._totals_plot_elements)
+ else:
+ render_ratio = figw / fig.get_figwidth()
+ colw = self._element_size / 72 * render_ratio
+ figw = (colw * (len(self.intersections) +
+ self._totals_plot_elements) +
+ MAGIC_MARGIN + textw)
+ fig.set_figwidth(figw / render_ratio)
+ fig.set_figheight((colw * (n_cats +
+ self._intersection_plot_elements)) /
+ render_ratio)
+
+ text_nelems = int(np.ceil(figw / colw - (len(self.intersections) +
+ self._totals_plot_elements)))
+
+ GS = matplotlib.gridspec.GridSpec
+ gridspec = GS(n_cats + self._intersection_plot_elements,
+ n_inters + text_nelems + self._totals_plot_elements,
+ hspace=1)
+ return {'intersections': gridspec[:-n_cats, -n_inters:],
+ 'matrix': gridspec[-n_cats:, -n_inters:],
+ 'totals': gridspec[-n_cats:, :self._totals_plot_elements],
+ 'gs': gridspec}
def plot_matrix(self, ax):
"""Plot the matrix of intersection indicators onto ax
@@ -152,8 +168,12 @@ class UpSet:
c[idx] = self._forecolor
x = np.repeat(np.arange(len(data)), n_sets)
y = np.tile(np.arange(n_sets), len(data))
- # TODO: make s relative to colw
- ax.scatter(x, y, c=c.tolist(), linewidth=0, s=200)
+ if self._element_size is not None:
+ s = (self._element_size * .35) ** 2
+ else:
+ # TODO: make s relative to colw
+ s = 200
+ ax.scatter(x, y, c=c.tolist(), linewidth=0, s=s)
if self._with_lines:
line_data = (pd.Series(y[idx], index=x[idx])
| Allow automatic determination of figsize from dot radius in pts
User should be able to specify a dot radius and the figure size is calculated from there... | jnothman/UpSetPlot | diff --git a/upsetplot/tests/test_upsetplot.py b/upsetplot/tests/test_upsetplot.py
index f95cbb9..1dcfcab 100644
--- a/upsetplot/tests/test_upsetplot.py
+++ b/upsetplot/tests/test_upsetplot.py
@@ -72,7 +72,7 @@ def test_param_validation(kw):
UpSet(X, **kw)
[email protected]('kw', [{}])
[email protected]('kw', [{}, {'element_size': None}])
def test_plot_smoke_test(kw):
fig = matplotlib.figure.Figure()
X = generate_data(n_samples=100)
@@ -81,6 +81,32 @@ def test_plot_smoke_test(kw):
# Also check fig is optional
n_nums = len(plt.get_fignums())
- plot(X)
+ plot(X, **kw)
assert len(plt.get_fignums()) - n_nums == 1
assert plt.gcf().axes
+
+
+def test_element_size():
+ X = generate_data(n_samples=100)
+ figsizes = []
+ for element_size in range(10, 50, 5):
+ fig = matplotlib.figure.Figure()
+ UpSet(X, element_size=element_size).make_grid(fig)
+ figsizes.append((fig.get_figwidth(), fig.get_figheight()))
+
+ figwidths, figheights = zip(*figsizes)
+ # Absolute width increases
+ assert np.all(np.diff(figwidths) > 0)
+ aspect = np.divide(figwidths, figheights)
+ # Font size stays constant, so aspect ratio decreases
+ assert np.all(np.diff(aspect) < 0)
+ # But doesn't decrease by much
+ assert np.all(aspect[:-1] / aspect[1:] < 1.1)
+
+ fig = matplotlib.figure.Figure()
+ figsize_before = fig.get_figwidth(), fig.get_figheight()
+ UpSet(X, element_size=None).make_grid(fig)
+ figsize_after = fig.get_figwidth(), fig.get_figheight()
+ assert figsize_before == figsize_after
+
+ # TODO: make sure axes are all within figure
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 1
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest pytest-cov"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"doc/requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
attrs==22.2.0
Babel==2.11.0
certifi==2021.5.30
charset-normalizer==2.0.12
coverage==6.2
cycler==0.11.0
docutils==0.18.1
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
iniconfig==1.1.1
Jinja2==3.0.3
kiwisolver==1.3.1
MarkupSafe==2.0.1
matplotlib==3.3.4
numpy==1.19.5
numpydoc==1.1.0
packaging==21.3
pandas==1.1.5
Pillow==8.4.0
pluggy==1.0.0
py==1.11.0
Pygments==2.14.0
pyparsing==3.1.4
pytest==7.0.1
pytest-cov==4.0.0
python-dateutil==2.9.0.post0
pytz==2025.2
requests==2.27.1
scipy==1.5.4
six==1.17.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinx-gallery==0.10.0
sphinx-issues==3.0.1
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
tomli==1.2.3
typing_extensions==4.1.1
-e git+https://github.com/jnothman/UpSetPlot.git@a272986182e3f1de64ab7d20fd44c59f8b693017#egg=UpSetPlot
urllib3==1.26.20
zipp==3.6.0
| name: UpSetPlot
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- attrs==22.2.0
- babel==2.11.0
- charset-normalizer==2.0.12
- coverage==6.2
- cycler==0.11.0
- docutils==0.18.1
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- jinja2==3.0.3
- kiwisolver==1.3.1
- markupsafe==2.0.1
- matplotlib==3.3.4
- numpy==1.19.5
- numpydoc==1.1.0
- packaging==21.3
- pandas==1.1.5
- pillow==8.4.0
- pluggy==1.0.0
- py==1.11.0
- pygments==2.14.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-cov==4.0.0
- python-dateutil==2.9.0.post0
- pytz==2025.2
- requests==2.27.1
- scipy==1.5.4
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinx-gallery==0.10.0
- sphinx-issues==3.0.1
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- zipp==3.6.0
prefix: /opt/conda/envs/UpSetPlot
| [
"upsetplot/tests/test_upsetplot.py::test_plot_smoke_test[kw1]",
"upsetplot/tests/test_upsetplot.py::test_element_size"
] | [
"upsetplot/tests/test_upsetplot.py::test_process_data[None-degree-X1]",
"upsetplot/tests/test_upsetplot.py::test_process_data[cardinality-degree-X1]"
] | [
"upsetplot/tests/test_upsetplot.py::test_process_data[None-cardinality-X0]",
"upsetplot/tests/test_upsetplot.py::test_process_data[None-cardinality-X1]",
"upsetplot/tests/test_upsetplot.py::test_process_data[None-degree-X0]",
"upsetplot/tests/test_upsetplot.py::test_process_data[cardinality-cardinality-X0]",
"upsetplot/tests/test_upsetplot.py::test_process_data[cardinality-cardinality-X1]",
"upsetplot/tests/test_upsetplot.py::test_process_data[cardinality-degree-X0]",
"upsetplot/tests/test_upsetplot.py::test_not_aggregated[None-cardinality]",
"upsetplot/tests/test_upsetplot.py::test_not_aggregated[None-degree]",
"upsetplot/tests/test_upsetplot.py::test_not_aggregated[cardinality-cardinality]",
"upsetplot/tests/test_upsetplot.py::test_not_aggregated[cardinality-degree]",
"upsetplot/tests/test_upsetplot.py::test_param_validation[kw0]",
"upsetplot/tests/test_upsetplot.py::test_param_validation[kw1]",
"upsetplot/tests/test_upsetplot.py::test_param_validation[kw2]",
"upsetplot/tests/test_upsetplot.py::test_param_validation[kw3]",
"upsetplot/tests/test_upsetplot.py::test_param_validation[kw4]",
"upsetplot/tests/test_upsetplot.py::test_plot_smoke_test[kw0]"
] | [] | New BSD License | 2,194 | 1,252 | [
"upsetplot/plotting.py"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.