Dataset schema (column, type, value statistics):

column                     type       statistics
instance_id                string     length 12 to 57
base_commit                string     length 40 to 40
created_at                 string     date 2015-01-06 14:05:07 to 2025-04-29 17:56:51
environment_setup_commit   string     length 40 to 40
hints_text                 string     length 0 to 158k
patch                      string     length 261 to 20.8k
problem_statement          string     length 11 to 52.5k
repo                       string     length 7 to 53
test_patch                 string     length 280 to 206k
meta                       dict       -
version                    string     463 distinct values
install_config             dict       -
requirements               string     length 93 to 34k
environment                string     length 772 to 20k
FAIL_TO_PASS               sequence   length 1 to 856
FAIL_TO_FAIL               sequence   length 0 to 536
PASS_TO_PASS               sequence   length 0 to 7.87k
PASS_TO_FAIL               sequence   length 0 to 92
license_name               string     35 distinct values
__index_level_0__          int64      11 to 21.4k
num_tokens_patch           int64      103 to 4.99k
before_filepaths           sequence   length 0 to 14
instance_id: buyalsky__ordered-hash-set-5
base_commit: e6c09d81849cbb832099792b2411861dd01c687f
created_at: 2020-10-09 14:29:06
environment_setup_commit: e6c09d81849cbb832099792b2411861dd01c687f
diff --git a/src/ordered_hash_set.py b/src/ordered_hash_set.py index 27ae04b..95a4647 100644 --- a/src/ordered_hash_set.py +++ b/src/ordered_hash_set.py @@ -9,6 +9,7 @@ class OrderedSet: def add(self, item): """ Adds the item to set if it is not exist. + Returns the index of added item (or already existing item) in the respective set. Raises TypeError if specified item is not hashable :param item: (object), item to be added. @@ -21,6 +22,9 @@ class OrderedSet: if len(self._items) != 1: self._items[self._last][1][1] = item self._last = item + return self.__len__() - 1 + else: + return self.get_all().index(item) def update(self, *items): """ @@ -35,10 +39,12 @@ class OrderedSet: def remove(self, item): """ Removes given item from set. - Raises KeyError if item is not found. + Returns the used index of the removed item in the respective set. + Raises ValueError if item is not found. :param item: (object), Removed item """ + index = self.get_all().index(item) removed_item = self._items.pop(item) previous_item, next_item = removed_item[1] if item == self._first: @@ -54,6 +60,7 @@ class OrderedSet: self._items[previous_item][1][1] = next_item if next_item: self._items[next_item][1][0] = previous_item + return index def remove_all(self, *items): """
Returning indexes OrderedSet's `add` method should return index of the added item. If the item is already in the OrderedSet instance, the method should return the index it already had. Similarly, `remove` method should return index of the removed item. Example: ```python3 s = OrderedSet() s.add('Foo') # Should return 0 s.add('Bar') # Should return 1 s.add('Foo') # Should return 0 since 'Foo' is already in OrderedSet and its index is 0 s.remove('Bar') # Should return 1 ```
buyalsky/ordered-hash-set
diff --git a/src/tests/test_cases.py b/src/tests/test_cases.py index 592e97d..8f3bfee 100644 --- a/src/tests/test_cases.py +++ b/src/tests/test_cases.py @@ -34,39 +34,33 @@ def test_remove(filled_set): filled_set.remove_all(4, 6) print(filled_set) assert filled_set == OrderedSet(1, 2, 3, 5, "str") - with pytest.raises(KeyError): + with pytest.raises(ValueError): filled_set.remove(-1) def test_remove_first(filled_set): - filled_set.remove(1) + assert 0 == filled_set.remove(1) assert filled_set[0] == 2 assert filled_set == OrderedSet(2, 3, 4, 5, 6) def test_remove_last(filled_set): - filled_set.remove(6) + assert 5 == filled_set.remove(6) assert filled_set[-1] == 5 def test_add_remove_mixed(filled_set): - filled_set.remove(1) + assert 0 == filled_set.remove(1) assert filled_set[0] == 2 assert filled_set == OrderedSet(2, 3, 4, 5, 6) - filled_set.remove(6) + assert 4 == filled_set.remove(6) assert filled_set == OrderedSet(2, 3, 4, 5) - filled_set.add(7) + assert 4 == filled_set.add(7) assert filled_set == OrderedSet(2, 3, 4, 5, 7) def test_len(filled_set): - s = OrderedSet() - s.add(1) - s.add(1) - s.add(1) - s.add(2) - s.add(3) - s.remove(1) + s = OrderedSet(1, 3) assert len(s) == 2 assert len(filled_set) == 6
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 2 }, "num_modified_files": 1 }
version: 0.1
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest" ], "pre_install": [], "python": "3.7", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///croot/attrs_1668696182826/work certifi @ file:///croot/certifi_1671487769961/work/certifi flit_core @ file:///opt/conda/conda-bld/flit-core_1644941570762/work/source/flit_core importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1648562407465/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work -e git+https://github.com/buyalsky/ordered-hash-set.git@e6c09d81849cbb832099792b2411861dd01c687f#egg=ordered_hash_set packaging @ file:///croot/packaging_1671697413597/work pluggy @ file:///tmp/build/80754af9/pluggy_1648042572264/work py @ file:///opt/conda/conda-bld/py_1644396412707/work pytest==7.1.2 tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work typing_extensions @ file:///croot/typing_extensions_1669924550328/work zipp @ file:///croot/zipp_1672387121353/work
name: ordered-hash-set channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=22.1.0=py37h06a4308_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2022.12.7=py37h06a4308_0 - flit-core=3.6.0=pyhd3eb1b0_0 - importlib-metadata=4.11.3=py37h06a4308_0 - importlib_metadata=4.11.3=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=22.0=py37h06a4308_0 - pip=22.3.1=py37h06a4308_0 - pluggy=1.0.0=py37h06a4308_1 - py=1.11.0=pyhd3eb1b0_0 - pytest=7.1.2=py37h06a4308_0 - python=3.7.16=h7a1cb2a_0 - readline=8.2=h5eee18b_0 - setuptools=65.6.3=py37h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tomli=2.0.1=py37h06a4308_0 - typing_extensions=4.4.0=py37h06a4308_0 - wheel=0.38.4=py37h06a4308_0 - xz=5.6.4=h5eee18b_1 - zipp=3.11.0=py37h06a4308_0 - zlib=1.2.13=h5eee18b_1 prefix: /opt/conda/envs/ordered-hash-set
[ "src/tests/test_cases.py::test_remove", "src/tests/test_cases.py::test_remove_first", "src/tests/test_cases.py::test_remove_last", "src/tests/test_cases.py::test_add_remove_mixed" ]
[]
[ "src/tests/test_cases.py::test_contains", "src/tests/test_cases.py::test_contains_any", "src/tests/test_cases.py::test_contains_all", "src/tests/test_cases.py::test_len", "src/tests/test_cases.py::test_len2", "src/tests/test_cases.py::test_index_access", "src/tests/test_cases.py::test_equality", "src/tests/test_cases.py::test_uniqueness", "src/tests/test_cases.py::test_add_unhashable", "src/tests/test_cases.py::test_drain", "src/tests/test_cases.py::test_drain2", "src/tests/test_cases.py::test_drain_reversed", "src/tests/test_cases.py::test_formatting" ]
PASS_TO_FAIL: []
license_name: MIT License
__index_level_0__: 8,668
num_tokens_patch: 415
before_filepaths: [ "src/ordered_hash_set.py" ]
instance_id: Mykrobe-tools__mykrobe-104
base_commit: 98befa765550e9b0aeb80d0f4f01cf765ca44bdb
created_at: 2020-10-09 15:09:51
environment_setup_commit: 98befa765550e9b0aeb80d0f4f01cf765ca44bdb
diff --git a/src/mykrobe/cmds/amr.py b/src/mykrobe/cmds/amr.py index 8901564..fce6c0b 100755 --- a/src/mykrobe/cmds/amr.py +++ b/src/mykrobe/cmds/amr.py @@ -25,6 +25,7 @@ from mykrobe.predict import MykrobePredictorSusceptibilityResult from mykrobe.metagenomics import AMRSpeciesPredictor from mykrobe.species_data import DataDir from mykrobe.utils import load_json +from mykrobe.utils import fix_amino_acid_X_variants_keys from mykrobe.version import __version__ as predictor_version from mykrobe.version import __version__ as atlas_version @@ -145,7 +146,9 @@ class ConfThresholder: def ref_data_from_args(args): if args.species == "custom": if args.custom_probe_set_path is None: - raise ValueError("Must use --custom_probe_set_path option if the species is 'custom'") + raise ValueError( + "Must use --custom_probe_set_path option if the species is 'custom'" + ) ref_data = { "fasta_files": [args.custom_probe_set_path], "var_to_res_json": args.custom_variant_to_resistance_json, @@ -184,7 +187,9 @@ def detect_species_and_get_depths(cov_parser, hierarchy_json, wanted_phylo_group return {}, depths species_predictor = AMRSpeciesPredictor( - phylo_group_covgs=cov_parser.covgs.get("complex", cov_parser.covgs.get("phylo_group", {})), + phylo_group_covgs=cov_parser.covgs.get( + "complex", cov_parser.covgs.get("phylo_group", {}) + ), sub_complex_covgs=cov_parser.covgs.get("sub-complex", {}), species_covgs=cov_parser.covgs["species"], lineage_covgs=cov_parser.covgs.get("sub-species", {}), @@ -193,7 +198,11 @@ def detect_species_and_get_depths(cov_parser, hierarchy_json, wanted_phylo_group phylogenetics = species_predictor.run() if wanted_phylo_group in species_predictor.out_json["phylogenetics"]["phylo_group"]: - depths = [species_predictor.out_json["phylogenetics"]["phylo_group"][wanted_phylo_group]["median_depth"]] + depths = [ + species_predictor.out_json["phylogenetics"]["phylo_group"][ + wanted_phylo_group + ]["median_depth"] + ] return phylogenetics, depths @@ -231,15 +240,33 @@ def write_outputs(args, base_json): print(output) +def fix_X_amino_acid_variants(sample_json): + if "susceptibility" in sample_json: + for drug_dict in sample_json["susceptibility"].values(): + if "called_by" in drug_dict: + fix_amino_acid_X_variants_keys(drug_dict["called_by"]) + + if "variant_calls" in sample_json: + fix_amino_acid_X_variants_keys(sample_json["variant_calls"]) + + def run(parser, args): logger.info(f"Start runnning mykrobe predict. 
Command line: {' '.join(sys.argv)}") base_json = {args.sample: {}} args = parser.parse_args() ref_data = ref_data_from_args(args) - if args.species == "custom" and ref_data["var_to_res_json"] is None and ref_data["lineage_json"] is None: - logger.info("Forcing --report_all_calls because species is 'custom' and options --custom_variant_to_resistance_json,--custom_lineage_json were not used") + if ( + args.species == "custom" + and ref_data["var_to_res_json"] is None + and ref_data["lineage_json"] is None + ): + logger.info( + "Forcing --report_all_calls because species is 'custom' and options --custom_variant_to_resistance_json,--custom_lineage_json were not used" + ) args.report_all_calls = True - logger.info(f"Running mykrobe predict using species {args.species}, and panel version {ref_data['version']}") + logger.info( + f"Running mykrobe predict using species {args.species}, and panel version {ref_data['version']}" + ) # Run Cortex cp = CoverageParser( @@ -260,7 +287,9 @@ def run(parser, args): phylogenetics = {} depths = [cp.estimate_depth()] else: - phylogenetics, depths = detect_species_and_get_depths(cp, ref_data["hierarchy_json"], ref_data["species_phylo_group"]) + phylogenetics, depths = detect_species_and_get_depths( + cp, ref_data["hierarchy_json"], ref_data["species_phylo_group"] + ) # Genotype variant_calls_dict = {} @@ -326,7 +355,10 @@ def run(parser, args): if args.conf_percent_cutoff < 100: logger.debug("Expected depth: " + str(depths[0])) conf_thresholder = ConfThresholder( - kmer_count_error_rate, depths[0], ref_data["kmer"], incorrect_kmer_to_pc_cov + kmer_count_error_rate, + depths[0], + ref_data["kmer"], + incorrect_kmer_to_pc_cov, ) time_start = time.time() conf_threshold = conf_thresholder.get_conf_threshold( @@ -369,7 +401,11 @@ def run(parser, args): depths = [cp.estimate_depth()] mykrobe_predictor_susceptibility_result = MykrobePredictorSusceptibilityResult() - if gt is not None and (max(depths) > args.min_depth or args.force) and ref_data["var_to_res_json"] is not None: + if ( + gt is not None + and (max(depths) > args.min_depth or args.force) + and ref_data["var_to_res_json"] is not None + ): predictor = BasePredictor( variant_calls=gt.variant_calls, called_genes=gt.sequence_calls_dict, @@ -383,15 +419,22 @@ def run(parser, args): logger.info("Progress: finished making AMR predictions") base_json[args.sample] = { - "susceptibility": list(mykrobe_predictor_susceptibility_result.to_dict().values())[0], - "phylogenetics": {} if phylogenetics == {} else list(phylogenetics.to_dict().values())[0], + "susceptibility": list( + mykrobe_predictor_susceptibility_result.to_dict().values() + )[0], + "phylogenetics": {} + if phylogenetics == {} + else list(phylogenetics.to_dict().values())[0], "variant_calls": variant_calls_dict, "sequence_calls": sequence_calls_dict, "lineage_calls": lineage_calls_dict, "kmer": ref_data["kmer"], "probe_sets": ref_data["fasta_files"], "files": args.seq, - "version": {"mykrobe-predictor": predictor_version, "mykrobe-atlas": atlas_version}, + "version": { + "mykrobe-predictor": predictor_version, + "mykrobe-atlas": atlas_version, + }, "genotype_model": args.model, } if len(lineage_predict_dict) > 0: @@ -401,5 +444,6 @@ def run(parser, args): cp.remove_temporary_files() logger.info("Progress: writing output") + fix_X_amino_acid_variants(base_json[args.sample]) write_outputs(args, base_json) logger.info("Progress: finished") diff --git a/src/mykrobe/utils.py b/src/mykrobe/utils.py index 2a7f58c..889d8c8 100644 --- a/src/mykrobe/utils.py +++ 
b/src/mykrobe/utils.py @@ -3,13 +3,16 @@ import hashlib import re import json +from Bio.Seq import Seq + def check_args(args): if args.db_name is None: args.db_name = os.environ.get("DB_NAME") if args.db_name is None: raise ValueError( - "db_name needs to be set. Either run with --db_name :db_name or export DB_NAME=:db_name") + "db_name needs to be set. Either run with --db_name :db_name or export DB_NAME=:db_name" + ) return args @@ -18,8 +21,7 @@ def make_hash(s): def make_var_hash(ref, pos, alts): - var = "".join( - [ref, str(pos), "/".join(alts)]) + var = "".join([ref, str(pos), "/".join(alts)]) return make_hash(var) @@ -44,7 +46,7 @@ def get_params(url): except IndexError: return params p_str = p_str.split(" ")[0] - p_str = p_str.split('&') + p_str = p_str.split("&") for p in p_str: k, v = p.split("=") params[k] = v @@ -58,27 +60,73 @@ def median(lst): lstLen = len(lst) index = (lstLen - 1) // 2 - if (lstLen % 2): + if lstLen % 2: return sortedLst[index] else: return (sortedLst[index] + sortedLst[index + 1]) / 2.0 def load_json(f): - with open(f, 'r') as infile: + with open(f, "r") as infile: return json.load(infile) def lazyprop(fn): - attr_name = '_' + fn.__name__ + attr_name = "_" + fn.__name__ @property def _lazyprop(self): if not hasattr(self, attr_name): setattr(self, attr_name, fn(self)) return getattr(self, attr_name) + return _lazyprop + def seq_to_kmers(seq, kmer_size): - for i in range(len(seq)-kmer_size+1): - yield seq[i:i+kmer_size] + for i in range(len(seq) - kmer_size + 1): + yield seq[i : i + kmer_size] + + +def _x_mutation_fixed_var_name(var_name): + """Takes mutation name from results base_json. If it is an "X" amino acid + mutation, returns new name, where X is fixed with correct amino acid name. + Otherwise returns None""" + # (prefix-) (--middle-) + # Example var_name with an X: "katG_S315X-GCT2155167GGT" + # | ||| + # aa codon + match = re.match( + r"""(?P<prefix>.*_[A-Z][0-9]+)(?P<aa>[A-Z])(?P<middle>-[ACGT]{3}[0-9]+)(?P<codon>[ACGT]{3})""", + var_name, + ) + if match is None or match.group("aa") != "X": + return None + try: + amino_acid = str(Seq(match.group("codon")).translate()) + except: + return None + return ( + match.group("prefix") + + amino_acid + + match.group("middle") + + match.group("codon") + ) + + +def fix_amino_acid_X_variants_keys(dict_to_fix): + """The way panels and variants work mean that the 'any amino acid' variants + look like eg H445X, where X is any amino acid other than H. Users want to + know the actual change. This function changes all keys in dict_to_fix that + have those variants, changing the X to the actual amino acid.""" + keys_to_replace = {} + for key in dict_to_fix: + new_key = _x_mutation_fixed_var_name(key) + if new_key is not None: + assert new_key not in keys_to_replace + assert new_key not in dict_to_fix + keys_to_replace[key] = new_key + + for key, new_key in keys_to_replace.items(): + dict_to_fix[new_key] = dict_to_fix[key] + del dict_to_fix[key]
X mutations in 201901 panel Dear mykrobe team, In the 201901 probe set, many (most?) putative mutations are annotated as "X". For example, all resistance mutations at position 445 in the rpoB gene is now annotated as H445X. Our clinicians are very interested in the actual amino acid change, so at the moment I am doing this with a fairly flimsy in-house script. What I'm wondering is if there are any plans to change the probe annotations back to their specific amino acid changes, or if there are any other tricks to getting the actual changes annotated in the output? Best regards, Ola Brynildsrud Norwegian Institute of Public Health
Mykrobe-tools/mykrobe
diff --git a/tests/utils_test.py b/tests/utils_test.py new file mode 100644 index 0000000..1a9ac1e --- /dev/null +++ b/tests/utils_test.py @@ -0,0 +1,26 @@ +import pytest + +from mykrobe import utils + + +def test_x_mutation_fixed_var_name(): + assert "katG_S315G-GCT2155167GGT" == utils._x_mutation_fixed_var_name( + "katG_S315X-GCT2155167GGT" + ) + assert utils._x_mutation_fixed_var_name("katG_S315.-GCT2155167GGT") is None + assert utils._x_mutation_fixed_var_name("katG_S315X-GCT2155167NNN") is None + + +def test_fix_amino_acid_X_variants_keys(): + test_dict = { + "foo": "bar", + "katG_S315X-GCT2155167GGT": "baz", + "katG_S315C-GCT2155167CTT": "baz", + } + + utils.fix_amino_acid_X_variants_keys(test_dict) + assert test_dict == { + "foo": "bar", + "katG_S315G-GCT2155167GGT": "baz", + "katG_S315C-GCT2155167CTT": "baz", + }
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 2, "test_score": 1 }, "num_modified_files": 2 }
version: 0.8
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": null, "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
anytree==2.8.0 attrs==22.2.0 biopython==1.79 certifi==2021.5.30 charset-normalizer==2.0.12 coverage==6.2 Cython==3.0.12 distlib==0.3.9 filelock==3.4.1 idna==3.10 importlib-metadata==4.8.3 importlib-resources==5.4.0 iniconfig==1.1.1 mongoengine==0.24.2 -e git+https://github.com/Mykrobe-tools/mykrobe.git@98befa765550e9b0aeb80d0f4f01cf765ca44bdb#egg=mykrobe numpy==1.15.0 packaging==21.3 platformdirs==2.4.0 pluggy==1.0.0 py==1.11.0 pymongo==4.1.1 pyparsing==3.1.4 pytest==7.0.1 pytest-cov==4.0.0 PyVCF==0.4.3 requests==2.27.1 six==1.17.0 toml==0.10.2 tomli==1.2.3 tox==3.28.0 typing_extensions==4.1.1 urllib3==1.26.20 virtualenv==20.17.1 zipp==3.6.0
name: mykrobe channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - anytree==2.8.0 - attrs==22.2.0 - biopython==1.79 - charset-normalizer==2.0.12 - coverage==6.2 - cython==3.0.12 - distlib==0.3.9 - filelock==3.4.1 - idna==3.10 - importlib-metadata==4.8.3 - importlib-resources==5.4.0 - iniconfig==1.1.1 - mongoengine==0.24.2 - numpy==1.15.0 - packaging==21.3 - platformdirs==2.4.0 - pluggy==1.0.0 - py==1.11.0 - pymongo==4.1.1 - pyparsing==3.1.4 - pytest==7.0.1 - pytest-cov==4.0.0 - pyvcf==0.4.3 - requests==2.27.1 - six==1.17.0 - toml==0.10.2 - tomli==1.2.3 - tox==3.28.0 - typing-extensions==4.1.1 - urllib3==1.26.20 - virtualenv==20.17.1 - zipp==3.6.0 prefix: /opt/conda/envs/mykrobe
[ "tests/utils_test.py::test_x_mutation_fixed_var_name", "tests/utils_test.py::test_fix_amino_acid_X_variants_keys" ]
[]
[]
[]
license_name: MIT License
__index_level_0__: 8,669
num_tokens_patch: 2,882
before_filepaths: [ "src/mykrobe/cmds/amr.py", "src/mykrobe/utils.py" ]
instance_id: astanin__python-tabulate-96
base_commit: 2552e6dfb23cac990aeabb27b86745878d62247e
created_at: 2020-10-10 14:15:03
environment_setup_commit: 3f0757e117ed2ca1171bbf84b61793f353d67282
diff --git a/tabulate.py b/tabulate.py index 5d57167..d611dc1 100755 --- a/tabulate.py +++ b/tabulate.py @@ -1022,7 +1022,7 @@ def _normalize_tabular_data(tabular_data, headers, showindex="default"): elif hasattr(tabular_data, "index"): # values is a property, has .index => it's likely a pandas.DataFrame (pandas 0.11.0) keys = list(tabular_data) - if tabular_data.index.name is not None: + if showindex in ["default", "always", True] and tabular_data.index.name is not None: if isinstance(tabular_data.index.name, list): keys[:0] = tabular_data.index.name else:
Shift headers wrongly when do tabulate(headers="keys", showindex=False) with pandas.DataFrame with named index ```python import pandas as pd from tabulate import tabulate df = pd.DataFrame({"a": [0,1,2]}, index=pd.Index([1,2,3], name="b")) print(tabulate(df, headers="keys", showindex=False)) ``` Expected result is: ``` a --- 0 1 2 ``` but got (index name is wrongly used as a header): ``` b --- 0 1 2 ```
astanin/python-tabulate
diff --git a/test/test_output.py b/test/test_output.py index 0e72c71..061d302 100644 --- a/test/test_output.py +++ b/test/test_output.py @@ -1363,7 +1363,8 @@ def test_pandas_without_index(): import pandas df = pandas.DataFrame( - [["one", 1], ["two", None]], columns=["string", "number"], index=["a", "b"] + [["one", 1], ["two", None]], columns=["string", "number"], + index=pandas.Index(["a", "b"], name="index") ) expected = "\n".join( [
{ "commit_name": "head_commit", "failed_lite_validators": [], "has_test_patch": true, "is_lite": true, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 0 }, "num_modified_files": 1 }
version: 0.8
{ "env_vars": null, "env_yml_path": [], "install": "pip install -e .[widechars]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "wcwidth" ], "pre_install": [], "python": "3.8", "reqs_path": [ ".circleci/requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
cachetools==5.5.2 chardet==5.2.0 colorama==0.4.6 distlib==0.3.9 exceptiongroup==1.2.2 filelock==3.16.1 iniconfig==2.1.0 numpy==1.24.4 packaging==24.2 pandas==2.0.3 platformdirs==4.3.6 pluggy==1.5.0 pyproject-api==1.8.0 pytest==8.3.5 python-dateutil==2.9.0.post0 pytz==2025.2 six==1.17.0 -e git+https://github.com/astanin/python-tabulate.git@2552e6dfb23cac990aeabb27b86745878d62247e#egg=tabulate tomli==2.2.1 tox==4.25.0 typing_extensions==4.13.0 tzdata==2025.2 virtualenv==20.29.3 wcwidth==0.2.13
name: python-tabulate channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=24.2=py38h06a4308_0 - python=3.8.20=he870216_0 - readline=8.2=h5eee18b_0 - setuptools=75.1.0=py38h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.44.0=py38h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - cachetools==5.5.2 - chardet==5.2.0 - colorama==0.4.6 - distlib==0.3.9 - exceptiongroup==1.2.2 - filelock==3.16.1 - iniconfig==2.1.0 - numpy==1.24.4 - packaging==24.2 - pandas==2.0.3 - platformdirs==4.3.6 - pluggy==1.5.0 - pyproject-api==1.8.0 - pytest==8.3.5 - python-dateutil==2.9.0.post0 - pytz==2025.2 - six==1.17.0 - tomli==2.2.1 - tox==4.25.0 - typing-extensions==4.13.0 - tzdata==2025.2 - virtualenv==20.29.3 - wcwidth==0.2.13 prefix: /opt/conda/envs/python-tabulate
[ "test/test_output.py::test_pandas_without_index" ]
[]
[ "test/test_output.py::test_plain", "test/test_output.py::test_plain_headerless", "test/test_output.py::test_plain_multiline_headerless", "test/test_output.py::test_plain_multiline", "test/test_output.py::test_plain_multiline_with_empty_cells", "test/test_output.py::test_plain_multiline_with_empty_cells_headerless", "test/test_output.py::test_simple", "test/test_output.py::test_simple_multiline_2", "test/test_output.py::test_simple_headerless", "test/test_output.py::test_simple_multiline_headerless", "test/test_output.py::test_simple_multiline", "test/test_output.py::test_simple_multiline_with_empty_cells", "test/test_output.py::test_simple_multiline_with_empty_cells_headerless", "test/test_output.py::test_github", "test/test_output.py::test_grid", "test/test_output.py::test_grid_wide_characters", "test/test_output.py::test_grid_headerless", "test/test_output.py::test_grid_multiline_headerless", "test/test_output.py::test_grid_multiline", "test/test_output.py::test_grid_multiline_with_empty_cells", "test/test_output.py::test_grid_multiline_with_empty_cells_headerless", "test/test_output.py::test_fancy_grid", "test/test_output.py::test_fancy_grid_headerless", "test/test_output.py::test_fancy_grid_multiline_headerless", "test/test_output.py::test_fancy_grid_multiline", "test/test_output.py::test_fancy_grid_multiline_with_empty_cells", "test/test_output.py::test_fancy_grid_multiline_with_empty_cells_headerless", "test/test_output.py::test_pipe", "test/test_output.py::test_pipe_headerless", "test/test_output.py::test_presto", "test/test_output.py::test_presto_headerless", "test/test_output.py::test_presto_multiline_headerless", "test/test_output.py::test_presto_multiline", "test/test_output.py::test_presto_multiline_with_empty_cells", "test/test_output.py::test_presto_multiline_with_empty_cells_headerless", "test/test_output.py::test_orgtbl", "test/test_output.py::test_orgtbl_headerless", "test/test_output.py::test_psql", "test/test_output.py::test_psql_headerless", "test/test_output.py::test_psql_multiline_headerless", "test/test_output.py::test_psql_multiline", "test/test_output.py::test_psql_multiline_with_empty_cells", "test/test_output.py::test_psql_multiline_with_empty_cells_headerless", "test/test_output.py::test_pretty", "test/test_output.py::test_pretty_headerless", "test/test_output.py::test_pretty_multiline_headerless", "test/test_output.py::test_pretty_multiline", "test/test_output.py::test_pretty_multiline_with_empty_cells", "test/test_output.py::test_pretty_multiline_with_empty_cells_headerless", "test/test_output.py::test_jira", "test/test_output.py::test_jira_headerless", "test/test_output.py::test_rst", "test/test_output.py::test_rst_with_empty_values_in_first_column", "test/test_output.py::test_rst_headerless", "test/test_output.py::test_rst_multiline", "test/test_output.py::test_rst_multiline_with_empty_cells", "test/test_output.py::test_rst_multiline_with_empty_cells_headerless", "test/test_output.py::test_mediawiki", "test/test_output.py::test_mediawiki_headerless", "test/test_output.py::test_moinmoin", "test/test_output.py::test_youtrack", "test/test_output.py::test_moinmoin_headerless", "test/test_output.py::test_html", "test/test_output.py::test_unsafehtml", "test/test_output.py::test_html_headerless", "test/test_output.py::test_unsafehtml_headerless", "test/test_output.py::test_latex", "test/test_output.py::test_latex_raw", "test/test_output.py::test_latex_headerless", "test/test_output.py::test_latex_booktabs", "test/test_output.py::test_latex_booktabs_headerless", 
"test/test_output.py::test_textile", "test/test_output.py::test_textile_with_header", "test/test_output.py::test_textile_with_center_align", "test/test_output.py::test_no_data", "test/test_output.py::test_empty_data", "test/test_output.py::test_no_data_without_headers", "test/test_output.py::test_empty_data_without_headers", "test/test_output.py::test_floatfmt", "test/test_output.py::test_floatfmt_multi", "test/test_output.py::test_colalign_multi", "test/test_output.py::test_float_conversions", "test/test_output.py::test_missingval", "test/test_output.py::test_missingval_multi", "test/test_output.py::test_column_alignment", "test/test_output.py::test_unaligned_separated", "test/test_output.py::test_pandas_with_index", "test/test_output.py::test_pandas_rst_with_index", "test/test_output.py::test_pandas_rst_with_named_index", "test/test_output.py::test_dict_like_with_index", "test/test_output.py::test_list_of_lists_with_index", "test/test_output.py::test_list_of_lists_with_supplied_index", "test/test_output.py::test_list_of_lists_with_index_firstrow", "test/test_output.py::test_disable_numparse_default", "test/test_output.py::test_disable_numparse_true", "test/test_output.py::test_disable_numparse_list", "test/test_output.py::test_preserve_whitespace" ]
PASS_TO_FAIL: []
license_name: MIT License
__index_level_0__: 8,671
num_tokens_patch: 189
before_filepaths: [ "tabulate.py" ]
instance_id: globocom__m3u8-231
base_commit: f9a731ea7f1b5dfe57630a188637ebd944edeb0d
created_at: 2020-10-10 15:13:28
environment_setup_commit: f9a731ea7f1b5dfe57630a188637ebd944edeb0d
leandromoreira: thanks @RafalLukawiecki , @mauricioabreu when wer are going to release a new version let's add the proper tag on the read.me https://github.com/globocom/m3u8#supported-tags RafalLukawiecki: > thanks @RafalLukawiecki , @mauricioabreu when wer are going to release a new version let's add the proper tag on the read.me https://github.com/globocom/m3u8#supported-tags You are very welcome @leandromoreira and @mauricioabreu. As for adding info on the tag in the readme, you already show the `#EXT-X-I-FRAMES-ONLY` tag there. This PR merely adds support for an _attribute_ of that tag. As far as I can see you are not showing a majority of tag attributes in the readme—there are a lot of them, otherwise, to add. :)
diff --git a/m3u8/model.py b/m3u8/model.py index 1745535..46911e4 100644 --- a/m3u8/model.py +++ b/m3u8/model.py @@ -820,8 +820,8 @@ class IFramePlaylist(BasePathMixin): Attributes: `iframe_stream_info` is a named tuple containing the attributes: - `program_id`, `bandwidth`, `codecs` and `resolution` which - is a tuple (w, h) of integers + `program_id`, `bandwidth`, `average_bandwidth`, `codecs` and + `resolution` which is a tuple (w, h) of integers More info: http://tools.ietf.org/html/draft-pantos-http-live-streaming-07#section-3.3.13 ''' @@ -839,13 +839,13 @@ class IFramePlaylist(BasePathMixin): self.iframe_stream_info = StreamInfo( bandwidth=iframe_stream_info.get('bandwidth'), + average_bandwidth=iframe_stream_info.get('average_bandwidth'), video=iframe_stream_info.get('video'), # Audio, subtitles, and closed captions should not exist in # EXT-X-I-FRAME-STREAM-INF, so just hardcode them to None. audio=None, subtitles=None, closed_captions=None, - average_bandwidth=None, program_id=iframe_stream_info.get('program_id'), resolution=resolution_pair, codecs=iframe_stream_info.get('codecs'), @@ -861,6 +861,9 @@ class IFramePlaylist(BasePathMixin): if self.iframe_stream_info.bandwidth: iframe_stream_inf.append('BANDWIDTH=%d' % self.iframe_stream_info.bandwidth) + if self.iframe_stream_info.average_bandwidth: + iframe_stream_inf.append('AVERAGE-BANDWIDTH=%d' % + self.iframe_stream_info.average_bandwidth) if self.iframe_stream_info.resolution: res = (str(self.iframe_stream_info.resolution[0]) + 'x' + str(self.iframe_stream_info.resolution[1])) diff --git a/m3u8/parser.py b/m3u8/parser.py index 2bce015..d7b51d3 100644 --- a/m3u8/parser.py +++ b/m3u8/parser.py @@ -302,6 +302,7 @@ def _parse_i_frame_stream_inf(line, data): atribute_parser = remove_quotes_parser('codecs', 'uri') atribute_parser["program_id"] = int atribute_parser["bandwidth"] = int + atribute_parser["average_bandwidth"] = int iframe_stream_info = _parse_attribute_list(protocol.ext_x_i_frame_stream_inf, line, atribute_parser) iframe_playlist = {'uri': iframe_stream_info.pop('uri'), 'iframe_stream_info': iframe_stream_info}
Output Average Bandwidth for IFramePlaylist iframe_stream_info sets average bandwidth to None at https://github.com/globocom/m3u8/blob/master/m3u8/model.py#L847 It should be passed along if available per https://tools.ietf.org/html/rfc8216#section-4.3.4.3 and https://tools.ietf.org/html/rfc8216#section-4.3.4.2
globocom/m3u8
diff --git a/tests/playlists.py b/tests/playlists.py index 1d763da..ae908c1 100755 --- a/tests/playlists.py +++ b/tests/playlists.py @@ -1072,4 +1072,22 @@ testvideo-1596635519-4770290994-a0e5087d.ts?hdntl=exp=1596678764~acl=/*~data=hdn #EXTINF:5.0000, ''' +VARIANT_PLAYLIST_WITH_IFRAME_AVERAGE_BANDWIDTH = ''' +#EXTM3U +#EXT-X-STREAM-INF:BANDWIDTH=800000,RESOLUTION=624x352,CODECS="avc1.4d001f, mp4a.40.5" +video-800k.m3u8 +#EXT-X-STREAM-INF:BANDWIDTH=1200000,CODECS="avc1.4d001f, mp4a.40.5" +video-1200k.m3u8 +#EXT-X-STREAM-INF:BANDWIDTH=400000,CODECS="avc1.4d001f, mp4a.40.5" +video-400k.m3u8 +#EXT-X-STREAM-INF:BANDWIDTH=150000,CODECS="avc1.4d001f, mp4a.40.5" +video-150k.m3u8 +#EXT-X-STREAM-INF:BANDWIDTH=64000,CODECS="mp4a.40.5" +video-64k.m3u8 +#EXT-X-I-FRAME-STREAM-INF:BANDWIDTH=151288,RESOLUTION=624x352,CODECS="avc1.4d001f",URI="video-800k-iframes.m3u8" +#EXT-X-I-FRAME-STREAM-INF:BANDWIDTH=193350,AVERAGE_BANDWIDTH=155000,CODECS="avc1.4d001f",URI="video-1200k-iframes.m3u8" +#EXT-X-I-FRAME-STREAM-INF:BANDWIDTH=83598,AVERAGE_BANDWIDTH=65000,CODECS="avc1.4d001f",URI="video-400k-iframes.m3u8" +#EXT-X-I-FRAME-STREAM-INF:BANDWIDTH=38775,AVERAGE_BANDWIDTH=30000,CODECS="avc1.4d001f",URI="video-150k-iframes.m3u8" +''' + del abspath, dirname, join diff --git a/tests/test_parser.py b/tests/test_parser.py index 8837048..acd6632 100644 --- a/tests/test_parser.py +++ b/tests/test_parser.py @@ -491,3 +491,27 @@ def test_gap_in_parts(): assert data['segments'][0]['parts'][1]['gap'] == 'YES' assert data['segments'][0]['parts'][2]['gap_tag'] == True assert data['segments'][0]['parts'][2].get('gap', None) is None + +def test_should_parse_variant_playlist_with_iframe_with_average_bandwidth(): + data = m3u8.parse(playlists.VARIANT_PLAYLIST_WITH_IFRAME_AVERAGE_BANDWIDTH) + iframe_playlists = list(data['iframe_playlists']) + + assert True == data['is_variant'] + + assert 4 == len(iframe_playlists) + + assert 151288 == iframe_playlists[0]['iframe_stream_info']['bandwidth'] + # Check for absence of average_bandwidth if not given in the playlist + assert 'average_bandwidth' not in iframe_playlists[0]['iframe_stream_info'] + assert '624x352' == iframe_playlists[0]['iframe_stream_info']['resolution'] + assert 'avc1.4d001f' == iframe_playlists[0]['iframe_stream_info']['codecs'] + assert 'video-800k-iframes.m3u8' == iframe_playlists[0]['uri'] + + assert 38775 == iframe_playlists[-1]['iframe_stream_info']['bandwidth'] + assert 'avc1.4d001f' == ( + iframe_playlists[-1]['iframe_stream_info']['codecs'] + ) + assert 'video-150k-iframes.m3u8' == iframe_playlists[-1]['uri'] + assert 155000 == iframe_playlists[1]['iframe_stream_info']['average_bandwidth'] + assert 65000 == iframe_playlists[2]['iframe_stream_info']['average_bandwidth'] + assert 30000 == iframe_playlists[3]['iframe_stream_info']['average_bandwidth'] diff --git a/tests/test_variant_m3u8.py b/tests/test_variant_m3u8.py index fbb01e5..8bfbe99 100644 --- a/tests/test_variant_m3u8.py +++ b/tests/test_variant_m3u8.py @@ -163,3 +163,46 @@ http://example.com/hdr.m3u8 def test_variant_playlist_with_multiple_media(): variant_m3u8 = m3u8.loads(playlists.MULTI_MEDIA_PLAYLIST) assert variant_m3u8.dumps() == playlists.MULTI_MEDIA_PLAYLIST + + +def test_create_a_variant_m3u8_with_iframe_with_average_bandwidth_playlists(): + variant_m3u8 = m3u8.M3U8() + + subtitles = m3u8.Media('english_sub.m3u8', 'SUBTITLES', 'subs', 'en', + 'English', 'YES', 'YES', 'NO', None) + variant_m3u8.add_media(subtitles) + + low_playlist = m3u8.Playlist( + uri='video-800k.m3u8', + 
stream_info={'bandwidth': 800000, + 'average_bandwidth': 555000, + 'resolution': '624x352', + 'codecs': 'avc1.4d001f, mp4a.40.5', + 'subtitles': 'subs'}, + media=[subtitles], + base_uri='http://example.com/' + ) + low_iframe_playlist = m3u8.IFramePlaylist( + uri='video-800k-iframes.m3u8', + iframe_stream_info={'bandwidth': 151288, + 'average_bandwidth': 111000, + 'resolution': '624x352', + 'codecs': 'avc1.4d001f'}, + base_uri='http://example.com/' + ) + + variant_m3u8.add_playlist(low_playlist) + variant_m3u8.add_iframe_playlist(low_iframe_playlist) + + expected_content = """\ +#EXTM3U +#EXT-X-MEDIA:URI="english_sub.m3u8",TYPE=SUBTITLES,GROUP-ID="subs",\ +LANGUAGE="en",NAME="English",DEFAULT=YES,AUTOSELECT=YES,FORCED=NO +#EXT-X-STREAM-INF:BANDWIDTH=800000,AVERAGE-BANDWIDTH=555000,\ +RESOLUTION=624x352,CODECS="avc1.4d001f, mp4a.40.5",SUBTITLES="subs" +video-800k.m3u8 +#EXT-X-I-FRAME-STREAM-INF:BANDWIDTH=151288,\ +AVERAGE-BANDWIDTH=111000,RESOLUTION=624x352,CODECS="avc1.4d001f",\ +URI="video-800k-iframes.m3u8" +""" + assert expected_content == variant_m3u8.dumps()
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 2 }, "num_modified_files": 2 }
version: 0.7
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov" ], "pre_install": null, "python": "3.7", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
certifi @ file:///croot/certifi_1671487769961/work/certifi coverage==7.2.7 exceptiongroup==1.2.2 importlib-metadata==6.7.0 iniconfig==2.0.0 iso8601==2.1.0 -e git+https://github.com/globocom/m3u8.git@f9a731ea7f1b5dfe57630a188637ebd944edeb0d#egg=m3u8 packaging==24.0 pluggy==1.2.0 pytest==7.4.4 pytest-cov==4.1.0 tomli==2.0.1 typing_extensions==4.7.1 zipp==3.15.0
name: m3u8 channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2022.12.7=py37h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=22.3.1=py37h06a4308_0 - python=3.7.16=h7a1cb2a_0 - readline=8.2=h5eee18b_0 - setuptools=65.6.3=py37h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.38.4=py37h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - coverage==7.2.7 - exceptiongroup==1.2.2 - importlib-metadata==6.7.0 - iniconfig==2.0.0 - iso8601==2.1.0 - packaging==24.0 - pluggy==1.2.0 - pytest==7.4.4 - pytest-cov==4.1.0 - tomli==2.0.1 - typing-extensions==4.7.1 - zipp==3.15.0 prefix: /opt/conda/envs/m3u8
[ "tests/test_parser.py::test_should_parse_variant_playlist_with_iframe_with_average_bandwidth", "tests/test_variant_m3u8.py::test_create_a_variant_m3u8_with_iframe_with_average_bandwidth_playlists" ]
[]
[ "tests/test_parser.py::test_should_parse_simple_playlist_from_string", "tests/test_parser.py::test_should_parse_non_integer_duration_from_playlist_string", "tests/test_parser.py::test_should_parse_comma_in_title", "tests/test_parser.py::test_should_parse_simple_playlist_from_string_with_different_linebreaks", "tests/test_parser.py::test_should_parse_sliding_window_playlist_from_string", "tests/test_parser.py::test_should_parse_playlist_with_encrypted_segments_from_string", "tests/test_parser.py::test_should_load_playlist_with_iv_from_string", "tests/test_parser.py::test_should_add_key_attribute_to_segment_from_playlist", "tests/test_parser.py::test_should_add_non_key_for_multiple_keys_unencrypted_and_encrypted", "tests/test_parser.py::test_should_handle_key_method_none_and_no_uri_attr", "tests/test_parser.py::test_should_parse_playlist_with_session_encrypted_segments_from_string", "tests/test_parser.py::test_should_load_playlist_with_session_iv_from_string", "tests/test_parser.py::test_should_parse_quoted_title_from_playlist", "tests/test_parser.py::test_should_parse_unquoted_title_from_playlist", "tests/test_parser.py::test_should_parse_variant_playlist", "tests/test_parser.py::test_should_parse_variant_playlist_with_cc_subtitles_and_audio", "tests/test_parser.py::test_should_parse_variant_playlist_with_average_bandwidth", "tests/test_parser.py::test_should_parse_variant_playlist_with_video_range", "tests/test_parser.py::test_should_parse_variant_playlist_with_bandwidth_as_float", "tests/test_parser.py::test_should_parse_variant_playlist_with_iframe_playlists", "tests/test_parser.py::test_should_parse_variant_playlist_with_alt_iframe_playlists_layout", "tests/test_parser.py::test_should_parse_iframe_playlist", "tests/test_parser.py::test_should_parse_playlist_using_byteranges", "tests/test_parser.py::test_should_parse_endlist_playlist", "tests/test_parser.py::test_should_parse_ALLOW_CACHE", "tests/test_parser.py::test_should_parse_VERSION", "tests/test_parser.py::test_should_parse_program_date_time_from_playlist", "tests/test_parser.py::test_should_parse_scte35_from_playlist", "tests/test_parser.py::test_should_parse_envivio_cue_playlist", "tests/test_parser.py::test_should_parse_no_duration_cue_playlist", "tests/test_parser.py::test_parse_simple_playlist_messy", "tests/test_parser.py::test_parse_simple_playlist_messy_strict", "tests/test_parser.py::test_commaless_extinf", "tests/test_parser.py::test_commaless_extinf_strict", "tests/test_parser.py::test_should_parse_segment_map_uri", "tests/test_parser.py::test_should_parse_segment_map_uri_with_byterange", "tests/test_parser.py::test_should_parse_multiple_map_attributes", "tests/test_parser.py::test_should_parse_empty_uri_with_base_path", "tests/test_parser.py::test_should_parse_audio_channels", "tests/test_parser.py::test_should_parse_start_with_negative_time_offset", "tests/test_parser.py::test_should_parse_start_with_precise", "tests/test_parser.py::test_should_parse_session_data", "tests/test_parser.py::test_should_parse_multiple_session_data", "tests/test_parser.py::test_simple_playlist_with_discontinuity_sequence", "tests/test_parser.py::test_simple_playlist_with_custom_tags", "tests/test_parser.py::test_master_playlist_with_frame_rate", "tests/test_parser.py::test_master_playlist_with_unrounded_frame_rate", "tests/test_parser.py::test_low_latency_playlist", "tests/test_parser.py::test_low_latency_with_preload_and_byteranges_playlist", "tests/test_parser.py::test_negative_media_sequence", 
"tests/test_parser.py::test_daterange_simple", "tests/test_parser.py::test_date_range_with_scte_out_and_in", "tests/test_parser.py::test_date_range_in_parts", "tests/test_parser.py::test_gap", "tests/test_parser.py::test_gap_in_parts", "tests/test_variant_m3u8.py::test_create_a_variant_m3u8_with_two_playlists", "tests/test_variant_m3u8.py::test_create_a_variant_m3u8_with_two_playlists_and_two_iframe_playlists", "tests/test_variant_m3u8.py::test_variant_playlist_with_average_bandwidth", "tests/test_variant_m3u8.py::test_variant_playlist_with_video_range", "tests/test_variant_m3u8.py::test_variant_playlist_with_multiple_media" ]
PASS_TO_FAIL: []
license_name: MIT License
__index_level_0__: 8,672
num_tokens_patch: 677
before_filepaths: [ "m3u8/model.py", "m3u8/parser.py" ]
instance_id: StellarCN__py-stellar-base-380
base_commit: c353bdf449fdda0308cd5435b627fb111b18538e
created_at: 2020-10-11 09:21:56
environment_setup_commit: c353bdf449fdda0308cd5435b627fb111b18538e
codecov[bot]: # [Codecov](https://codecov.io/gh/StellarCN/py-stellar-base/pull/380?src=pr&el=h1) Report > Merging [#380](https://codecov.io/gh/StellarCN/py-stellar-base/pull/380?src=pr&el=desc) into [dev](https://codecov.io/gh/StellarCN/py-stellar-base/commit/9682f4420849842a1dc6eb46fb169fa28fefb1ef?el=desc) will **decrease** coverage by `0.00%`. > The diff coverage is `100.00%`. [![Impacted file tree graph](https://codecov.io/gh/StellarCN/py-stellar-base/pull/380/graphs/tree.svg?width=650&height=150&src=pr&token=kHAKcotfqv)](https://codecov.io/gh/StellarCN/py-stellar-base/pull/380?src=pr&el=tree) ```diff @@ Coverage Diff @@ ## dev #380 +/- ## ========================================== - Coverage 98.99% 98.98% -0.01% ========================================== Files 127 127 Lines 7578 7613 +35 ========================================== + Hits 7502 7536 +34 - Misses 76 77 +1 ``` | Flag | Coverage Δ | | |---|---|---| | #unittests | `98.98% <100.00%> (-0.01%)` | :arrow_down: | Flags with carried forward coverage won't be shown. [Click here](https://docs.codecov.io/docs/carryforward-flags#carryforward-flags-in-the-pull-request-comment) to find out more. | [Impacted Files](https://codecov.io/gh/StellarCN/py-stellar-base/pull/380?src=pr&el=tree) | Coverage Δ | | |---|---|---| | [stellar\_sdk/sep/stellar\_web\_authentication.py](https://codecov.io/gh/StellarCN/py-stellar-base/pull/380/diff?src=pr&el=tree#diff-c3RlbGxhcl9zZGsvc2VwL3N0ZWxsYXJfd2ViX2F1dGhlbnRpY2F0aW9uLnB5) | `91.89% <100.00%> (-0.53%)` | :arrow_down: | | [tests/sep/test\_stellar\_web\_authentication.py](https://codecov.io/gh/StellarCN/py-stellar-base/pull/380/diff?src=pr&el=tree#diff-dGVzdHMvc2VwL3Rlc3Rfc3RlbGxhcl93ZWJfYXV0aGVudGljYXRpb24ucHk=) | `100.00% <100.00%> (ø)` | |
diff --git a/stellar_sdk/sep/stellar_web_authentication.py b/stellar_sdk/sep/stellar_web_authentication.py index 0d287b37..2ec477ac 100644 --- a/stellar_sdk/sep/stellar_web_authentication.py +++ b/stellar_sdk/sep/stellar_web_authentication.py @@ -49,7 +49,8 @@ def build_challenge_transaction( :param server_secret: secret key for server's stellar.toml `SIGNING_KEY`. :param client_account_id: The stellar account that the wallet wishes to authenticate with the server. :param domain_name: The `fully qualified domain name <https://en.wikipedia.org/wiki/Fully_qualified_domain_name>`_ - of the service requiring authentication, for example: `example.com`. + of the service requiring authentication, for example: `example.com`. (The domain_name field is reserved for + future use and not used.) :param network_passphrase: The network to connect to for verifying and retrieving additional attributes from. (ex. 'Public Global Stellar Network ; September 2015') :param timeout: Challenge duration in seconds (default to 15 minutes). @@ -96,7 +97,8 @@ def read_challenge_transaction( :param challenge_transaction: SEP0010 transaction challenge transaction in base64. :param server_account_id: public key for server's account. :param domain_name: The `fully qualified domain name <https://en.wikipedia.org/wiki/Fully_qualified_domain_name>`_ - of the service requiring authentication, for example: `example.com`. + of the service requiring authentication, for example: `example.com`. (The domain_name field is reserved for + future use and not used.) :param network_passphrase: The network to connect to for verifying and retrieving additional attributes from. (ex. 'Public Global Stellar Network ; September 2015') :raises: :exc:`InvalidSep10ChallengeError <stellar_sdk.sep.exceptions.InvalidSep10ChallengeError>` - if the @@ -157,9 +159,9 @@ def read_challenge_transaction( ) # verify that transaction contains a single Manage Data operation and its source account is not null - if len(transaction.operations) != 1: + if len(transaction.operations) < 1: raise InvalidSep10ChallengeError( - "Transaction requires a single ManageData operation." + "Transaction should contain at least one operation." ) manage_data_op = transaction.operations[0] @@ -170,12 +172,6 @@ def read_challenge_transaction( if not client_account: raise InvalidSep10ChallengeError("Operation should have a source account.") - if manage_data_op.data_name != f"{domain_name} auth": - raise InvalidSep10ChallengeError( - "The transaction's operation key name does not " - "include the expected home domain." - ) - if len(manage_data_op.data_value) != 64: raise InvalidSep10ChallengeError( "Operation value encoded as base64 should be 64 bytes long." @@ -187,6 +183,17 @@ def read_challenge_transaction( "Operation value before encoding as base64 should be 48 bytes long." ) + # verify any subsequent operations are manage data ops and source account is the server + for op in transaction.operations[1:]: + if not isinstance(op, ManageData): + raise InvalidSep10ChallengeError("Operation type should be ManageData.") + if op.source is None: + raise InvalidSep10ChallengeError("Operation should have a source account.") + if op.source != server_account_id: + raise InvalidSep10ChallengeError( + "The transaction has operations that are unrecognized." 
+ ) + # verify that transaction envelope has a correct signature by server's signing key if not _verify_te_signed_by_account_id(transaction_envelope, server_account_id): raise InvalidSep10ChallengeError( @@ -215,7 +222,8 @@ def verify_challenge_transaction_signers( :param challenge_transaction: SEP0010 transaction challenge transaction in base64. :param server_account_id: public key for server's account. :param domain_name: The `fully qualified domain name <https://en.wikipedia.org/wiki/Fully_qualified_domain_name>`_ - of the service requiring authentication, for example: `example.com`. + of the service requiring authentication, for example: `example.com`. (The domain_name field is reserved for + future use and not used.) :param network_passphrase: The network to connect to for verifying and retrieving additional attributes from. (ex. 'Public Global Stellar Network ; September 2015') :param signers: The signers of client account. @@ -291,7 +299,8 @@ def verify_challenge_transaction_signed_by_client( :param challenge_transaction: SEP0010 transaction challenge transaction in base64. :param server_account_id: public key for server's account. :param domain_name: The `fully qualified domain name <https://en.wikipedia.org/wiki/Fully_qualified_domain_name>`_ - of the service requiring authentication, for example: `example.com`. + of the service requiring authentication, for example: `example.com`. (The domain_name field is reserved for + future use and not used.) :param network_passphrase: The network to connect to for verifying and retrieving additional attributes from. (ex. 'Public Global Stellar Network ; September 2015') @@ -320,7 +329,8 @@ def verify_challenge_transaction_signed_by_client_master_key( :param challenge_transaction: SEP0010 transaction challenge transaction in base64. :param server_account_id: public key for server's account. :param domain_name: The `fully qualified domain name <https://en.wikipedia.org/wiki/Fully_qualified_domain_name>`_ - of the service requiring authentication, for example: `example.com`. + of the service requiring authentication, for example: `example.com`. (The domain_name field is reserved for + future use and not used.) :param network_passphrase: The network to connect to for verifying and retrieving additional attributes from. (ex. 'Public Global Stellar Network ; September 2015') @@ -351,7 +361,8 @@ def verify_challenge_transaction_threshold( :param challenge_transaction: SEP0010 transaction challenge transaction in base64. :param server_account_id: public key for server's account. :param domain_name: The `fully qualified domain name <https://en.wikipedia.org/wiki/Fully_qualified_domain_name>`_ - of the service requiring authentication, for example: `example.com`. + of the service requiring authentication, for example: `example.com`. (The domain_name field is reserved for + future use and not used.) :param network_passphrase: The network to connect to for verifying and retrieving additional attributes from. (ex. 'Public Global Stellar Network ; September 2015') :param threshold: The medThreshold on the client account. @@ -401,7 +412,8 @@ def verify_challenge_transaction( :param challenge_transaction: SEP0010 transaction challenge transaction in base64. :param server_account_id: public key for server's account. :param domain_name: The `fully qualified domain name <https://en.wikipedia.org/wiki/Fully_qualified_domain_name>`_ - of the service requiring authentication, for example: `example.com`. 
+ of the service requiring authentication, for example: `example.com`. (The domain_name field is reserved for + future use and not used.) :param network_passphrase: The network to connect to for verifying and retrieving additional attributes from. (ex. 'Public Global Stellar Network ; September 2015') :raises: :exc:`InvalidSep10ChallengeError <stellar_sdk.sep.exceptions.InvalidSep10ChallengeError>` - if the
Implement SEP-10 2.1.0 Support ## TL;DR The SEP-10 protocol has been updated to relax the requirements of the single Manage Data operation included in SEP-10 challenge transactions, and **should be implemented by October 21st**. On this date the SDF will announce the SDK updates to Anchors and Wallets, urging them to update as soon as possible. ## Background To provide context, the [SEP-10 2.0 changes](https://github.com/stellar/stellar-protocol/pull/708) replaced SEP-10 challenges' Manage Data operation `anchorName` key with a `home_domain` key. However, we quickly realized there was confusion and misunderstanding about the value of the `home_domain` parameter introduced in most SDKs earlier. Specifically, some users of the SDK believe the `home_domain` value is the home domain used to request the SEP-10 challenge, while others believe it is the home domain that requires the JWT authentication token. This is problematic because SDK's implemented equality checks on the `home_domain` value of the Manage Data operation, so mismatched domains would result in client-server (wallet-anchor) incompatibility. ## SEP-10 2.1.0 Changes The following changes have been made to the SEP-10 protocol: - Clients are no longer required to validate the `home_domain` value in a SEP-10 challenge's first Manage Data operation - SEP-10 challenge transactions may now have additional Manage Data operations as long as the source account of the operation is the SEP-10 server's `SIGNING_KEY` - Note: additional Manage Data operations will be added in future changes, so any added Manage Data operations added by the issuer of the challenge will need to come *after* the Manage Data operations defined in this document ## References [SEP-10 2.1.0 Changes](https://github.com/stellar/stellar-protocol/pull/740) [Go SDK SEP-10 2.1.0 Support](https://github.com/stellar/go/pull/3108) [JavaScript SDK SEP-10 2.1.0 Support](https://github.com/stellar/js-stellar-sdk/pull/580) ## Looking Forward Soon after SEP-10 2.1.0 is implmenented and announced, the SDF will ask SDK's to upgrade to SEP-10 3.0, but no action needs to be taken for this yet.
StellarCN/py-stellar-base
diff --git a/tests/sep/test_stellar_web_authentication.py b/tests/sep/test_stellar_web_authentication.py index 2f0efe18..d16d261a 100644 --- a/tests/sep/test_stellar_web_authentication.py +++ b/tests/sep/test_stellar_web_authentication.py @@ -140,34 +140,6 @@ class TestStellarWebAuthentication: network_passphrase, ) - def test_verify_challenge_tx_donot_contain_any_operation(self): - server_kp = Keypair.random() - client_kp = Keypair.random() - network_passphrase = Network.PUBLIC_NETWORK_PASSPHRASE - domain_name = "example.com" - now = int(time.time()) - server_account = Account(server_kp.public_key, -1) - challenge_te = ( - TransactionBuilder(server_account, network_passphrase, 100) - .add_time_bounds(now, now + 900) - .build() - ) - - challenge_te.sign(server_kp) - challenge_te.sign(client_kp) - challenge_tx_signed = challenge_te.to_xdr() - - with pytest.raises( - InvalidSep10ChallengeError, - match="Transaction requires a single ManageData operation.", - ): - verify_challenge_transaction( - challenge_tx_signed, - server_kp.public_key, - domain_name, - network_passphrase, - ) - def test_verify_challenge_tx_donot_contain_managedata_operation(self): server_kp = Keypair.random() client_kp = Keypair.random() @@ -422,34 +394,211 @@ class TestStellarWebAuthentication: network_passphrase, ) - def test_verify_challenge_transaction_domain_name_mismatch_raise(self): + def test_verify_challenge_tx_allows_any_value_in_home_domain_field(self): server_kp = Keypair.random() client_kp = Keypair.random() - timeout = 600 network_passphrase = Network.PUBLIC_NETWORK_PASSPHRASE domain_name = "example.com" - invalid_domain_name = "invalid_example.com" + now = int(time.time()) + nonce = os.urandom(48) + nonce_encoded = base64.b64encode(nonce) + server_account = Account(server_kp.public_key, -1) + challenge_te = ( + TransactionBuilder(server_account, network_passphrase, 100) + .append_manage_data_op( + data_name="{} auth".format(domain_name), + data_value=nonce_encoded, + source=client_kp.public_key, + ) + .add_time_bounds(now, now + 900) + .build() + ) - challenge = build_challenge_transaction( - server_secret=server_kp.secret, - client_account_id=client_kp.public_key, - domain_name=domain_name, - network_passphrase=network_passphrase, - timeout=timeout, + challenge_te.sign(server_kp) + challenge_te.sign(client_kp) + challenge_tx_signed = challenge_te.to_xdr() + + verify_challenge_transaction( + challenge_tx_signed, server_kp.public_key, None, network_passphrase, ) - transaction = TransactionEnvelope.from_xdr(challenge, network_passphrase) - transaction.sign(client_kp) - challenge_tx = transaction.to_xdr() + def test_verify_challenge_tx_contain_subsequent_manage_data_ops_with_server_account_as_source_account( + self, + ): + server_kp = Keypair.random() + client_kp = Keypair.random() + network_passphrase = Network.PUBLIC_NETWORK_PASSPHRASE + domain_name = "example.com" + now = int(time.time()) + nonce = os.urandom(48) + nonce_encoded = base64.b64encode(nonce) + server_account = Account(server_kp.public_key, -1) + challenge_te = ( + TransactionBuilder(server_account, network_passphrase, 100) + .append_manage_data_op( + data_name="{} auth".format(domain_name), + data_value=nonce_encoded, + source=client_kp.public_key, + ) + .append_manage_data_op( + data_name="data key", + data_value="data value", + source=server_kp.public_key, + ) + .add_time_bounds(now, now + 900) + .build() + ) + + challenge_te.sign(server_kp) + challenge_te.sign(client_kp) + challenge_tx_signed = challenge_te.to_xdr() + + 
verify_challenge_transaction( + challenge_tx_signed, server_kp.public_key, domain_name, network_passphrase, + ) + + def test_verify_challenge_tx_contain_subsequent_manage_data_ops_without_the_server_account_as_the_source_account( + self, + ): + server_kp = Keypair.random() + client_kp = Keypair.random() + network_passphrase = Network.PUBLIC_NETWORK_PASSPHRASE + domain_name = "example.com" + now = int(time.time()) + nonce = os.urandom(48) + nonce_encoded = base64.b64encode(nonce) + server_account = Account(server_kp.public_key, -1) + challenge_te = ( + TransactionBuilder(server_account, network_passphrase, 100) + .append_manage_data_op( + data_name="{} auth".format(domain_name), + data_value=nonce_encoded, + source=client_kp.public_key, + ) + .append_manage_data_op( + data_name="data key", + data_value="data value", + source=client_kp.public_key, + ) + .add_time_bounds(now, now + 900) + .build() + ) + + challenge_te.sign(server_kp) + challenge_te.sign(client_kp) + challenge_tx_signed = challenge_te.to_xdr() + with pytest.raises( InvalidSep10ChallengeError, - match="The transaction's operation key name " - "does not include the expected home domain.", + match="The transaction has operations that are unrecognized.", ): verify_challenge_transaction( - challenge_tx, + challenge_tx_signed, server_kp.public_key, - invalid_domain_name, + domain_name, + network_passphrase, + ) + + def test_verify_challenge_tx_contain_subsequent_ops_that_are_not_manage_data_ops( + self, + ): + server_kp = Keypair.random() + client_kp = Keypair.random() + network_passphrase = Network.PUBLIC_NETWORK_PASSPHRASE + domain_name = "example.com" + now = int(time.time()) + nonce = os.urandom(48) + nonce_encoded = base64.b64encode(nonce) + server_account = Account(server_kp.public_key, -1) + challenge_te = ( + TransactionBuilder(server_account, network_passphrase, 100) + .append_manage_data_op( + data_name="{} auth".format(domain_name), + data_value=nonce_encoded, + source=client_kp.public_key, + ) + .append_bump_sequence_op(bump_to=0, source=server_kp.public_key,) + .add_time_bounds(now, now + 900) + .build() + ) + + challenge_te.sign(server_kp) + challenge_te.sign(client_kp) + challenge_tx_signed = challenge_te.to_xdr() + + with pytest.raises( + InvalidSep10ChallengeError, match="Operation type should be ManageData.", + ): + verify_challenge_transaction( + challenge_tx_signed, + server_kp.public_key, + domain_name, + network_passphrase, + ) + + def test_verify_challenge_tx_contain_subsequent_ops_that_secend_op_no_source_account( + self, + ): + server_kp = Keypair.random() + client_kp = Keypair.random() + network_passphrase = Network.PUBLIC_NETWORK_PASSPHRASE + domain_name = "example.com" + now = int(time.time()) + nonce = os.urandom(48) + nonce_encoded = base64.b64encode(nonce) + server_account = Account(server_kp.public_key, -1) + challenge_te = ( + TransactionBuilder(server_account, network_passphrase, 100) + .append_manage_data_op( + data_name="{} auth".format(domain_name), + data_value=nonce_encoded, + source=client_kp.public_key, + ) + .append_manage_data_op(data_name="Hello", data_value="world") + .add_time_bounds(now, now + 900) + .build() + ) + + challenge_te.sign(server_kp) + challenge_te.sign(client_kp) + challenge_tx_signed = challenge_te.to_xdr() + + with pytest.raises( + InvalidSep10ChallengeError, match="Operation should have a source account.", + ): + verify_challenge_transaction( + challenge_tx_signed, + server_kp.public_key, + domain_name, + network_passphrase, + ) + + def 
test_verify_challenge_tx_contain_zero_op(self,): + server_kp = Keypair.random() + client_kp = Keypair.random() + network_passphrase = Network.PUBLIC_NETWORK_PASSPHRASE + domain_name = "example.com" + now = int(time.time()) + nonce = os.urandom(48) + server_account = Account(server_kp.public_key, -1) + challenge_te = ( + TransactionBuilder(server_account, network_passphrase, 100) + .add_time_bounds(now, now + 900) + .build() + ) + + challenge_te.sign(server_kp) + challenge_te.sign(client_kp) + challenge_tx_signed = challenge_te.to_xdr() + + with pytest.raises( + InvalidSep10ChallengeError, + match="Transaction should contain at least one operation.", + ): + verify_challenge_transaction( + challenge_tx_signed, + server_kp.public_key, + domain_name, network_passphrase, )
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_hyperlinks", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 1 }, "num_modified_files": 1 }
2.8
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "pytest-asyncio", "pytest-httpserver", "pytest-timeout" ], "pre_install": null, "python": "3.8", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
aiohappyeyeballs==2.4.4 aiohttp==3.10.11 aiohttp-sse-client==0.1.7 aiosignal==1.3.1 async-timeout==5.0.1 attrs==25.3.0 certifi==2025.1.31 cffi==1.17.1 charset-normalizer==3.4.1 coverage==7.6.1 crc16==0.1.1 exceptiongroup==1.2.2 frozenlist==1.5.0 idna==3.10 iniconfig==2.1.0 MarkupSafe==2.1.5 mnemonic==0.19 multidict==6.1.0 packaging==24.2 pluggy==1.5.0 propcache==0.2.0 pycparser==2.22 PyNaCl==1.3.0 pytest==8.3.5 pytest-asyncio==0.24.0 pytest-cov==5.0.0 pytest-timeout==2.3.1 pytest_httpserver==1.1.1 requests==2.32.3 six==1.17.0 stellar-base-sseclient==0.0.21 -e git+https://github.com/StellarCN/py-stellar-base.git@c353bdf449fdda0308cd5435b627fb111b18538e#egg=stellar_sdk toml==0.10.2 tomli==2.2.1 typing_extensions==4.13.0 urllib3==2.2.3 Werkzeug==3.0.6 yarl==1.15.2
name: py-stellar-base channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=24.2=py38h06a4308_0 - python=3.8.20=he870216_0 - readline=8.2=h5eee18b_0 - setuptools=75.1.0=py38h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.44.0=py38h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - aiohappyeyeballs==2.4.4 - aiohttp==3.10.11 - aiohttp-sse-client==0.1.7 - aiosignal==1.3.1 - async-timeout==5.0.1 - attrs==25.3.0 - certifi==2025.1.31 - cffi==1.17.1 - charset-normalizer==3.4.1 - coverage==7.6.1 - crc16==0.1.1 - exceptiongroup==1.2.2 - frozenlist==1.5.0 - idna==3.10 - iniconfig==2.1.0 - markupsafe==2.1.5 - mnemonic==0.19 - multidict==6.1.0 - packaging==24.2 - pluggy==1.5.0 - propcache==0.2.0 - pycparser==2.22 - pynacl==1.3.0 - pytest==8.3.5 - pytest-asyncio==0.24.0 - pytest-cov==5.0.0 - pytest-httpserver==1.1.1 - pytest-timeout==2.3.1 - requests==2.32.3 - six==1.17.0 - stellar-base-sseclient==0.0.21 - toml==0.10.2 - tomli==2.2.1 - typing-extensions==4.13.0 - urllib3==2.2.3 - werkzeug==3.0.6 - yarl==1.15.2 prefix: /opt/conda/envs/py-stellar-base
[ "tests/sep/test_stellar_web_authentication.py::TestStellarWebAuthentication::test_verify_challenge_tx_allows_any_value_in_home_domain_field", "tests/sep/test_stellar_web_authentication.py::TestStellarWebAuthentication::test_verify_challenge_tx_contain_subsequent_manage_data_ops_with_server_account_as_source_account", "tests/sep/test_stellar_web_authentication.py::TestStellarWebAuthentication::test_verify_challenge_tx_contain_subsequent_manage_data_ops_without_the_server_account_as_the_source_account", "tests/sep/test_stellar_web_authentication.py::TestStellarWebAuthentication::test_verify_challenge_tx_contain_subsequent_ops_that_are_not_manage_data_ops", "tests/sep/test_stellar_web_authentication.py::TestStellarWebAuthentication::test_verify_challenge_tx_contain_subsequent_ops_that_secend_op_no_source_account", "tests/sep/test_stellar_web_authentication.py::TestStellarWebAuthentication::test_verify_challenge_tx_contain_zero_op" ]
[]
[ "tests/sep/test_stellar_web_authentication.py::TestStellarWebAuthentication::test_challenge_transaction", "tests/sep/test_stellar_web_authentication.py::TestStellarWebAuthentication::test_verify_challenge_transaction", "tests/sep/test_stellar_web_authentication.py::TestStellarWebAuthentication::test_verify_challenge_tx_sequence_not_zero", "tests/sep/test_stellar_web_authentication.py::TestStellarWebAuthentication::test_verify_challenge_tx_source_is_different_to_server_account_id", "tests/sep/test_stellar_web_authentication.py::TestStellarWebAuthentication::test_verify_challenge_tx_donot_contain_managedata_operation", "tests/sep/test_stellar_web_authentication.py::TestStellarWebAuthentication::test_verify_challenge_tx_operation_does_not_contain_the_source_account", "tests/sep/test_stellar_web_authentication.py::TestStellarWebAuthentication::test_verify_challenge_tx_operation_value_is_not_a_64_bytes_base64_string", "tests/sep/test_stellar_web_authentication.py::TestStellarWebAuthentication::test_verify_challenge_tx_transaction_is_not_signed_by_the_server", "tests/sep/test_stellar_web_authentication.py::TestStellarWebAuthentication::test_verify_challenge_tx_transaction_is_not_signed_by_the_client", "tests/sep/test_stellar_web_authentication.py::TestStellarWebAuthentication::test_verify_challenge_tx_dont_contains_timebound", "tests/sep/test_stellar_web_authentication.py::TestStellarWebAuthentication::test_verify_challenge_tx_contains_infinite_timebounds", "tests/sep/test_stellar_web_authentication.py::TestStellarWebAuthentication::test_verify_challenge_tx_not_within_range_of_the_specified_timebounds", "tests/sep/test_stellar_web_authentication.py::TestStellarWebAuthentication::test_verify_transaction_signatures", "tests/sep/test_stellar_web_authentication.py::TestStellarWebAuthentication::test_verify_transaction_signatures_raise_no_signature", "tests/sep/test_stellar_web_authentication.py::TestStellarWebAuthentication::test_verify_challenge_transaction_signers", "tests/sep/test_stellar_web_authentication.py::TestStellarWebAuthentication::test_verify_challenge_transaction_signers_raise_no_signers", "tests/sep/test_stellar_web_authentication.py::TestStellarWebAuthentication::test_verify_challenge_transaction_signers_raise_no_client_signer_found", "tests/sep/test_stellar_web_authentication.py::TestStellarWebAuthentication::test_verify_challenge_transaction_signers_raise_no_server_signature", "tests/sep/test_stellar_web_authentication.py::TestStellarWebAuthentication::test_verify_challenge_transaction_signers_raise_unrecognized_signatures", "tests/sep/test_stellar_web_authentication.py::TestStellarWebAuthentication::test_verify_challenge_transaction_signed_by_client", "tests/sep/test_stellar_web_authentication.py::TestStellarWebAuthentication::test_verify_challenge_transaction_signed_by_client_raise_not_signed", "tests/sep/test_stellar_web_authentication.py::TestStellarWebAuthentication::test_verify_challenge_transaction_threshold", "tests/sep/test_stellar_web_authentication.py::TestStellarWebAuthentication::test_verify_challenge_transaction_threshold_raise_not_meet_threshold", "tests/sep/test_stellar_web_authentication.py::TestStellarWebAuthentication::test_read_challenge_transaction_fee_bump_transaction_raise", "tests/sep/test_stellar_web_authentication.py::TestStellarWebAuthentication::test_verify_challenge_transaction_signed_by_client_master_key_raise_unrecognized_signatures" ]
[]
Apache License 2.0
8,675
1,782
[ "stellar_sdk/sep/stellar_web_authentication.py" ]
robotframework__SeleniumLibrary-1677
ce07a56f990e40c571e3f47c3e10e5ed7c29ed03
2020-10-11 21:09:43
221b2a0bb95f2105aca2e62683781c2dcee82700
diff --git a/src/SeleniumLibrary/__init__.py b/src/SeleniumLibrary/__init__.py index 67fc57cd..de09287d 100644 --- a/src/SeleniumLibrary/__init__.py +++ b/src/SeleniumLibrary/__init__.py @@ -174,9 +174,10 @@ class SeleniumLibrary(DynamicCore): === Implicit XPath strategy === - If the locator starts with ``//`` or ``(//``, the locator is considered - to be an XPath expression. In other words, using ``//div`` is equivalent - to using explicit ``xpath://div``. + If the locator starts with ``//`` or multiple opening parenthesis in front + of the ``//``, the locator is considered to be an XPath expression. In other + words, using ``//div`` is equivalent to using explicit ``xpath://div`` and + ``((//div))`` is equivalent to using explicit ``xpath:((//div))`` Examples: @@ -184,6 +185,7 @@ class SeleniumLibrary(DynamicCore): | `Click Element` | (//div)[2] | The support for the ``(//`` prefix is new in SeleniumLibrary 3.0. + Supporting multiple opening parenthesis is new in SeleniumLibrary 5.0. === Chaining locators === diff --git a/src/SeleniumLibrary/locators/elementfinder.py b/src/SeleniumLibrary/locators/elementfinder.py index 7bd0c58d..6b904ac2 100644 --- a/src/SeleniumLibrary/locators/elementfinder.py +++ b/src/SeleniumLibrary/locators/elementfinder.py @@ -300,7 +300,7 @@ class ElementFinder(ContextAware): return tag, constraints def _parse_locator(self, locator): - if locator.startswith(("//", "(//")): + if re.match(r"\(*//", locator): return "xpath", locator index = self._get_locator_separator_index(locator) if index != -1:
Add support for xpath starting with ((// Currently, we are only treating `//` and `(//` as xpath expressions. Please help to add support also for `((//`. For example, my xpath has a union operator: ```((//label[contains(.,"ABC")]/input[@type="checkbox"]|//label[contains(.,"ABC")]/preceding-sibling::*[@type="checkbox"]))[1]``` It's still a valid xpath expression, but it cannot be detected in SeleniumLibrary. Thank you
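For context, the patch in this record replaces the fixed `//` / `(//` prefix check with a regular expression. A minimal standalone sketch of that detection rule follows; the helper name is mine and is not part of SeleniumLibrary's API:

```python
import re


def is_implicit_xpath(locator):
    # Any number of opening parentheses followed by "//" is treated as XPath,
    # which covers "//div", "(//div)[2]" and the "((//...))[1]" union case above.
    return re.match(r"\(*//", locator) is not None


assert is_implicit_xpath("//div")
assert is_implicit_xpath("(//div)[2]")
assert is_implicit_xpath('((//label[contains(.,"ABC")]/input[@type="checkbox"]))[1]')
assert not is_implicit_xpath("id=my-element")
```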
robotframework/SeleniumLibrary
diff --git a/utest/test/api/approved_files/PluginDocumentation.test_many_plugins.approved.txt b/utest/test/api/approved_files/PluginDocumentation.test_many_plugins.approved.txt index 35722d29..0603c647 100644 --- a/utest/test/api/approved_files/PluginDocumentation.test_many_plugins.approved.txt +++ b/utest/test/api/approved_files/PluginDocumentation.test_many_plugins.approved.txt @@ -117,9 +117,10 @@ Examples: === Implicit XPath strategy === -If the locator starts with ``//`` or ``(//``, the locator is considered -to be an XPath expression. In other words, using ``//div`` is equivalent -to using explicit ``xpath://div``. +If the locator starts with ``//`` or multiple opening parenthesis in front +of the ``//``, the locator is considered to be an XPath expression. In other +words, using ``//div`` is equivalent to using explicit ``xpath://div`` and +``((//div))`` is equivalent to using explicit ``xpath:((//div))`` Examples: @@ -127,6 +128,7 @@ Examples: | `Click Element` | (//div)[2] | The support for the ``(//`` prefix is new in SeleniumLibrary 3.0. +Supporting multiple opening parenthesis is new in SeleniumLibrary 5.0. === Chaining locators === diff --git a/utest/test/locators/test_elementfinder.py b/utest/test/locators/test_elementfinder.py index 75cb3318..1c595848 100644 --- a/utest/test/locators/test_elementfinder.py +++ b/utest/test/locators/test_elementfinder.py @@ -33,6 +33,8 @@ def teardown_function(): def test_implicit_xpath(): _verify_parse_locator("//foo", "xpath", "//foo") _verify_parse_locator("(//foo)", "xpath", "(//foo)") + _verify_parse_locator("((//foo))", "xpath", "((//foo))") + _verify_parse_locator("((((//foo))", "xpath", "((((//foo))") _verify_parse_locator("//id=bar", "xpath", "//id=bar") @@ -90,8 +92,9 @@ def test_registered_strategy_can_be_used_as_prefix(): def _verify_parse_locator(locator, prefix, criteria, finder=None): if not finder: finder = ElementFinder(None) - parse_locator = finder._parse_locator - assert parse_locator(locator), (prefix, criteria) + get_prefix, get_criteria = finder._parse_locator(locator) + assert get_prefix == prefix + assert get_criteria == criteria def test_parent_is_not_webelement(finder):
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 1 }, "num_modified_files": 2 }
5.0
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-mockito", "pytest-approvaltests", "pytest-mock", "pytest-xdist" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
allpairspy==2.5.1 approval_utilities==14.3.1 approvaltests==14.3.1 attrs==25.3.0 beautifulsoup4==4.13.3 certifi==2025.1.31 charset-normalizer==3.4.1 empty-files==0.0.9 exceptiongroup==1.2.2 execnet==2.1.1 h11==0.14.0 idna==3.10 iniconfig==2.1.0 mock==5.2.0 mockito==1.5.4 mrjob==0.7.4 outcome==1.3.0.post0 packaging==24.2 pluggy==1.5.0 pyperclip==1.9.0 PySocks==1.7.1 pytest==8.3.5 pytest-approvaltests==0.2.4 pytest-mock==3.14.0 pytest-mockito==0.0.4 pytest-xdist==3.6.1 PyYAML==6.0.2 requests==2.32.3 robotframework==7.2.2 robotframework-pythonlibcore==4.4.1 -e git+https://github.com/robotframework/SeleniumLibrary.git@ce07a56f990e40c571e3f47c3e10e5ed7c29ed03#egg=robotframework_seleniumlibrary selenium==4.30.0 sniffio==1.3.1 sortedcontainers==2.4.0 soupsieve==2.6 testfixtures==8.3.0 tomli==2.2.1 trio==0.29.0 trio-websocket==0.12.2 typing_extensions==4.13.0 urllib3==2.3.0 websocket-client==1.8.0 wsproto==1.2.0
name: SeleniumLibrary channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - allpairspy==2.5.1 - approval-utilities==14.3.1 - approvaltests==14.3.1 - attrs==25.3.0 - beautifulsoup4==4.13.3 - certifi==2025.1.31 - charset-normalizer==3.4.1 - empty-files==0.0.9 - exceptiongroup==1.2.2 - execnet==2.1.1 - h11==0.14.0 - idna==3.10 - iniconfig==2.1.0 - mock==5.2.0 - mockito==1.5.4 - mrjob==0.7.4 - outcome==1.3.0.post0 - packaging==24.2 - pluggy==1.5.0 - pyperclip==1.9.0 - pysocks==1.7.1 - pytest==8.3.5 - pytest-approvaltests==0.2.4 - pytest-mock==3.14.0 - pytest-mockito==0.0.4 - pytest-xdist==3.6.1 - pyyaml==6.0.2 - requests==2.32.3 - robotframework==7.2.2 - robotframework-pythonlibcore==4.4.1 - robotframework-seleniumlibrary==5.0.0b2.dev1 - selenium==4.30.0 - sniffio==1.3.1 - sortedcontainers==2.4.0 - soupsieve==2.6 - testfixtures==8.3.0 - tomli==2.2.1 - trio==0.29.0 - trio-websocket==0.12.2 - typing-extensions==4.13.0 - urllib3==2.3.0 - websocket-client==1.8.0 - wsproto==1.2.0 prefix: /opt/conda/envs/SeleniumLibrary
[ "utest/test/locators/test_elementfinder.py::test_implicit_xpath" ]
[]
[ "utest/test/locators/test_elementfinder.py::test_no_separator", "utest/test/locators/test_elementfinder.py::test_equal_sign_as_separator", "utest/test/locators/test_elementfinder.py::test_colon_as_separator", "utest/test/locators/test_elementfinder.py::test_use_first_separator_when_both_are_used", "utest/test/locators/test_elementfinder.py::test_preserve_trailing_whitespace", "utest/test/locators/test_elementfinder.py::test_strategy_case_is_not_changed", "utest/test/locators/test_elementfinder.py::test_remove_whitespace_around_prefix_and_separator", "utest/test/locators/test_elementfinder.py::test_separator_without_matching_prefix_is_ignored", "utest/test/locators/test_elementfinder.py::test_registered_strategy_can_be_used_as_prefix", "utest/test/locators/test_elementfinder.py::test_parent_is_not_webelement", "utest/test/locators/test_elementfinder.py::test_find_by_xpath_parent_is_webelement", "utest/test/locators/test_elementfinder.py::test_find_by_identifier_parent_is_webelement", "utest/test/locators/test_elementfinder.py::test_find_by_id_parent_is_webelement", "utest/test/locators/test_elementfinder.py::test_find_by_name_parent_is_webelement", "utest/test/locators/test_elementfinder.py::test_find_by_dom__parent_is_webelement", "utest/test/locators/test_elementfinder.py::test_find_by_sizzle_parent_is_webelement", "utest/test/locators/test_elementfinder.py::test_find_by_link_text_parent_is_webelement", "utest/test/locators/test_elementfinder.py::test_find_by_partial_link_text_parent_is_webelement", "utest/test/locators/test_elementfinder.py::test_find_by_css_parent_is_webelement", "utest/test/locators/test_elementfinder.py::test_find_by_class_parent_is_webelement", "utest/test/locators/test_elementfinder.py::test_find_by_tag_name_parent_is_webelement", "utest/test/locators/test_elementfinder.py::test_find_sc_locator_parent_is_webelement", "utest/test/locators/test_elementfinder.py::test_find_by_default_parent_is_webelement", "utest/test/locators/test_elementfinder.py::test_non_existing_prefix", "utest/test/locators/test_elementfinder.py::test_find_with_no_tag", "utest/test/locators/test_elementfinder.py::test_find_with_explicit_default_strategy", "utest/test/locators/test_elementfinder.py::test_find_with_explicit_default_strategy_and_equals", "utest/test/locators/test_elementfinder.py::test_find_with_tag", "utest/test/locators/test_elementfinder.py::test_find_with_locator_with_apos", "utest/test/locators/test_elementfinder.py::test_find_with_locator_with_quote", "utest/test/locators/test_elementfinder.py::test_find_with_locator_with_quote_and_apos", "utest/test/locators/test_elementfinder.py::test_find_with_a", "utest/test/locators/test_elementfinder.py::test_find_with_link_synonym", "utest/test/locators/test_elementfinder.py::test_find_with_img", "utest/test/locators/test_elementfinder.py::test_find_with_image_synonym", "utest/test/locators/test_elementfinder.py::test_find_with_input", "utest/test/locators/test_elementfinder.py::test_find_with_radio_button_synonym", "utest/test/locators/test_elementfinder.py::test_find_with_checkbox_synonym", "utest/test/locators/test_elementfinder.py::test_find_with_file_upload_synonym", "utest/test/locators/test_elementfinder.py::test_find_with_text_field_synonym", "utest/test/locators/test_elementfinder.py::test_find_with_button", "utest/test/locators/test_elementfinder.py::test_find_with_select", "utest/test/locators/test_elementfinder.py::test_find_with_list_synonym", "utest/test/locators/test_elementfinder.py::test_find_with_implicit_xpath", 
"utest/test/locators/test_elementfinder.py::test_find_by_identifier", "utest/test/locators/test_elementfinder.py::test_find_by_id", "utest/test/locators/test_elementfinder.py::test_find_by_name", "utest/test/locators/test_elementfinder.py::test_find_by_xpath", "utest/test/locators/test_elementfinder.py::test_find_by_dom", "utest/test/locators/test_elementfinder.py::test_find_by_link_text", "utest/test/locators/test_elementfinder.py::test_find_by_partial_link_text", "utest/test/locators/test_elementfinder.py::test_find_by_css_selector", "utest/test/locators/test_elementfinder.py::test_find_by_class_names", "utest/test/locators/test_elementfinder.py::test_find_by_tag_name", "utest/test/locators/test_elementfinder.py::test_find_with_sloppy_prefix", "utest/test/locators/test_elementfinder.py::test_find_with_sloppy_criteria", "utest/test/locators/test_elementfinder.py::test_find_by_id_with_synonym_and_constraints", "utest/test/locators/test_elementfinder.py::test_find_returns_bad_values", "utest/test/locators/test_elementfinder.py::test_usage_of_multiple_locators_using_double_arrow_as_separator", "utest/test/locators/test_elementfinder.py::test_usage_of_multiple_locators_using_list", "utest/test/locators/test_elementfinder.py::test_localtor_split", "utest/test/locators/test_elementfinder.py::test_locator_split_with_non_strings" ]
[]
Apache License 2.0
8,678
482
[ "src/SeleniumLibrary/__init__.py", "src/SeleniumLibrary/locators/elementfinder.py" ]
0b01001001__spectree-64
a091fab020ac26548250c907bae0855273a98778
2020-10-12 13:21:50
a091fab020ac26548250c907bae0855273a98778
diff --git a/setup.py b/setup.py index 1b3cb64..4ef21e6 100644 --- a/setup.py +++ b/setup.py @@ -14,7 +14,7 @@ with open(path.join(here, 'requirements.txt'), encoding='utf-8') as f: setup( name='spectree', - version='0.3.7', + version='0.3.8', author='Keming Yang', author_email='[email protected]', description=('generate OpenAPI document and validate request&response ' diff --git a/spectree/utils.py b/spectree/utils.py index bb5698d..73d6c71 100644 --- a/spectree/utils.py +++ b/spectree/utils.py @@ -54,6 +54,7 @@ def parse_params(func, params, models): 'in': 'query', 'schema': schema, 'required': name in query.get('required', []), + 'description': schema.get('description', ''), }) if hasattr(func, 'headers'): @@ -64,6 +65,7 @@ def parse_params(func, params, models): 'in': 'header', 'schema': schema, 'required': name in headers.get('required', []), + 'description': schema.get('description', ''), }) if hasattr(func, 'cookies'): @@ -74,6 +76,7 @@ def parse_params(func, params, models): 'in': 'cookie', 'schema': schema, 'required': name in cookies.get('required', []), + 'description': schema.get('description', ''), }) return params
[BUG] description for query parameters does not show in swagger ui Hi, when I add a description for a schema used in query, it does not show in swagger ui but does show in Redoc ```py @HELLO.route('/', methods=['GET']) @api.validate(query=HelloForm) def hello(): """ hello docstring :return: """ return 'ok' class HelloForm(BaseModel): """ hello form """ user: str # user name msg: str = Field(description='msg test', example='aa') index: int data: HelloGetListForm list: List[HelloListForm] ``` ![Screenshot 2020-10-12 7.54.52 PM](https://user-images.githubusercontent.com/60063723/95743785-de70f480-0cc4-11eb-857b-fffd3d7e9cdd.png) ![Screenshot 2020-10-12 7.53.59 PM](https://user-images.githubusercontent.com/60063723/95743805-e5980280-0cc4-11eb-99ae-11e6439bae02.png)
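The patch in this record fixes the issue by copying each field's `description` from the generated schema into the OpenAPI parameter object. A rough standalone sketch of that idea; the function name and input shape are illustrative, not spectree's actual internals:

```python
def to_query_parameters(properties, required=()):
    """Build OpenAPI query parameters from JSON-schema properties,
    keeping each field's description so Swagger UI can display it."""
    return [
        {
            "name": name,
            "in": "query",
            "schema": schema,
            "required": name in required,
            "description": schema.get("description", ""),
        }
        for name, schema in properties.items()
    ]


params = to_query_parameters(
    {"msg": {"type": "string", "description": "msg test", "example": "aa"}}
)
assert params[0]["description"] == "msg test"
```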
0b01001001/spectree
diff --git a/tests/common.py b/tests/common.py index 0f2d696..83b4140 100644 --- a/tests/common.py +++ b/tests/common.py @@ -1,7 +1,7 @@ from enum import IntEnum, Enum from typing import List -from pydantic import BaseModel, root_validator +from pydantic import BaseModel, root_validator, Field class Order(IntEnum): @@ -43,7 +43,7 @@ class Cookies(BaseModel): class DemoModel(BaseModel): uid: int limit: int - name: str + name: str = Field(..., description='user name') def get_paths(spec): diff --git a/tests/test_utils.py b/tests/test_utils.py index bf3426d..53dd3e1 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -98,8 +98,10 @@ def test_parse_params(): 'name': 'uid', 'in': 'query', 'required': True, + 'description': '', 'schema': { 'title': 'Uid', 'type': 'integer', } } + assert params[2]['description'] == 'user name'
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_hyperlinks", "has_media", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 2, "test_score": 0 }, "num_modified_files": 2 }
0.3
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[flask,falcon,starlette]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.9", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
annotated-types==0.7.0 anyio==4.9.0 blinker==1.9.0 certifi==2025.1.31 charset-normalizer==3.4.1 click==8.1.8 exceptiongroup==1.2.2 falcon==4.0.2 Flask==3.1.0 idna==3.10 importlib_metadata==8.6.1 iniconfig==2.1.0 itsdangerous==2.2.0 Jinja2==3.1.6 MarkupSafe==3.0.2 packaging==24.2 pluggy==1.5.0 pydantic==2.11.1 pydantic_core==2.33.0 pytest==8.3.5 requests==2.32.3 sniffio==1.3.1 -e git+https://github.com/0b01001001/spectree.git@a091fab020ac26548250c907bae0855273a98778#egg=spectree starlette==0.46.1 tomli==2.2.1 typing-inspection==0.4.0 typing_extensions==4.13.0 urllib3==2.3.0 Werkzeug==3.1.3 zipp==3.21.0
name: spectree channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - annotated-types==0.7.0 - anyio==4.9.0 - blinker==1.9.0 - certifi==2025.1.31 - charset-normalizer==3.4.1 - click==8.1.8 - exceptiongroup==1.2.2 - falcon==4.0.2 - flask==3.1.0 - idna==3.10 - importlib-metadata==8.6.1 - iniconfig==2.1.0 - itsdangerous==2.2.0 - jinja2==3.1.6 - markupsafe==3.0.2 - packaging==24.2 - pluggy==1.5.0 - pydantic==2.11.1 - pydantic-core==2.33.0 - pytest==8.3.5 - requests==2.32.3 - sniffio==1.3.1 - starlette==0.46.1 - tomli==2.2.1 - typing-extensions==4.13.0 - typing-inspection==0.4.0 - urllib3==2.3.0 - werkzeug==3.1.3 - zipp==3.21.0 prefix: /opt/conda/envs/spectree
[ "tests/test_utils.py::test_parse_params" ]
[]
[ "tests/test_utils.py::test_comments", "tests/test_utils.py::test_parse_code", "tests/test_utils.py::test_parse_name", "tests/test_utils.py::test_has_model", "tests/test_utils.py::test_parse_resp", "tests/test_utils.py::test_parse_request" ]
[]
Apache License 2.0
8,681
391
[ "setup.py", "spectree/utils.py" ]
cloud-custodian__cloud-custodian-6203
5dac93a58d2a01663ce8163418eb91b45872eb31
2020-10-12 14:35:14
ba7c6540540bac8215ac7b96b1fe50485da140ff
diff --git a/c7n/resources/ebs.py b/c7n/resources/ebs.py index 3eeb5ab41..e1699c4e9 100644 --- a/c7n/resources/ebs.py +++ b/c7n/resources/ebs.py @@ -513,6 +513,95 @@ class CopySnapshot(BaseAction): "Cross region copy complete %s", ",".join(copy_ids)) [email protected]_registry.register('set-permissions') +class SetPermissions(BaseAction): + """Action to set permissions for creating volumes from a snapshot + + Use the 'add' and 'remove' parameters to control which accounts to + add or remove respectively. The default is to remove any create + volume permissions granted to other AWS accounts. + + Combining this action with the 'cross-account' filter allows you + greater control over which accounts will be removed, e.g. using a + whitelist: + + :example: + + .. code-block:: yaml + + policies: + - name: ebs-dont-share-cross-account + resource: ebs-snapshot + filters: + - type: cross-account + whitelist: + - '112233445566' + actions: + - type: set-permissions + remove: matched + """ + schema = type_schema( + 'set-permissions', + remove={ + 'oneOf': [ + {'enum': ['matched']}, + {'type': 'array', 'items': { + 'type': 'string', 'minLength': 12, 'maxLength': 12}}, + ]}, + add={ + 'type': 'array', 'items': { + 'type': 'string', 'minLength': 12, 'maxLength': 12}}, + ) + + permissions = ('ec2:ModifySnapshotAttribute',) + + def validate(self): + if self.data.get('remove') == 'matched': + found = False + for f in self.manager.iter_filters(): + if isinstance(f, SnapshotCrossAccountAccess): + found = True + break + if not found: + raise PolicyValidationError( + "policy:%s filter:%s with matched requires cross-account filter" % ( + self.manager.ctx.policy.name, self.type)) + + def process(self, snapshots): + client = local_session(self.manager.session_factory).client('ec2') + for i in snapshots: + self.process_image(client, i) + + def process_image(self, client, snapshot): + add_accounts = self.data.get('add', []) + remove_accounts = self.data.get('remove', []) + if not add_accounts and not remove_accounts: + return client.reset_snapshot_attribute( + SnapshotId=snapshot['SnapshotId'], Attribute="createVolumePermission") + if remove_accounts == 'matched': + remove_accounts = snapshot.get( + 'c7n:' + SnapshotCrossAccountAccess.annotation_key) + + remove = [] + remove.extend([{'UserId': a} for a in remove_accounts if a != 'all']) + if 'all' in remove_accounts: + remove.append({'Group': 'all'}) + remove_accounts.remove('all') + + add = [{'UserId': a} for a in add_accounts] + + if remove: + client.modify_snapshot_attribute( + SnapshotId=snapshot['SnapshotId'], + CreateVolumePermission={'Remove': remove}, + OperationType='remove') + if add: + client.modify_snapshot_attribute( + SnapshotId=snapshot['SnapshotId'], + CreateVolumePermission={'Add': add}, + OperationType='add') + + @resources.register('ebs') class EBS(QueryResourceManager):
Ability to remove cross-account access to EBS snapshots for matched account IDs **Is your feature request related to a problem? Please describe.** I have cases where we share EBS snapshots with various account IDs. I can use filters to find EBS snapshots that are shared with unauthorised accounts, which would be any account _not_ contained in the `cross-account whitelist`. However, the only remediation available to me (from what I can see) is a `delete` action which deletes the snapshot. **Describe the solution you'd like** I would like to be able to remove the share to the unauthorised account, while still keeping the share to the accounts listed in the whitelist, without deleting the EBS snapshot. **Additional context** This is somewhat similar to the use case I described in #6008 (which was for AMIs).
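The `set-permissions` action added in this record's patch ultimately calls EC2's `ModifySnapshotAttribute`. As a rough illustration outside Cloud Custodian, removing a share for specific accounts (rather than deleting the snapshot) looks roughly like this with boto3; the snapshot id and account id below are placeholders:

```python
import boto3


def unshare_snapshot(snapshot_id, account_ids):
    """Revoke createVolumePermission for the given accounts while leaving
    the snapshot, and any remaining whitelisted shares, untouched."""
    ec2 = boto3.client("ec2")
    ec2.modify_snapshot_attribute(
        SnapshotId=snapshot_id,
        CreateVolumePermission={"Remove": [{"UserId": a} for a in account_ids]},
        OperationType="remove",
    )


# e.g. unshare_snapshot("snap-0123456789abcdef0", ["111122223333"])
```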
cloud-custodian/cloud-custodian
diff --git a/tests/data/placebo/test_ebs_snapshot_set_permissions_add/ec2.DescribeSnapshotAttribute_1.json b/tests/data/placebo/test_ebs_snapshot_set_permissions_add/ec2.DescribeSnapshotAttribute_1.json new file mode 100644 index 000000000..26e543c67 --- /dev/null +++ b/tests/data/placebo/test_ebs_snapshot_set_permissions_add/ec2.DescribeSnapshotAttribute_1.json @@ -0,0 +1,12 @@ +{ + "status_code": 200, + "data": { + "CreateVolumePermissions": [ + { + "UserId": "665544332211" + } + ], + "SnapshotId": "snap-0ac64f0a1f16af706", + "ResponseMetadata": {} + } +} diff --git a/tests/data/placebo/test_ebs_snapshot_set_permissions_add/ec2.DescribeSnapshotAttribute_2.json b/tests/data/placebo/test_ebs_snapshot_set_permissions_add/ec2.DescribeSnapshotAttribute_2.json new file mode 100644 index 000000000..e1e85cb3e --- /dev/null +++ b/tests/data/placebo/test_ebs_snapshot_set_permissions_add/ec2.DescribeSnapshotAttribute_2.json @@ -0,0 +1,12 @@ +{ + "status_code": 200, + "data": { + "CreateVolumePermissions": [ + { + "UserId": "112233445566" + } + ], + "SnapshotId": "snap-0ac64f0a1f16af706", + "ResponseMetadata": {} + } +} diff --git a/tests/data/placebo/test_ebs_snapshot_set_permissions_add/ec2.DescribeSnapshots_1.json b/tests/data/placebo/test_ebs_snapshot_set_permissions_add/ec2.DescribeSnapshots_1.json new file mode 100644 index 000000000..f1323ea45 --- /dev/null +++ b/tests/data/placebo/test_ebs_snapshot_set_permissions_add/ec2.DescribeSnapshots_1.json @@ -0,0 +1,28 @@ +{ + "status_code": 200, + "data": { + "Snapshots": [ + { + "Description": "", + "Encrypted": false, + "OwnerId": "644160558196", + "Progress": "100%", + "SnapshotId": "snap-0ac64f0a1f16af706", + "StartTime": { + "__class__": "datetime", + "year": 2020, + "month": 10, + "day": 14, + "hour": 21, + "minute": 8, + "second": 19, + "microsecond": 177000 + }, + "State": "completed", + "VolumeId": "vol-0309e0368c8e7c1b0", + "VolumeSize": 8 + } + ], + "ResponseMetadata": {} + } +} \ No newline at end of file diff --git a/tests/data/placebo/test_ebs_snapshot_set_permissions_add/ec2.ModifySnapshotAttribute_1.json b/tests/data/placebo/test_ebs_snapshot_set_permissions_add/ec2.ModifySnapshotAttribute_1.json new file mode 100644 index 000000000..5b2170a07 --- /dev/null +++ b/tests/data/placebo/test_ebs_snapshot_set_permissions_add/ec2.ModifySnapshotAttribute_1.json @@ -0,0 +1,6 @@ +{ + "status_code": 200, + "data": { + "ResponseMetadata": {} + } +} \ No newline at end of file diff --git a/tests/data/placebo/test_ebs_snapshot_set_permissions_add/ec2.ModifySnapshotAttribute_2.json b/tests/data/placebo/test_ebs_snapshot_set_permissions_add/ec2.ModifySnapshotAttribute_2.json new file mode 100644 index 000000000..5b2170a07 --- /dev/null +++ b/tests/data/placebo/test_ebs_snapshot_set_permissions_add/ec2.ModifySnapshotAttribute_2.json @@ -0,0 +1,6 @@ +{ + "status_code": 200, + "data": { + "ResponseMetadata": {} + } +} \ No newline at end of file diff --git a/tests/data/placebo/test_ebs_snapshot_set_permissions_matched/ec2.DescribeSnapshotAttribute_1.json b/tests/data/placebo/test_ebs_snapshot_set_permissions_matched/ec2.DescribeSnapshotAttribute_1.json new file mode 100644 index 000000000..643cf7603 --- /dev/null +++ b/tests/data/placebo/test_ebs_snapshot_set_permissions_matched/ec2.DescribeSnapshotAttribute_1.json @@ -0,0 +1,15 @@ +{ + "status_code": 200, + "data": { + "CreateVolumePermissions": [ + { + "UserId": "112233445566" + }, + { + "UserId": "665544332211" + } + ], + "SnapshotId": "snap-0ac64f0a1f16af706", + "ResponseMetadata": {} + } +} diff 
--git a/tests/data/placebo/test_ebs_snapshot_set_permissions_matched/ec2.DescribeSnapshotAttribute_2.json b/tests/data/placebo/test_ebs_snapshot_set_permissions_matched/ec2.DescribeSnapshotAttribute_2.json new file mode 100644 index 000000000..e1e85cb3e --- /dev/null +++ b/tests/data/placebo/test_ebs_snapshot_set_permissions_matched/ec2.DescribeSnapshotAttribute_2.json @@ -0,0 +1,12 @@ +{ + "status_code": 200, + "data": { + "CreateVolumePermissions": [ + { + "UserId": "112233445566" + } + ], + "SnapshotId": "snap-0ac64f0a1f16af706", + "ResponseMetadata": {} + } +} diff --git a/tests/data/placebo/test_ebs_snapshot_set_permissions_matched/ec2.DescribeSnapshots_1.json b/tests/data/placebo/test_ebs_snapshot_set_permissions_matched/ec2.DescribeSnapshots_1.json new file mode 100644 index 000000000..f1323ea45 --- /dev/null +++ b/tests/data/placebo/test_ebs_snapshot_set_permissions_matched/ec2.DescribeSnapshots_1.json @@ -0,0 +1,28 @@ +{ + "status_code": 200, + "data": { + "Snapshots": [ + { + "Description": "", + "Encrypted": false, + "OwnerId": "644160558196", + "Progress": "100%", + "SnapshotId": "snap-0ac64f0a1f16af706", + "StartTime": { + "__class__": "datetime", + "year": 2020, + "month": 10, + "day": 14, + "hour": 21, + "minute": 8, + "second": 19, + "microsecond": 177000 + }, + "State": "completed", + "VolumeId": "vol-0309e0368c8e7c1b0", + "VolumeSize": 8 + } + ], + "ResponseMetadata": {} + } +} \ No newline at end of file diff --git a/tests/data/placebo/test_ebs_snapshot_set_permissions_matched/ec2.ModifySnapshotAttribute_1.json b/tests/data/placebo/test_ebs_snapshot_set_permissions_matched/ec2.ModifySnapshotAttribute_1.json new file mode 100644 index 000000000..5b2170a07 --- /dev/null +++ b/tests/data/placebo/test_ebs_snapshot_set_permissions_matched/ec2.ModifySnapshotAttribute_1.json @@ -0,0 +1,6 @@ +{ + "status_code": 200, + "data": { + "ResponseMetadata": {} + } +} \ No newline at end of file diff --git a/tests/data/placebo/test_ebs_snapshot_set_permissions_reset/ec2.DescribeSnapshotAttribute_1.json b/tests/data/placebo/test_ebs_snapshot_set_permissions_reset/ec2.DescribeSnapshotAttribute_1.json new file mode 100644 index 000000000..643cf7603 --- /dev/null +++ b/tests/data/placebo/test_ebs_snapshot_set_permissions_reset/ec2.DescribeSnapshotAttribute_1.json @@ -0,0 +1,15 @@ +{ + "status_code": 200, + "data": { + "CreateVolumePermissions": [ + { + "UserId": "112233445566" + }, + { + "UserId": "665544332211" + } + ], + "SnapshotId": "snap-0ac64f0a1f16af706", + "ResponseMetadata": {} + } +} diff --git a/tests/data/placebo/test_ebs_snapshot_set_permissions_reset/ec2.DescribeSnapshotAttribute_2.json b/tests/data/placebo/test_ebs_snapshot_set_permissions_reset/ec2.DescribeSnapshotAttribute_2.json new file mode 100644 index 000000000..58ec8b600 --- /dev/null +++ b/tests/data/placebo/test_ebs_snapshot_set_permissions_reset/ec2.DescribeSnapshotAttribute_2.json @@ -0,0 +1,8 @@ +{ + "status_code": 200, + "data": { + "CreateVolumePermissions": [], + "SnapshotId": "snap-0ac64f0a1f16af706", + "ResponseMetadata": {} + } +} \ No newline at end of file diff --git a/tests/data/placebo/test_ebs_snapshot_set_permissions_reset/ec2.DescribeSnapshots_1.json b/tests/data/placebo/test_ebs_snapshot_set_permissions_reset/ec2.DescribeSnapshots_1.json new file mode 100644 index 000000000..f1323ea45 --- /dev/null +++ b/tests/data/placebo/test_ebs_snapshot_set_permissions_reset/ec2.DescribeSnapshots_1.json @@ -0,0 +1,28 @@ +{ + "status_code": 200, + "data": { + "Snapshots": [ + { + "Description": "", + "Encrypted": 
false, + "OwnerId": "644160558196", + "Progress": "100%", + "SnapshotId": "snap-0ac64f0a1f16af706", + "StartTime": { + "__class__": "datetime", + "year": 2020, + "month": 10, + "day": 14, + "hour": 21, + "minute": 8, + "second": 19, + "microsecond": 177000 + }, + "State": "completed", + "VolumeId": "vol-0309e0368c8e7c1b0", + "VolumeSize": 8 + } + ], + "ResponseMetadata": {} + } +} \ No newline at end of file diff --git a/tests/data/placebo/test_ebs_snapshot_set_permissions_reset/ec2.ResetSnapshotAttribute_1.json b/tests/data/placebo/test_ebs_snapshot_set_permissions_reset/ec2.ResetSnapshotAttribute_1.json new file mode 100644 index 000000000..5b2170a07 --- /dev/null +++ b/tests/data/placebo/test_ebs_snapshot_set_permissions_reset/ec2.ResetSnapshotAttribute_1.json @@ -0,0 +1,6 @@ +{ + "status_code": 200, + "data": { + "ResponseMetadata": {} + } +} \ No newline at end of file diff --git a/tests/test_ebs.py b/tests/test_ebs.py index 72d1ce4fe..94ae048aa 100644 --- a/tests/test_ebs.py +++ b/tests/test_ebs.py @@ -334,6 +334,104 @@ class SnapshotTrimTest(BaseTest): self.assertEqual(len(resources), 1) +class SnapshotSetPermissions(BaseTest): + # The precondition to these tests in here is that we have an EBS + # snapshot with create volume permissions for accounts + # 112233445566 and 665544332211 + + def test_reset(self): + factory = self.replay_flight_data( + "test_ebs_snapshot_set_permissions_reset") + p = self.load_policy( + { + "name": "reset-permissions", + "resource": "ebs-snapshot", + "filters": ["cross-account"], + "actions": ["set-permissions"], + }, + session_factory=factory, + ) + p.validate() + resources = p.run() + self.assertEqual(len(resources), 1) + assert sorted( + resources[0]['c7n:CrossAccountViolations']) == sorted( + ['112233445566', '665544332211']) + client = factory().client('ec2') + perms = client.describe_snapshot_attribute( + SnapshotId=resources[0]['SnapshotId'], + Attribute='createVolumePermission')['CreateVolumePermissions'] + assert perms == [] + + def test_add(self): + # For this test, we assume only 665544332211 has permissions, + # and we test adding 112233445566 and removing 665544332211 + factory = self.replay_flight_data( + "test_ebs_snapshot_set_permissions_add") + p = self.load_policy( + { + "name": "set-permissions", + "resource": "ebs-snapshot", + "filters": ["cross-account"], + "actions": [ + { + "type": "set-permissions", + "add": ["112233445566"], + "remove": ["665544332211"], + }, + ], + }, + session_factory=factory, + ) + p.validate() + resources = p.run() + + self.assertEqual(len(resources), 1) + client = factory().client('ec2') + perms = client.describe_snapshot_attribute( + SnapshotId=resources[0]['SnapshotId'], + Attribute='createVolumePermission')['CreateVolumePermissions'] + assert perms == [ + {"UserId": "112233445566"}, + ] + + def test_matched(self): + factory = self.replay_flight_data( + "test_ebs_snapshot_set_permissions_matched") + p = self.load_policy( + { + "name": "set-permissions", + "resource": "ebs-snapshot", + "filters": [ + { + "type": "cross-account", + "whitelist": ["112233445566"], + }, + ], + "actions": [ + { + "type": "set-permissions", + "remove": "matched", + }, + ], + }, + session_factory=factory, + ) + p.validate() + resources = p.run() + + self.assertEqual(len(resources), 1) + self.assertEqual( + sorted(resources[0]['c7n:CrossAccountViolations']), + ['665544332211']) + + client = factory().client('ec2') + perms = client.describe_snapshot_attribute( + SnapshotId=resources[0]['SnapshotId'], + 
Attribute='createVolumePermission')['CreateVolumePermissions'] + assert perms == [{"UserId": "112233445566"}] + + class AttachedInstanceTest(BaseTest): def test_ebs_instance_filter(self):
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_issue_reference" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 2 }, "num_modified_files": 1 }
0.9
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "pytest-xdist", "pytest-mock", "pytest-terraform" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.8", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
apipkg==1.5 appdirs==1.4.4 argcomplete==1.12.1 attrs==20.3.0 aws-xray-sdk==2.6.0 bleach==3.2.1 boto3==1.16.19 botocore==1.19.19 -e git+https://github.com/cloud-custodian/cloud-custodian.git@5dac93a58d2a01663ce8163418eb91b45872eb31#egg=c7n certifi==2020.11.8 cffi==1.14.3 chardet==3.0.4 click==7.1.2 colorama==0.4.6 coverage==5.3 cryptography==3.2.1 distlib==0.3.1 docutils==0.16 exceptiongroup==1.2.2 execnet==1.7.1 filelock==3.0.12 flake8==3.8.4 future==0.18.2 idna==2.10 importlib-metadata==1.7.0 iniconfig==1.1.1 jeepney==0.5.0 jmespath==0.10.0 jsonpatch==1.26 jsonpickle==1.3 jsonpointer==2.0 jsonschema==3.2.0 keyring==21.5.0 mccabe==0.6.1 mock==4.0.2 more-itertools==8.6.0 multidict==5.0.2 packaging==20.4 pkginfo==1.6.1 placebo==0.9.0 pluggy==1.5.0 portalocker==1.7.1 psutil==5.7.3 py==1.9.0 pycodestyle==2.6.0 pycparser==2.20 pyflakes==2.2.0 Pygments==2.7.2 pyparsing==2.4.7 pyrsistent==0.17.3 pytest==8.3.5 pytest-cov==2.10.1 pytest-forked==1.3.0 pytest-mock==3.14.0 pytest-sugar==0.9.4 pytest-terraform==0.7.0 pytest-xdist==1.34.0 python-dateutil==2.8.1 PyYAML==5.3.1 readme-renderer==28.0 requests==2.25.0 requests-toolbelt==0.9.1 rfc3986==1.4.0 s3transfer==0.3.3 SecretStorage==3.2.0 six==1.15.0 tabulate==0.8.7 termcolor==1.1.0 toml==0.10.2 tomli==2.2.1 tox==3.20.1 tqdm==4.52.0 twine==3.2.0 urllib3==1.26.2 vcrpy==4.1.1 virtualenv==20.1.0 webencodings==0.5.1 wrapt==1.12.1 yarl==1.6.3 zipp==3.20.2
name: cloud-custodian channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=24.2=py38h06a4308_0 - python=3.8.20=he870216_0 - readline=8.2=h5eee18b_0 - setuptools=75.1.0=py38h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.44.0=py38h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - apipkg==1.5 - appdirs==1.4.4 - argcomplete==1.12.1 - attrs==20.3.0 - aws-xray-sdk==2.6.0 - bleach==3.2.1 - boto3==1.16.19 - botocore==1.19.19 - c7n==0.9.8 - certifi==2020.11.8 - cffi==1.14.3 - chardet==3.0.4 - click==7.1.2 - colorama==0.4.6 - coverage==5.3 - cryptography==3.2.1 - distlib==0.3.1 - docutils==0.16 - exceptiongroup==1.2.2 - execnet==1.7.1 - filelock==3.0.12 - flake8==3.8.4 - future==0.18.2 - idna==2.10 - importlib-metadata==1.7.0 - iniconfig==1.1.1 - jeepney==0.5.0 - jmespath==0.10.0 - jsonpatch==1.26 - jsonpickle==1.3 - jsonpointer==2.0 - jsonschema==3.2.0 - keyring==21.5.0 - mccabe==0.6.1 - mock==4.0.2 - more-itertools==8.6.0 - multidict==5.0.2 - packaging==20.4 - pkginfo==1.6.1 - placebo==0.9.0 - pluggy==1.5.0 - portalocker==1.7.1 - psutil==5.7.3 - py==1.9.0 - pycodestyle==2.6.0 - pycparser==2.20 - pyflakes==2.2.0 - pygments==2.7.2 - pyparsing==2.4.7 - pyrsistent==0.17.3 - pytest==8.3.5 - pytest-cov==2.10.1 - pytest-forked==1.3.0 - pytest-mock==3.14.0 - pytest-sugar==0.9.4 - pytest-terraform==0.7.0 - pytest-xdist==1.34.0 - python-dateutil==2.8.1 - pyyaml==5.3.1 - readme-renderer==28.0 - requests==2.25.0 - requests-toolbelt==0.9.1 - rfc3986==1.4.0 - s3transfer==0.3.3 - secretstorage==3.2.0 - six==1.15.0 - tabulate==0.8.7 - termcolor==1.1.0 - toml==0.10.2 - tomli==2.2.1 - tox==3.20.1 - tqdm==4.52.0 - twine==3.2.0 - urllib3==1.26.2 - vcrpy==4.1.1 - virtualenv==20.1.0 - webencodings==0.5.1 - wrapt==1.12.1 - yarl==1.6.3 - zipp==3.20.2 prefix: /opt/conda/envs/cloud-custodian
[ "tests/test_ebs.py::SnapshotSetPermissions::test_add", "tests/test_ebs.py::SnapshotSetPermissions::test_matched", "tests/test_ebs.py::SnapshotSetPermissions::test_reset" ]
[]
[ "tests/test_ebs.py::SnapshotQueryParse::test_invalid_query", "tests/test_ebs.py::SnapshotQueryParse::test_query", "tests/test_ebs.py::SnapshotErrorHandler::test_get_bad_snapshot_malformed", "tests/test_ebs.py::SnapshotErrorHandler::test_get_bad_snapshot_notfound", "tests/test_ebs.py::SnapshotErrorHandler::test_get_bad_volume_malformed", "tests/test_ebs.py::SnapshotErrorHandler::test_get_bad_volume_notfound", "tests/test_ebs.py::SnapshotErrorHandler::test_remove_snapshot", "tests/test_ebs.py::SnapshotErrorHandler::test_snapshot_copy_related_tags_missing_volumes", "tests/test_ebs.py::SnapshotErrorHandler::test_tag_error", "tests/test_ebs.py::SnapshotAccessTest::test_snapshot_access", "tests/test_ebs.py::SnapshotDetachTest::test_volume_detach", "tests/test_ebs.py::SnapshotCopyTest::test_snapshot_copy", "tests/test_ebs.py::SnapshotAmiSnapshotTest::test_snapshot_ami_snapshot_filter", "tests/test_ebs.py::SnapshotUnusedTest::test_snapshot_unused", "tests/test_ebs.py::SnapshotTrimTest::test_snapshot_trim", "tests/test_ebs.py::AttachedInstanceTest::test_ebs_instance_filter", "tests/test_ebs.py::ResizeTest::test_resize_action", "tests/test_ebs.py::ResizeTest::test_resize_filter", "tests/test_ebs.py::CopyInstanceTagsTest::test_copy_instance_tags", "tests/test_ebs.py::VolumePostFindingTest::test_volume_post_finding", "tests/test_ebs.py::VolumeSnapshotTest::test_volume_snapshot", "tests/test_ebs.py::VolumeSnapshotTest::test_volume_snapshot_copy_tags", "tests/test_ebs.py::VolumeSnapshotTest::test_volume_snapshot_copy_volume_tags", "tests/test_ebs.py::VolumeDeleteTest::test_volume_delete_force", "tests/test_ebs.py::EncryptExtantVolumesTest::test_encrypt_volumes", "tests/test_ebs.py::TestKmsAlias::test_ebs_kms_alias", "tests/test_ebs.py::EbsFaultToleranceTest::test_ebs_fault_tolerant", "tests/test_ebs.py::EbsFaultToleranceTest::test_ebs_non_fault_tolerant", "tests/test_ebs.py::PiopsMetricsFilterTest::test_ebs_metrics_percent_filter", "tests/test_ebs.py::HealthEventsFilterTest::test_ebs_health_events_filter" ]
[]
Apache License 2.0
8,682
851
[ "c7n/resources/ebs.py" ]
fitbenchmarking__fitbenchmarking-661
6641c44ab97f156badf34a1075791a21304df4a3
2020-10-13 09:07:04
6641c44ab97f156badf34a1075791a21304df4a3
diff --git a/fitbenchmarking/utils/options.py b/fitbenchmarking/utils/options.py index d650efa9..22ce6467 100644 --- a/fitbenchmarking/utils/options.py +++ b/fitbenchmarking/utils/options.py @@ -84,7 +84,7 @@ class Options(object): DEFAULT_FITTING = \ {'num_runs': 5, 'algorithm_type': 'all', - 'software': ['bumps', 'dfo', 'minuit', 'scipy', 'scipy_ls'], + 'software': ['scipy', 'scipy_ls'], 'use_errors': True, 'jac_method': ['scipy']} DEFAULT_JACOBIAN = \
Tests run by default If you install via `pip install fitbenchmarking` then only scipy minimizers are available. The default options should reflect this.
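Paraphrasing the one-line patch in this record, the new default in `fitbenchmarking/utils/options.py` limits the benchmarked software to what a plain pip install can actually run:

```python
# Paraphrased from the patch: only SciPy-based minimizers are enabled by default;
# bumps, dfo and minuit become opt-in extras.
DEFAULT_FITTING = {
    "num_runs": 5,
    "algorithm_type": "all",
    "software": ["scipy", "scipy_ls"],
    "use_errors": True,
    "jac_method": ["scipy"],
}
```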
fitbenchmarking/fitbenchmarking
diff --git a/fitbenchmarking/systests/test_regression.py b/fitbenchmarking/systests/test_regression.py index 172c00ae..ea9ceaaa 100644 --- a/fitbenchmarking/systests/test_regression.py +++ b/fitbenchmarking/systests/test_regression.py @@ -208,6 +208,7 @@ def setup_options(multifit=False): opts.minimizers = {k: [v[0]] for k, v in opts.minimizers.items()} opts.software = sorted(opts.minimizers.keys()) else: + opts.software = ['bumps', 'dfo', 'minuit', 'scipy', 'scipy_ls'] opts.minimizers = {s: [opts.minimizers[s][0]] for s in opts.software} opts.results_dir = os.path.join(os.path.dirname(__file__), 'results') diff --git a/fitbenchmarking/utils/tests/test_options_fitting.py b/fitbenchmarking/utils/tests/test_options_fitting.py index 65783a37..e20737af 100644 --- a/fitbenchmarking/utils/tests/test_options_fitting.py +++ b/fitbenchmarking/utils/tests/test_options_fitting.py @@ -41,7 +41,7 @@ class FittingOptionTests(unittest.TestCase): """ Checks software default """ - expected = ['bumps', 'dfo', 'minuit', 'scipy', 'scipy_ls'] + expected = ['scipy', 'scipy_ls'] actual = self.options.software self.assertEqual(expected, actual)
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 2 }, "num_modified_files": 1 }
unknown
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[bumps,DFO,minuit,SAS]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": null, "pre_install": [ "apt-get update", "apt-get install -y gcc python3-dev" ], "python": "3.9", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
bumps==0.9.3 certifi==2025.1.31 charset-normalizer==3.4.1 configparser==7.2.0 contourpy==1.3.0 coverage==4.5.4 coveralls==3.3.1 cycler==0.12.1 DFO-LS==1.5.4 DFOGN==1.0.2 docopt==0.6.2 docutils==0.21.2 exceptiongroup==1.2.2 -e git+https://github.com/fitbenchmarking/fitbenchmarking.git@6641c44ab97f156badf34a1075791a21304df4a3#egg=FitBenchmarking fonttools==4.56.0 h5py==3.13.0 idna==3.10 iminuit==2.30.1 importlib_resources==6.5.2 iniconfig==2.1.0 Jinja2==3.1.6 kiwisolver==1.4.7 lxml==5.3.1 MarkupSafe==3.0.2 matplotlib==3.9.4 numpy==2.0.2 packaging==24.2 pandas==2.2.3 pillow==11.1.0 pluggy==1.5.0 pyparsing==3.2.3 pytest==8.3.5 pytest-cov==2.10.1 python-coveralls==2.9.3 python-dateutil==2.9.0.post0 pytz==2025.2 PyYAML==6.0.2 requests==2.32.3 sasmodels==1.0.9 scipy==1.13.1 six==1.17.0 tinycc==1.1 tomli==2.2.1 tzdata==2025.2 urllib3==1.23 zipp==3.21.0
name: fitbenchmarking channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - bumps==0.9.3 - certifi==2025.1.31 - charset-normalizer==3.4.1 - configparser==7.2.0 - contourpy==1.3.0 - coverage==4.5.4 - coveralls==3.3.1 - cycler==0.12.1 - dfo-ls==1.5.4 - dfogn==1.0.2 - docopt==0.6.2 - docutils==0.21.2 - exceptiongroup==1.2.2 - fonttools==4.56.0 - h5py==3.13.0 - idna==3.10 - iminuit==2.30.1 - importlib-resources==6.5.2 - iniconfig==2.1.0 - jinja2==3.1.6 - kiwisolver==1.4.7 - lxml==5.3.1 - markupsafe==3.0.2 - matplotlib==3.9.4 - numpy==2.0.2 - packaging==24.2 - pandas==2.2.3 - pillow==11.1.0 - pluggy==1.5.0 - pyparsing==3.2.3 - pytest==8.3.5 - pytest-cov==2.10.1 - python-coveralls==2.9.3 - python-dateutil==2.9.0.post0 - pytz==2025.2 - pyyaml==6.0.2 - requests==2.32.3 - sasmodels==1.0.9 - scipy==1.13.1 - six==1.17.0 - tinycc==1.1 - tomli==2.2.1 - tzdata==2025.2 - urllib3==1.23 - zipp==3.21.0 prefix: /opt/conda/envs/fitbenchmarking
[ "fitbenchmarking/utils/tests/test_options_fitting.py::FittingOptionTests::test_software_default" ]
[]
[ "fitbenchmarking/utils/tests/test_options_fitting.py::FittingOptionTests::test_algorithm_type_default", "fitbenchmarking/utils/tests/test_options_fitting.py::FittingOptionTests::test_jac_method_default", "fitbenchmarking/utils/tests/test_options_fitting.py::FittingOptionTests::test_num_runs_default", "fitbenchmarking/utils/tests/test_options_fitting.py::FittingOptionTests::test_use_errors_default", "fitbenchmarking/utils/tests/test_options_fitting.py::UserFittingOptionTests::test_invalid_option_key", "fitbenchmarking/utils/tests/test_options_fitting.py::UserFittingOptionTests::test_minimizer_algorithm_type_invalid", "fitbenchmarking/utils/tests/test_options_fitting.py::UserFittingOptionTests::test_minimizer_algorithm_type_valid", "fitbenchmarking/utils/tests/test_options_fitting.py::UserFittingOptionTests::test_minimizer_jac_method_invalid", "fitbenchmarking/utils/tests/test_options_fitting.py::UserFittingOptionTests::test_minimizer_jac_method_valid", "fitbenchmarking/utils/tests/test_options_fitting.py::UserFittingOptionTests::test_minimizer_num_runs_invalid", "fitbenchmarking/utils/tests/test_options_fitting.py::UserFittingOptionTests::test_minimizer_num_runs_valid", "fitbenchmarking/utils/tests/test_options_fitting.py::UserFittingOptionTests::test_minimizer_use_errors_invalid", "fitbenchmarking/utils/tests/test_options_fitting.py::UserFittingOptionTests::test_minimizer_use_errors_valid" ]
[]
BSD 3-Clause "New" or "Revised" License
8,689
167
[ "fitbenchmarking/utils/options.py" ]
googleapis__python-bigquery-326
3be78b737add7111e24e912cd02fc6df75a07de6
2020-10-14 13:24:57
b0dd892176e31ac25fddd15554b5bfa054299d4d
diff --git a/google/cloud/bigquery/model.py b/google/cloud/bigquery/model.py index 092d98c2..1143b71f 100644 --- a/google/cloud/bigquery/model.py +++ b/google/cloud/bigquery/model.py @@ -317,6 +317,14 @@ class Model(object): def __repr__(self): return "Model(reference={})".format(repr(self.reference)) + def to_api_repr(self): + """Construct the API resource representation of this model. + + Returns: + Dict[str, object]: Model reference represented as an API resource + """ + return json_format.MessageToDict(self._proto) + class ModelReference(object): """ModelReferences are pointers to models.
Add to_api_repr method to Model. Same as #299 but for models: `AttributeError: 'Model' object has no attribute 'to_api_repr'`. Current workaround: using `model._properties`, but object fields like `featureColumns` (see https://cloud.google.com/bigquery/docs/reference/rest/v2/models#Model) will be missing.
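For orientation, a minimal usage sketch (not taken from the issue; the client setup, model path, and printed field are illustrative assumptions) of what the requested method enables compared to the `_properties` workaround:

```python
from google.cloud import bigquery

client = bigquery.Client()
model = client.get_model("my-proj.my_dset.my_model")  # hypothetical model path

# Workaround before the fix: raw stored properties, which can miss nested
# fields such as featureColumns.
partial = model._properties

# With the requested method, the model's full API resource representation
# (camelCase keys, as in the BigQuery REST resource) becomes available.
resource = model.to_api_repr()
print(resource["modelReference"]["modelId"])
```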
googleapis/python-bigquery
diff --git a/tests/unit/model/test_model.py b/tests/unit/model/test_model.py index 2c007942..9fa29a49 100644 --- a/tests/unit/model/test_model.py +++ b/tests/unit/model/test_model.py @@ -318,3 +318,47 @@ def test_repr(target_class): "Model(reference=ModelReference(" "project_id='my-proj', dataset_id='my_dset', model_id='my_model'))" ) + + +def test_to_api_repr(target_class): + from google.protobuf import json_format + + model = target_class("my-proj.my_dset.my_model") + resource = { + "etag": "abcdefg", + "modelReference": { + "projectId": "my-project", + "datasetId": "my_dataset", + "modelId": "my_model", + }, + "creationTime": "1274284800000", + "lastModifiedTime": "1317484800000", + "modelType": "LOGISTIC_REGRESSION", + "trainingRuns": [ + { + "trainingOptions": {"initialLearnRate": 1.0}, + "startTime": "2010-05-19T16:00:00Z", + }, + { + "trainingOptions": {"initialLearnRate": 0.5}, + "startTime": "2011-10-01T16:00:00Z", + }, + { + "trainingOptions": {"initialLearnRate": 0.25}, + "startTime": "2012-12-21T16:00:00Z", + }, + ], + "description": "A friendly description.", + "location": "US", + "friendlyName": "A friendly name.", + "labels": {"greeting": "こんにちは"}, + "expirationTime": "1356105600000", + "encryptionConfiguration": { + "kmsKeyName": "projects/1/locations/us/keyRings/1/cryptoKeys/1" + }, + } + model._proto = json_format.ParseDict( + resource, types.Model()._pb, ignore_unknown_fields=True + ) + got = model.to_api_repr() + assert got == resource
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_hyperlinks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 0 }, "num_modified_files": 1 }
2.1
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[all]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov" ], "pre_install": null, "python": "3.8", "reqs_path": [ "requirements/base.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
cachetools==5.5.2 certifi==2025.1.31 charset-normalizer==3.4.1 coverage==7.6.1 exceptiongroup==1.2.2 google-api-core==1.34.1 google-auth==2.38.0 -e git+https://github.com/googleapis/python-bigquery.git@3be78b737add7111e24e912cd02fc6df75a07de6#egg=google_cloud_bigquery google-cloud-bigquery-storage==2.30.0 google-cloud-core==1.5.0 google-crc32c==1.5.0 google-resumable-media==1.3.3 googleapis-common-protos==1.69.2 grpcio==1.70.0 grpcio-status==1.48.2 idna==3.10 iniconfig==2.1.0 numpy==1.24.4 opentelemetry-api==0.9b0 opentelemetry-instrumentation==0.9b0 opentelemetry-sdk==0.9b0 packaging==24.2 pandas==2.0.3 pluggy==1.5.0 proto-plus==1.26.1 protobuf==3.20.3 pyarrow==1.0.1 pyasn1==0.6.1 pyasn1_modules==0.4.2 pytest==8.3.5 pytest-cov==5.0.0 python-dateutil==2.9.0.post0 pytz==2025.2 requests==2.32.3 rsa==4.9 six==1.17.0 tomli==2.2.1 tqdm==4.67.1 tzdata==2025.2 urllib3==2.2.3 wrapt==1.17.2
name: python-bigquery channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=24.2=py38h06a4308_0 - python=3.8.20=he870216_0 - readline=8.2=h5eee18b_0 - setuptools=75.1.0=py38h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.44.0=py38h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - cachetools==5.5.2 - certifi==2025.1.31 - charset-normalizer==3.4.1 - coverage==7.6.1 - exceptiongroup==1.2.2 - google-api-core==1.34.1 - google-auth==2.38.0 - google-cloud-bigquery-storage==2.30.0 - google-cloud-core==1.5.0 - google-crc32c==1.5.0 - google-resumable-media==1.3.3 - googleapis-common-protos==1.69.2 - grpcio==1.70.0 - grpcio-status==1.48.2 - idna==3.10 - iniconfig==2.1.0 - numpy==1.24.4 - opentelemetry-api==0.9b0 - opentelemetry-instrumentation==0.9b0 - opentelemetry-sdk==0.9b0 - packaging==24.2 - pandas==2.0.3 - pluggy==1.5.0 - proto-plus==1.26.1 - protobuf==3.20.3 - pyarrow==1.0.1 - pyasn1==0.6.1 - pyasn1-modules==0.4.2 - pytest==8.3.5 - pytest-cov==5.0.0 - python-dateutil==2.9.0.post0 - pytz==2025.2 - requests==2.32.3 - rsa==4.9 - six==1.17.0 - tomli==2.2.1 - tqdm==4.67.1 - tzdata==2025.2 - urllib3==2.2.3 - wrapt==1.17.2 prefix: /opt/conda/envs/python-bigquery
[ "tests/unit/model/test_model.py::test_to_api_repr" ]
[]
[ "tests/unit/model/test_model.py::test_ctor", "tests/unit/model/test_model.py::test_ctor_string", "tests/unit/model/test_model.py::test_from_api_repr", "tests/unit/model/test_model.py::test_from_api_repr_w_minimal_resource", "tests/unit/model/test_model.py::test_from_api_repr_w_unknown_fields", "tests/unit/model/test_model.py::test_build_resource[resource0-filter_fields0-expected0]", "tests/unit/model/test_model.py::test_build_resource[resource1-filter_fields1-expected1]", "tests/unit/model/test_model.py::test_build_resource[resource2-filter_fields2-expected2]", "tests/unit/model/test_model.py::test_build_resource[resource3-filter_fields3-expected3]", "tests/unit/model/test_model.py::test_build_resource[resource4-filter_fields4-expected4]", "tests/unit/model/test_model.py::test_build_resource[resource5-filter_fields5-expected5]", "tests/unit/model/test_model.py::test_set_description", "tests/unit/model/test_model.py::test_set_expires", "tests/unit/model/test_model.py::test_set_friendly_name", "tests/unit/model/test_model.py::test_set_labels", "tests/unit/model/test_model.py::test_replace_labels", "tests/unit/model/test_model.py::test_set_encryption_configuration", "tests/unit/model/test_model.py::test_repr" ]
[]
Apache License 2.0
8,698
175
[ "google/cloud/bigquery/model.py" ]
just-work__fffw-68
ec1451b6347ac0c9a8da3947651d7b15ebf0212d
2020-10-15 14:18:30
f5b9a860c6aca1a6c8cf8f5dfde5f84ed8e7aabf
diff --git a/fffw/encoding/filters.py b/fffw/encoding/filters.py index ff1baa2..1ef1505 100644 --- a/fffw/encoding/filters.py +++ b/fffw/encoding/filters.py @@ -3,7 +3,7 @@ from typing import Union, List, cast from fffw.graph import base from fffw.encoding import mixins -from fffw.graph.meta import Meta, VideoMeta, TS, Scene, VIDEO, AUDIO +from fffw.graph.meta import Meta, VideoMeta, TS, Scene, VIDEO, AUDIO, AudioMeta from fffw.graph.meta import StreamType, Device from fffw.wrapper.params import Params, param @@ -330,16 +330,26 @@ class Concat(Filter): duration = TS(0) scenes = [] streams: List[str] = [] + samples = 0 + sampling_rate = None for meta in metadata: duration += meta.duration + if isinstance(meta, AudioMeta): + samples += meta.samples + if sampling_rate is None: + sampling_rate = meta.sampling_rate + else: + assert sampling_rate == meta.sampling_rate scenes.extend(meta.scenes) for stream in meta.streams: if not streams or streams[-1] != stream: # Add all streams for each concatenated metadata and remove # contiguous duplicates. streams.append(stream) - return replace(metadata[0], duration=duration, - scenes=scenes, streams=streams) + kwargs = dict(duration=duration, scenes=scenes, streams=streams) + if samples != 0: + kwargs['samples'] = samples + return replace(metadata[0], **kwargs) @dataclass
Concat audio fails validation. The samples count is not summed while concatenating audio. It looks like concatenating video doesn't sum the frames count either.
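A self-contained sketch (a simplification with assumed names, not the fffw API itself) of the metadata-merging rule the fix needs: durations add up, and for audio the sample counts must add up as well, while sampling rates are required to match.

```python
from dataclasses import dataclass


@dataclass
class AudioMeta:
    duration: float
    sampling_rate: int
    samples: int


def concat_audio_meta(*metas: AudioMeta) -> AudioMeta:
    # All concatenated audio streams are assumed to share one sampling rate.
    rate = metas[0].sampling_rate
    assert all(m.sampling_rate == rate for m in metas)
    return AudioMeta(
        duration=sum(m.duration for m in metas),
        sampling_rate=rate,
        samples=sum(m.samples for m in metas),
    )


merged = concat_audio_meta(
    AudioMeta(duration=10.0, sampling_rate=48000, samples=480_000),
    AudioMeta(duration=5.0, sampling_rate=48000, samples=240_000),
)
assert merged.duration == 15.0 and merged.samples == 720_000
```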
just-work/fffw
diff --git a/tests/test_graph.py b/tests/test_graph.py index 5e82f6e..905464c 100644 --- a/tests/test_graph.py +++ b/tests/test_graph.py @@ -35,7 +35,10 @@ class FilterGraphTestCase(TestCase): par=1.0, duration=300.0, ) - self.audio_metadata = audio_meta_data() + self.audio_metadata = audio_meta_data( + duration=200.0, + sampling_rate=48000, + samples_count=200*48000) self.source = inputs.Input( input_file='input.mp4', @@ -239,6 +242,25 @@ class FilterGraphTestCase(TestCase): self.assertEqual(self.video_metadata.duration + vs.meta.duration, vm.duration) + def test_concat_audio_metadata(self): + """ + Concat filter sums samples count for audio streams. + """ + audio_meta = audio_meta_data(duration=1000.0, sampling_rate=48000, + samples_count=48000 * 1000) + a = inputs.Stream(AUDIO, meta=audio_meta) + self.input_list.append(inputs.input_file('second.mp4', a)) + concat = a | Concat(AUDIO) + self.source | concat + + concat > self.output + + am = cast(AudioMeta, self.output.codecs[-1].get_meta_data()) + self.assertEqual(self.audio_metadata.duration + audio_meta.duration, + am.duration) + self.assertEqual(self.audio_metadata.samples + audio_meta.samples, + am.samples) + def test_trim_metadata(self): """ Trim filter sets start and changes stream duration.
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 2, "test_score": 0 }, "num_modified_files": 1 }
3.1
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y ffmpeg mediainfo" ], "python": "3.8", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
exceptiongroup==1.2.2 -e git+https://github.com/just-work/fffw.git@ec1451b6347ac0c9a8da3947651d7b15ebf0212d#egg=fffw iniconfig==2.1.0 packaging==24.2 pluggy==1.5.0 pymediainfo==4.2.1 pytest==8.3.5 tomli==2.2.1
name: fffw channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=24.2=py38h06a4308_0 - python=3.8.20=he870216_0 - readline=8.2=h5eee18b_0 - setuptools=75.1.0=py38h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.44.0=py38h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - exceptiongroup==1.2.2 - iniconfig==2.1.0 - packaging==24.2 - pluggy==1.5.0 - pymediainfo==4.2.1 - pytest==8.3.5 - tomli==2.2.1 prefix: /opt/conda/envs/fffw
[ "tests/test_graph.py::FilterGraphTestCase::test_concat_audio_metadata" ]
[]
[ "tests/test_graph.py::FilterGraphTestCase::test_concat_metadata", "tests/test_graph.py::FilterGraphTestCase::test_disabled_filters", "tests/test_graph.py::FilterGraphTestCase::test_filter_graph", "tests/test_graph.py::FilterGraphTestCase::test_filter_validates_hardware_device", "tests/test_graph.py::FilterGraphTestCase::test_filter_validates_stream_kind", "tests/test_graph.py::FilterGraphTestCase::test_overlay_metadata", "tests/test_graph.py::FilterGraphTestCase::test_scale_changes_metadata", "tests/test_graph.py::FilterGraphTestCase::test_setpts_metadata", "tests/test_graph.py::FilterGraphTestCase::test_skip_not_connected_sources", "tests/test_graph.py::FilterGraphTestCase::test_trim_metadata" ]
[]
MIT License
8,709
401
[ "fffw/encoding/filters.py" ]
couler-proj__couler-89
c51142ebf2af3945abfbea30f5b50e8860687d3b
2020-10-15 14:40:49
3448a883648be2b65aa3819d481e8d1e31a4f762
diff --git a/couler/core/run_templates.py b/couler/core/run_templates.py index b94d5b6..35a08b4 100644 --- a/couler/core/run_templates.py +++ b/couler/core/run_templates.py @@ -99,6 +99,7 @@ def run_container( daemon=False, volume_mounts=None, working_dir=None, + node_selector=None, ): """ Generate an Argo container template. For example, the template whalesay @@ -176,6 +177,7 @@ def run_container( daemon=daemon, volume_mounts=volume_mounts, working_dir=working_dir, + node_selector=node_selector, ) states.workflow.add_template(template) diff --git a/couler/core/templates/container.py b/couler/core/templates/container.py index e3569f0..c73a489 100644 --- a/couler/core/templates/container.py +++ b/couler/core/templates/container.py @@ -42,6 +42,7 @@ class Container(Template): daemon=False, volume_mounts=None, working_dir=None, + node_selector=None, ): Template.__init__( self, @@ -63,6 +64,7 @@ class Container(Template): self.image_pull_policy = image_pull_policy self.volume_mounts = volume_mounts self.working_dir = working_dir + self.node_selector = node_selector def to_dict(self): template = Template.to_dict(self) @@ -108,6 +110,11 @@ class Container(Template): template["inputs"]["artifacts"] = _input_list + # Node selector + if self.node_selector is not None: + # TODO: Support inferring node selector values from Argo parameters + template["nodeSelector"] = self.node_selector + # Container if not utils.gpu_requested(self.resources): if self.env is None:
Support node selector for scheduling pods An example in Argo Workflows: https://github.com/argoproj/argo/blob/master/examples/node-selector.yaml cc @inohmonton99
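A short usage sketch of the requested option, following the linked Argo example (the `node_selector` keyword mirrors the accompanying patch; actually submitting the workflow requires a configured Argo/Couler setup):

```python
import couler.argo as couler

couler.run_container(
    image="docker/whalesay:latest",
    command=["bash", "-c"],
    args=["echo -n hello world"],
    step_name="A",
    # Pin the step's pod to amd64 nodes via a Kubernetes node selector.
    node_selector={"beta.kubernetes.io/arch": "amd64"},
)

# The generated workflow template should carry a nodeSelector entry.
print(couler.workflow_yaml())
```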
couler-proj/couler
diff --git a/couler/tests/argo_test.py b/couler/tests/argo_test.py index 8e31ce1..8b4d925 100644 --- a/couler/tests/argo_test.py +++ b/couler/tests/argo_test.py @@ -90,6 +90,22 @@ class ArgoTest(unittest.TestCase): ) couler._cleanup() + def test_run_container_with_node_selector(self): + couler.run_container( + image="docker/whalesay:latest", + args=["echo -n hello world"], + command=["bash", "-c"], + step_name="A", + node_selector={"beta.kubernetes.io/arch": "amd64"}, + ) + + wf = couler.workflow_yaml() + self.assertEqual( + wf["spec"]["templates"][1]["nodeSelector"], + {"beta.kubernetes.io/arch": "amd64"}, + ) + couler._cleanup() + def test_run_container_with_workflow_volume(self): pvc = VolumeClaimTemplate("workdir") volume_mount = VolumeMount("workdir", "/mnt/vol") @@ -161,8 +177,8 @@ class ArgoTest(unittest.TestCase): self.assertTrue( params["value"] in [ - '"{{workflow.outputs.parameters.output-id-117}}"', - '"{{workflow.outputs.parameters.output-id-118}}"', + '"{{workflow.outputs.parameters.output-id-133}}"', + '"{{workflow.outputs.parameters.output-id-134}}"', ] ) # Check input parameters for step B
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 0 }, "num_modified_files": 2 }
0.1
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pre-commit", "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "requirements.txt", "requirements-dev.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
argo-workflows==3.5.1 cachetools==5.5.2 certifi==2025.1.31 cfgv==3.4.0 charset-normalizer==3.4.1 -e git+https://github.com/couler-proj/couler.git@c51142ebf2af3945abfbea30f5b50e8860687d3b#egg=couler coverage==7.8.0 Deprecated==1.2.18 distlib==0.3.9 docker==4.1.0 durationpy==0.9 exceptiongroup==1.2.2 filelock==3.18.0 google-auth==2.38.0 identify==2.6.9 idna==3.10 iniconfig==2.1.0 kubernetes==32.0.1 mock==5.2.0 nodeenv==1.9.1 oauthlib==3.2.2 packaging==24.2 platformdirs==4.3.7 pluggy==1.5.0 pre_commit==4.2.0 pyaml==25.1.0 pyasn1==0.6.1 pyasn1_modules==0.4.2 pytest==8.3.5 pytest-cov==6.0.0 python-dateutil==2.9.0.post0 PyYAML==6.0.2 requests==2.32.3 requests-oauthlib==2.0.0 rsa==4.9 six==1.17.0 tomli==2.2.1 urllib3==2.3.0 virtualenv==20.29.3 websocket-client==1.8.0 wrapt==1.17.2
name: couler channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - argo-workflows==3.5.1 - cachetools==5.5.2 - certifi==2025.1.31 - cfgv==3.4.0 - charset-normalizer==3.4.1 - coverage==7.8.0 - deprecated==1.2.18 - distlib==0.3.9 - docker==4.1.0 - durationpy==0.9 - exceptiongroup==1.2.2 - filelock==3.18.0 - google-auth==2.38.0 - identify==2.6.9 - idna==3.10 - iniconfig==2.1.0 - kubernetes==32.0.1 - mock==5.2.0 - nodeenv==1.9.1 - oauthlib==3.2.2 - packaging==24.2 - platformdirs==4.3.7 - pluggy==1.5.0 - pre-commit==4.2.0 - pyaml==25.1.0 - pyasn1==0.6.1 - pyasn1-modules==0.4.2 - pytest==8.3.5 - pytest-cov==6.0.0 - python-dateutil==2.9.0.post0 - pyyaml==6.0.2 - requests==2.32.3 - requests-oauthlib==2.0.0 - rsa==4.9 - six==1.17.0 - tomli==2.2.1 - urllib3==2.3.0 - virtualenv==20.29.3 - websocket-client==1.8.0 - wrapt==1.17.2 prefix: /opt/conda/envs/couler
[ "couler/tests/argo_test.py::ArgoTest::test_run_container_with_node_selector" ]
[]
[ "couler/tests/argo_test.py::ArgoTest::test_create_job", "couler/tests/argo_test.py::ArgoTest::test_run_bash_script", "couler/tests/argo_test.py::ArgoTest::test_run_container_with_dependency_implicit_params_passing", "couler/tests/argo_test.py::ArgoTest::test_run_container_with_volume", "couler/tests/argo_test.py::ArgoTest::test_run_container_with_workflow_volume", "couler/tests/argo_test.py::ArgoTest::test_run_default_script", "couler/tests/argo_test.py::ArgoTest::test_run_job_with_dependency_implicit_params_passing_from_container", "couler/tests/argo_test.py::ArgoTest::test_run_job_with_dependency_implicit_params_passing_from_job", "couler/tests/argo_test.py::ArgoTest::test_run_none_source", "couler/tests/argo_test.py::ArgoTest::test_run_python_script", "couler/tests/argo_test.py::ArgoTest::test_set_dependencies_with_exit_handler" ]
[]
Apache License 2.0
8,710
457
[ "couler/core/run_templates.py", "couler/core/templates/container.py" ]
just-work__fffw-71
a5b5cacad54b502e9ee0ccd539e9dfcd7bf60eec
2020-10-15 16:25:01
f5b9a860c6aca1a6c8cf8f5dfde5f84ed8e7aabf
diff --git a/fffw/encoding/filters.py b/fffw/encoding/filters.py index 43ed444..2564388 100644 --- a/fffw/encoding/filters.py +++ b/fffw/encoding/filters.py @@ -157,6 +157,7 @@ class Scale(VideoFilter): :arg height: resulting video height """ filter = "scale" + hardware = None # cpu only width: int = param(name='w') height: int = param(name='h') @@ -202,10 +203,6 @@ class Split(AutoFilter): return '' return str(self.output_count) - def validate_edge_device(self, edge: base.Edge) -> None: - # Any device is supported - return - @dataclass class Trim(AutoFilter): @@ -355,10 +352,6 @@ class Concat(Filter): kwargs['samples'] = samples return replace(metadata[0], **kwargs) - def validate_edge_device(self, edge: base.Edge) -> None: - # Any device is supported - return - @dataclass class Overlay(VideoFilter): diff --git a/fffw/encoding/mixins.py b/fffw/encoding/mixins.py index e74310c..a0e4a2d 100644 --- a/fffw/encoding/mixins.py +++ b/fffw/encoding/mixins.py @@ -1,4 +1,4 @@ -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Optional from fffw.graph import base, VIDEO @@ -9,7 +9,7 @@ else: class StreamValidationMixin(StreamValidationTarget): - hardware: str + hardware: Optional[str] def connect_edge(self, edge: base.Edge) -> base.Edge: self.validate_edge_kind(edge) @@ -30,7 +30,11 @@ class StreamValidationMixin(StreamValidationTarget): meta = edge.get_meta_data(self) if meta is None: return - filter_hardware = getattr(self, 'hardware', None) + try: + filter_hardware = getattr(self, 'hardware') + except AttributeError: + # no hardware restrictions for filter/codec + return device = getattr(meta, 'device', None) edge_hardware = None if device is None else device.hardware if filter_hardware != edge_hardware:
Split and concat should validate against any hardware. Splitting and concatenation are supported by every hardware acceleration backend.
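An illustrative sketch, adapted from the accompanying patch and tests (the filter names here are made up): a filter class that leaves `hardware` unset imposes no device restriction, while a CPU-only filter declares `hardware = None` explicitly.

```python
from dataclasses import dataclass

from fffw.encoding.filters import VideoFilter


@dataclass
class CpuOnlyFilter(VideoFilter):
    filter = 'myscale'   # hypothetical ffmpeg filter name
    hardware = None      # explicit: only plain CPU streams pass validation


@dataclass
class UniversalFilter(VideoFilter):
    filter = 'myfilter'  # hypothetical ffmpeg filter name
    # `hardware` deliberately left undefined: any device (CPU, cuda, ...)
    # is accepted, which is how split/concat should behave.
```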
just-work/fffw
diff --git a/tests/test_encoding.py b/tests/test_encoding.py index 005be16..959746a 100644 --- a/tests/test_encoding.py +++ b/tests/test_encoding.py @@ -1,3 +1,4 @@ +from dataclasses import dataclass from unittest import TestCase from fffw.graph import StreamType, VIDEO, AUDIO, video_meta_data @@ -81,8 +82,13 @@ class InputsTestCase(TestCase): hardware='cuda', device='foo') + @dataclass + class X264(VideoCodec): + codec = 'libx264' + hardware = None # cpu only + with self.assertRaises(ValueError): - src.video > VideoCodec('libx264') + src.video > X264() with self.assertRaises(ValueError): src.video | filters.Scale(640, 360) diff --git a/tests/test_graph.py b/tests/test_graph.py index 6b9bb4c..dd68ec9 100644 --- a/tests/test_graph.py +++ b/tests/test_graph.py @@ -306,16 +306,20 @@ class FilterGraphTestCase(TestCase): cuda = meta.Device(hardware='cuda', name='foo') self.source.video | Upload(device=cuda) | ScaleCuda(640, 360) - def test_concat_split_allows_any_hardware(self): + def test_any_hardware_filter(self): """ - Concat and split filters allow any hardware acceleration. + A filter may be defined that allows to be ran on any hardware """ + + @dataclass + class UniversalFilter(VideoFilter): + filter = 'filter' + # not setting hardware - universal filter + try: cuda = meta.Device(hardware='cuda', name='foo') - hw = self.source.video | Upload(device=cuda) - split = hw | Split(VIDEO, output_count=2) - concat = Concat(VIDEO, input_count=2) - split | concat - split | concat + s = self.source.video | Split(VIDEO) + s | UniversalFilter() + s | Upload(device=cuda) | UniversalFilter() except ValueError: # pragma: no cover self.fail("hardware validation unexpectedly failed")
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 2, "test_score": 3 }, "num_modified_files": 2 }
3.1
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y ffmpeg mediainfo" ], "python": "3.8", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
exceptiongroup==1.2.2 -e git+https://github.com/just-work/fffw.git@a5b5cacad54b502e9ee0ccd539e9dfcd7bf60eec#egg=fffw iniconfig==2.1.0 packaging==24.2 pluggy==1.5.0 pymediainfo==4.2.1 pytest==8.3.5 tomli==2.2.1
name: fffw channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=24.2=py38h06a4308_0 - python=3.8.20=he870216_0 - readline=8.2=h5eee18b_0 - setuptools=75.1.0=py38h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.44.0=py38h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - exceptiongroup==1.2.2 - iniconfig==2.1.0 - packaging==24.2 - pluggy==1.5.0 - pymediainfo==4.2.1 - pytest==8.3.5 - tomli==2.2.1 prefix: /opt/conda/envs/fffw
[ "tests/test_graph.py::FilterGraphTestCase::test_any_hardware_filter" ]
[]
[ "tests/test_encoding.py::InputsTestCase::test_append_source", "tests/test_encoding.py::InputsTestCase::test_default_input", "tests/test_encoding.py::InputsTestCase::test_input_list", "tests/test_encoding.py::InputsTestCase::test_validate_input_hardware", "tests/test_encoding.py::InputsTestCase::test_validate_stream_kind", "tests/test_encoding.py::OutputsTestCase::test_codec_validates_hardware_device", "tests/test_encoding.py::OutputsTestCase::test_codec_validates_stream_kind", "tests/test_graph.py::FilterGraphTestCase::test_concat_audio_metadata", "tests/test_graph.py::FilterGraphTestCase::test_concat_metadata", "tests/test_graph.py::FilterGraphTestCase::test_disabled_filters", "tests/test_graph.py::FilterGraphTestCase::test_filter_graph", "tests/test_graph.py::FilterGraphTestCase::test_filter_validates_hardware_device", "tests/test_graph.py::FilterGraphTestCase::test_filter_validates_stream_kind", "tests/test_graph.py::FilterGraphTestCase::test_overlay_metadata", "tests/test_graph.py::FilterGraphTestCase::test_scale_changes_metadata", "tests/test_graph.py::FilterGraphTestCase::test_setpts_metadata", "tests/test_graph.py::FilterGraphTestCase::test_skip_not_connected_sources", "tests/test_graph.py::FilterGraphTestCase::test_trim_metadata" ]
[]
MIT License
8,715
579
[ "fffw/encoding/filters.py", "fffw/encoding/mixins.py" ]
arrow-py__arrow-868
fe9602e2171d49d0d95d775e71f9735c3aaf92e4
2020-10-15 19:27:08
37f4dbb5188f61cc8194992810f3b9b761b4e37d
codecov[bot]: # [Codecov](https://codecov.io/gh/arrow-py/arrow/pull/868?src=pr&el=h1) Report > Merging [#868](https://codecov.io/gh/arrow-py/arrow/pull/868?src=pr&el=desc) into [master](https://codecov.io/gh/arrow-py/arrow/commit/7cccf6a7034bea1616d1df413f90bf902be77a57?el=desc) will **not change** coverage. > The diff coverage is `100.00%`. [![Impacted file tree graph](https://codecov.io/gh/arrow-py/arrow/pull/868/graphs/tree.svg?width=650&height=150&src=pr&token=4EkGnFsIGn)](https://codecov.io/gh/arrow-py/arrow/pull/868?src=pr&el=tree) ```diff @@ Coverage Diff @@ ## master #868 +/- ## ========================================= Coverage 100.00% 100.00% ========================================= Files 9 9 Lines 1809 1816 +7 Branches 312 312 ========================================= + Hits 1809 1816 +7 ``` | [Impacted Files](https://codecov.io/gh/arrow-py/arrow/pull/868?src=pr&el=tree) | Coverage Δ | | |---|---|---| | [arrow/arrow.py](https://codecov.io/gh/arrow-py/arrow/pull/868/diff?src=pr&el=tree#diff-YXJyb3cvYXJyb3cucHk=) | `100.00% <100.00%> (ø)` | | | [arrow/util.py](https://codecov.io/gh/arrow-py/arrow/pull/868/diff?src=pr&el=tree#diff-YXJyb3cvdXRpbC5weQ==) | `100.00% <100.00%> (ø)` | | ------ [Continue to review full report at Codecov](https://codecov.io/gh/arrow-py/arrow/pull/868?src=pr&el=continue). > **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta) > `Δ = absolute <relative> (impact)`, `ø = not affected`, `? = missing data` > Powered by [Codecov](https://codecov.io/gh/arrow-py/arrow/pull/868?src=pr&el=footer). Last update [7cccf6a...9dba7d7](https://codecov.io/gh/arrow-py/arrow/pull/868?src=pr&el=lastupdated). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments). systemcatch: > Looks good but I think we should get rid of the magic constants and instead refer to the static class constants. 👍 > Also are you planning to refactor and break up the humanize function in another PR? Yeah that can wait. systemcatch: Right so I've made changes based on both your suggestions, however there are still some problems to work out. For example: ```shell (arrow) chris@ThinkPad:~/arrow$ python Python 3.8.3 (default, Jul 7 2020, 18:57:36) [GCC 9.3.0] on linux Type "help", "copyright", "credits" or "license" for more information. >>> import arrow >>> dt=arrow.utcnow() >>> later=dt.shift(years=1) >>> later.humanize(dt) 'in a year' >>> later.humanize(dt, granularity="year") 'in 0 years' ``` This happens because `_SECONDS_PER_YEAR` is `60*60*24*365.25` whereas I've used `60*60*24*365`. I tend to think it's not worth worrying about leap years in humanize. There will be a similar problem with months which I don't think is such an issue. jadchaar: > > > Right so I've made changes based on both your suggestions, however there are still some problems to work out. > > For example: > > ```shell > (arrow) chris@ThinkPad:~/arrow$ python > Python 3.8.3 (default, Jul 7 2020, 18:57:36) > [GCC 9.3.0] on linux > Type "help", "copyright", "credits" or "license" for more information. > >>> import arrow > >>> dt=arrow.utcnow() > >>> later=dt.shift(years=1) > >>> later.humanize(dt) > 'in a year' > >>> later.humanize(dt, granularity="year") > 'in 0 years' > ``` > > This happens because `_SECONDS_PER_YEAR` is `60*60*24*365.25` whereas I've used `60*60*24*365`. I tend to think it's not worth worrying about leap years in humanize. There will be a similar problem with months which I don't think is such an issue. 
Do you think we should define a class in constants called Humanize constants? Or even a static inner class of the Arrow object? Because I don't think these .25 adjustments for leap years should be used for humanize. Thoughts? systemcatch: > Do you think we should define a class in constants called Humanize constants? Or even a static inner class of the Arrow object? Because I don't think these .25 adjustments for leap years should be used for humanize. Thoughts? Maybe we just change `_SECS_PER_YEAR` to `60*60*24*365`? It's not used anywhere else apart from humanize. jadchaar: > > Do you think we should define a class in constants called Humanize constants? Or even a static inner class of the Arrow object? Because I don't think these .25 adjustments for leap years should be used for humanize. Thoughts? > > Maybe we just change `_SECS_PER_YEAR` to `60*60*24*365`? It's not used anywhere else apart from humanize. Yeah I think it makes most sense to accommodate it for where it is being used. What do you think? systemcatch: @jadchaar I've changed the value to `60*60*24*365` systemcatch: @jadchaar I made your suggested changes, we should be good to go here.
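For the constant discussed above, a small arithmetic sketch (plain Python, not arrow's internal code path) of why the leap-adjusted year length truncates a one-year shift to zero:

```python
SECS_PER_DAY = 60 * 60 * 24

leap_adjusted_year = SECS_PER_DAY * 365.25  # old _SECS_PER_YEAR value
simple_year = SECS_PER_DAY * 365            # value adopted in the patch

one_year_shift = SECS_PER_DAY * 365         # delta of a non-leap calendar year

print(int(one_year_shift / leap_adjusted_year))  # 0 -> source of "in 0 years"
print(int(one_year_shift / simple_year))         # 1
```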
diff --git a/arrow/arrow.py b/arrow/arrow.py index 4fe9541..f3706b5 100644 --- a/arrow/arrow.py +++ b/arrow/arrow.py @@ -72,7 +72,7 @@ class Arrow(object): _SECS_PER_DAY = float(60 * 60 * 24) _SECS_PER_WEEK = float(60 * 60 * 24 * 7) _SECS_PER_MONTH = float(60 * 60 * 24 * 30.5) - _SECS_PER_YEAR = float(60 * 60 * 24 * 365.25) + _SECS_PER_YEAR = float(60 * 60 * 24 * 365) def __init__( self, @@ -1012,42 +1012,41 @@ class Arrow(object): if diff < 10: return locale.describe("now", only_distance=only_distance) - if diff < 45: + if diff < self._SECS_PER_MINUTE: seconds = sign * delta return locale.describe( "seconds", seconds, only_distance=only_distance ) - elif diff < 90: + elif diff < self._SECS_PER_MINUTE * 2: return locale.describe("minute", sign, only_distance=only_distance) - elif diff < 2700: - minutes = sign * int(max(delta / 60, 2)) + elif diff < self._SECS_PER_HOUR: + minutes = sign * int(max(delta / self._SECS_PER_MINUTE, 2)) return locale.describe( "minutes", minutes, only_distance=only_distance ) - elif diff < 5400: + elif diff < self._SECS_PER_HOUR * 2: return locale.describe("hour", sign, only_distance=only_distance) - elif diff < 79200: - hours = sign * int(max(delta / 3600, 2)) + elif diff < self._SECS_PER_DAY: + hours = sign * int(max(delta / self._SECS_PER_HOUR, 2)) return locale.describe("hours", hours, only_distance=only_distance) - - # anything less than 48 hours should be 1 day - elif diff < 172800: + elif diff < self._SECS_PER_DAY * 2: return locale.describe("day", sign, only_distance=only_distance) - elif diff < 554400: - days = sign * int(max(delta / 86400, 2)) + elif diff < self._SECS_PER_WEEK: + days = sign * int(max(delta / self._SECS_PER_DAY, 2)) return locale.describe("days", days, only_distance=only_distance) - elif diff < 907200: + elif diff < self._SECS_PER_WEEK * 2: return locale.describe("week", sign, only_distance=only_distance) - elif diff < 2419200: - weeks = sign * int(max(delta / 604800, 2)) + elif diff < self._SECS_PER_MONTH: + weeks = sign * int(max(delta / self._SECS_PER_WEEK, 2)) return locale.describe("weeks", weeks, only_distance=only_distance) - elif diff < 3888000: + elif diff < self._SECS_PER_MONTH * 2: return locale.describe("month", sign, only_distance=only_distance) - elif diff < 29808000: + elif diff < self._SECS_PER_YEAR: + # TODO revisit for humanization during leap years self_months = self._datetime.year * 12 + self._datetime.month other_months = dt.year * 12 + dt.month @@ -1057,10 +1056,10 @@ class Arrow(object): "months", months, only_distance=only_distance ) - elif diff < 47260800: + elif diff < self._SECS_PER_YEAR * 2: return locale.describe("year", sign, only_distance=only_distance) else: - years = sign * int(max(delta / 31536000, 2)) + years = sign * int(max(delta / self._SECS_PER_YEAR, 2)) return locale.describe("years", years, only_distance=only_distance) elif util.isstr(granularity):
humanize() rounding problem ## Issue Description Not sure if this is wanted or a bug. `humanize()` returns "a week ago" for deltas < 7 days. For example: ``` >>> arrow.utcnow().shift(days=-6, hours=-9).humanize() '6 days ago' >>> arrow.utcnow().shift(days=-6, hours=-10).humanize() 'a week ago' ``` I would expect "a week ago" to be returned after 7 days have passed. ## System Info - 🖥 **OS name and version**: Fedora 32 - 🐍 **Python version**: Python 3.8.5 - 🏹 **Arrow version**: arrow 0.16.0
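A quick sketch of the boundary behaviour the reporter expects (and which the patch above implements); the printed values are the expected outputs, not guaranteed on older releases:

```python
import arrow

now = arrow.utcnow()

# Just under a full week: should still be reported in days.
print(now.shift(days=-6, hours=-23).humanize())  # expected: '6 days ago'

# A full week or more: 'a week ago' becomes appropriate.
print(now.shift(weeks=-1).humanize())            # expected: 'a week ago'
```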
arrow-py/arrow
diff --git a/tests/test_arrow.py b/tests/test_arrow.py index b0bd20a..872edc0 100644 --- a/tests/test_arrow.py +++ b/tests/test_arrow.py @@ -1772,11 +1772,12 @@ class TestArrowHumanize: ) == "37 months and 4 weeks" ) + # this will change when leap years are implemented assert ( self.now.humanize( later108onlydistance, only_distance=True, granularity=["year", "second"] ) - == "3 years and 5327200 seconds" + == "3 years and 5392000 seconds" ) one_min_one_sec_ago = self.now.shift(minutes=-1, seconds=-1) @@ -1909,16 +1910,26 @@ class TestArrowHumanize: assert self.now.humanize(later, only_distance=True) == "2 weeks" assert later.humanize(self.now, only_distance=True) == "2 weeks" + @pytest.mark.xfail(reason="known issue with humanize month limits") def test_month(self): later = self.now.shift(months=1) + # TODO this test now returns "4 weeks ago", we need to fix this to be correct on a per month basis assert self.now.humanize(later) == "a month ago" assert later.humanize(self.now) == "in a month" assert self.now.humanize(later, only_distance=True) == "a month" assert later.humanize(self.now, only_distance=True) == "a month" + def test_month_plus_4_days(self): + + # TODO needed for coverage, remove when month limits are fixed + later = self.now.shift(months=1, days=4) + + assert self.now.humanize(later) == "a month ago" + assert later.humanize(self.now) == "in a month" + def test_months(self): later = self.now.shift(months=2) @@ -1954,7 +1965,7 @@ class TestArrowHumanize: result = arw.humanize(self.datetime) - assert result == "in 2 years" + assert result == "in a year" def test_arrow(self): @@ -1998,6 +2009,16 @@ class TestArrowHumanize: assert result == "just now" + def test_week_limit(self): + # regression test for issue #848 + arw = arrow.Arrow.utcnow() + + later = arw.shift(weeks=+1) + + result = arw.humanize(later) + + assert result == "a week ago" + def test_untranslated_granularity(self, mocker): arw = arrow.Arrow.utcnow() @@ -2033,7 +2054,7 @@ class TestArrowHumanizeTestsWithLocale: result = arw.humanize(self.datetime, locale="ru") - assert result == "2 года назад" + assert result == "год назад" class TestArrowIsBetween:
{ "commit_name": "merge_commit", "failed_lite_validators": [], "has_test_patch": true, "is_lite": true, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 2 }, "num_modified_files": 1 }
0.17
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "pytest-mock" ], "pre_install": null, "python": "3.9", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
-e git+https://github.com/arrow-py/arrow.git@fe9602e2171d49d0d95d775e71f9735c3aaf92e4#egg=arrow coverage==7.8.0 dateparser==0.7.6 exceptiongroup==1.2.2 iniconfig==2.1.0 packaging==24.2 pluggy==1.5.0 pytest==8.3.5 pytest-cov==2.10.1 pytest-mock==3.14.0 python-dateutil==2.8.2 pytz==2019.3 regex==2024.11.6 simplejson==3.17.6 six==1.17.0 tomli==2.2.1 tzlocal==5.3.1
name: arrow channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - coverage==7.8.0 - dateparser==0.7.6 - exceptiongroup==1.2.2 - iniconfig==2.1.0 - packaging==24.2 - pluggy==1.5.0 - pytest==8.3.5 - pytest-cov==2.10.1 - pytest-mock==3.14.0 - python-dateutil==2.8.2 - pytz==2019.3 - regex==2024.11.6 - simplejson==3.17.6 - six==1.17.0 - tomli==2.2.1 - tzlocal==5.3.1 prefix: /opt/conda/envs/arrow
[ "tests/test_arrow.py::TestArrowHumanize::test_multiple_granularity", "tests/test_arrow.py::TestArrowHumanize::test_years", "tests/test_arrow.py::TestArrowHumanizeTestsWithLocale::test_years" ]
[]
[ "tests/test_arrow.py::TestTestArrowInit::test_init_bad_input", "tests/test_arrow.py::TestTestArrowInit::test_init", "tests/test_arrow.py::TestTestArrowInit::test_init_pytz_timezone", "tests/test_arrow.py::TestTestArrowInit::test_init_with_fold", "tests/test_arrow.py::TestTestArrowFactory::test_now", "tests/test_arrow.py::TestTestArrowFactory::test_utcnow", "tests/test_arrow.py::TestTestArrowFactory::test_fromtimestamp", "tests/test_arrow.py::TestTestArrowFactory::test_utcfromtimestamp", "tests/test_arrow.py::TestTestArrowFactory::test_fromdatetime", "tests/test_arrow.py::TestTestArrowFactory::test_fromdatetime_dt_tzinfo", "tests/test_arrow.py::TestTestArrowFactory::test_fromdatetime_tzinfo_arg", "tests/test_arrow.py::TestTestArrowFactory::test_fromdate", "tests/test_arrow.py::TestTestArrowFactory::test_strptime", "tests/test_arrow.py::TestTestArrowRepresentation::test_repr", "tests/test_arrow.py::TestTestArrowRepresentation::test_str", "tests/test_arrow.py::TestTestArrowRepresentation::test_hash", "tests/test_arrow.py::TestTestArrowRepresentation::test_format", "tests/test_arrow.py::TestTestArrowRepresentation::test_bare_format", "tests/test_arrow.py::TestTestArrowRepresentation::test_format_no_format_string", "tests/test_arrow.py::TestTestArrowRepresentation::test_clone", "tests/test_arrow.py::TestArrowAttribute::test_getattr_base", "tests/test_arrow.py::TestArrowAttribute::test_getattr_week", "tests/test_arrow.py::TestArrowAttribute::test_getattr_quarter", "tests/test_arrow.py::TestArrowAttribute::test_getattr_dt_value", "tests/test_arrow.py::TestArrowAttribute::test_tzinfo", "tests/test_arrow.py::TestArrowAttribute::test_naive", "tests/test_arrow.py::TestArrowAttribute::test_timestamp", "tests/test_arrow.py::TestArrowAttribute::test_int_timestamp", "tests/test_arrow.py::TestArrowAttribute::test_float_timestamp", "tests/test_arrow.py::TestArrowAttribute::test_getattr_fold", "tests/test_arrow.py::TestArrowAttribute::test_getattr_ambiguous", "tests/test_arrow.py::TestArrowAttribute::test_getattr_imaginary", "tests/test_arrow.py::TestArrowComparison::test_eq", "tests/test_arrow.py::TestArrowComparison::test_ne", "tests/test_arrow.py::TestArrowComparison::test_gt", "tests/test_arrow.py::TestArrowComparison::test_ge", "tests/test_arrow.py::TestArrowComparison::test_lt", "tests/test_arrow.py::TestArrowComparison::test_le", "tests/test_arrow.py::TestArrowMath::test_add_timedelta", "tests/test_arrow.py::TestArrowMath::test_add_other", "tests/test_arrow.py::TestArrowMath::test_radd", "tests/test_arrow.py::TestArrowMath::test_sub_timedelta", "tests/test_arrow.py::TestArrowMath::test_sub_datetime", "tests/test_arrow.py::TestArrowMath::test_sub_arrow", "tests/test_arrow.py::TestArrowMath::test_sub_other", "tests/test_arrow.py::TestArrowMath::test_rsub_datetime", "tests/test_arrow.py::TestArrowMath::test_rsub_other", "tests/test_arrow.py::TestArrowDatetimeInterface::test_date", "tests/test_arrow.py::TestArrowDatetimeInterface::test_time", "tests/test_arrow.py::TestArrowDatetimeInterface::test_timetz", "tests/test_arrow.py::TestArrowDatetimeInterface::test_astimezone", "tests/test_arrow.py::TestArrowDatetimeInterface::test_utcoffset", "tests/test_arrow.py::TestArrowDatetimeInterface::test_dst", "tests/test_arrow.py::TestArrowDatetimeInterface::test_timetuple", "tests/test_arrow.py::TestArrowDatetimeInterface::test_utctimetuple", "tests/test_arrow.py::TestArrowDatetimeInterface::test_toordinal", "tests/test_arrow.py::TestArrowDatetimeInterface::test_weekday", 
"tests/test_arrow.py::TestArrowDatetimeInterface::test_isoweekday", "tests/test_arrow.py::TestArrowDatetimeInterface::test_isocalendar", "tests/test_arrow.py::TestArrowDatetimeInterface::test_isoformat", "tests/test_arrow.py::TestArrowDatetimeInterface::test_simplejson", "tests/test_arrow.py::TestArrowDatetimeInterface::test_ctime", "tests/test_arrow.py::TestArrowDatetimeInterface::test_strftime", "tests/test_arrow.py::TestArrowFalsePositiveDst::test_dst", "tests/test_arrow.py::TestArrowConversion::test_to", "tests/test_arrow.py::TestArrowConversion::test_to_pacific_then_utc", "tests/test_arrow.py::TestArrowConversion::test_to_amsterdam_then_utc", "tests/test_arrow.py::TestArrowConversion::test_to_israel_same_offset", "tests/test_arrow.py::TestArrowConversion::test_anchorage_dst", "tests/test_arrow.py::TestArrowConversion::test_chicago_fall", "tests/test_arrow.py::TestArrowConversion::test_toronto_gap", "tests/test_arrow.py::TestArrowConversion::test_sydney_gap", "tests/test_arrow.py::TestArrowPickling::test_pickle_and_unpickle", "tests/test_arrow.py::TestArrowReplace::test_not_attr", "tests/test_arrow.py::TestArrowReplace::test_replace", "tests/test_arrow.py::TestArrowReplace::test_replace_tzinfo", "tests/test_arrow.py::TestArrowReplace::test_replace_fold", "tests/test_arrow.py::TestArrowReplace::test_replace_fold_and_other", "tests/test_arrow.py::TestArrowReplace::test_replace_week", "tests/test_arrow.py::TestArrowReplace::test_replace_quarter", "tests/test_arrow.py::TestArrowReplace::test_replace_quarter_and_fold", "tests/test_arrow.py::TestArrowReplace::test_replace_other_kwargs", "tests/test_arrow.py::TestArrowShift::test_not_attr", "tests/test_arrow.py::TestArrowShift::test_shift", "tests/test_arrow.py::TestArrowShift::test_shift_negative", "tests/test_arrow.py::TestArrowShift::test_shift_quarters_bug", "tests/test_arrow.py::TestArrowShift::test_shift_positive_imaginary", "tests/test_arrow.py::TestArrowShift::test_shift_negative_imaginary", "tests/test_arrow.py::TestArrowShift::test_shift_kiritimati", "tests/test_arrow.py::TestArrowRange::test_year", "tests/test_arrow.py::TestArrowRange::test_quarter", "tests/test_arrow.py::TestArrowRange::test_month", "tests/test_arrow.py::TestArrowRange::test_week", "tests/test_arrow.py::TestArrowRange::test_day", "tests/test_arrow.py::TestArrowRange::test_hour", "tests/test_arrow.py::TestArrowRange::test_minute", "tests/test_arrow.py::TestArrowRange::test_second", "tests/test_arrow.py::TestArrowRange::test_arrow", "tests/test_arrow.py::TestArrowRange::test_naive_tz", "tests/test_arrow.py::TestArrowRange::test_aware_same_tz", "tests/test_arrow.py::TestArrowRange::test_aware_different_tz", "tests/test_arrow.py::TestArrowRange::test_aware_tz", "tests/test_arrow.py::TestArrowRange::test_imaginary", "tests/test_arrow.py::TestArrowRange::test_unsupported", "tests/test_arrow.py::TestArrowRange::test_range_over_months_ending_on_different_days", "tests/test_arrow.py::TestArrowRange::test_range_over_quarter_months_ending_on_different_days", "tests/test_arrow.py::TestArrowRange::test_range_over_year_maintains_end_date_across_leap_year", "tests/test_arrow.py::TestArrowSpanRange::test_year", "tests/test_arrow.py::TestArrowSpanRange::test_quarter", "tests/test_arrow.py::TestArrowSpanRange::test_month", "tests/test_arrow.py::TestArrowSpanRange::test_week", "tests/test_arrow.py::TestArrowSpanRange::test_day", "tests/test_arrow.py::TestArrowSpanRange::test_days", "tests/test_arrow.py::TestArrowSpanRange::test_hour", 
"tests/test_arrow.py::TestArrowSpanRange::test_minute", "tests/test_arrow.py::TestArrowSpanRange::test_second", "tests/test_arrow.py::TestArrowSpanRange::test_naive_tz", "tests/test_arrow.py::TestArrowSpanRange::test_aware_same_tz", "tests/test_arrow.py::TestArrowSpanRange::test_aware_different_tz", "tests/test_arrow.py::TestArrowSpanRange::test_aware_tz", "tests/test_arrow.py::TestArrowSpanRange::test_bounds_param_is_passed", "tests/test_arrow.py::TestArrowInterval::test_incorrect_input", "tests/test_arrow.py::TestArrowInterval::test_correct", "tests/test_arrow.py::TestArrowInterval::test_bounds_param_is_passed", "tests/test_arrow.py::TestArrowSpan::test_span_attribute", "tests/test_arrow.py::TestArrowSpan::test_span_year", "tests/test_arrow.py::TestArrowSpan::test_span_quarter", "tests/test_arrow.py::TestArrowSpan::test_span_quarter_count", "tests/test_arrow.py::TestArrowSpan::test_span_year_count", "tests/test_arrow.py::TestArrowSpan::test_span_month", "tests/test_arrow.py::TestArrowSpan::test_span_week", "tests/test_arrow.py::TestArrowSpan::test_span_day", "tests/test_arrow.py::TestArrowSpan::test_span_hour", "tests/test_arrow.py::TestArrowSpan::test_span_minute", "tests/test_arrow.py::TestArrowSpan::test_span_second", "tests/test_arrow.py::TestArrowSpan::test_span_microsecond", "tests/test_arrow.py::TestArrowSpan::test_floor", "tests/test_arrow.py::TestArrowSpan::test_span_inclusive_inclusive", "tests/test_arrow.py::TestArrowSpan::test_span_exclusive_inclusive", "tests/test_arrow.py::TestArrowSpan::test_span_exclusive_exclusive", "tests/test_arrow.py::TestArrowSpan::test_bounds_are_validated", "tests/test_arrow.py::TestArrowHumanize::test_granularity", "tests/test_arrow.py::TestArrowHumanize::test_seconds", "tests/test_arrow.py::TestArrowHumanize::test_minute", "tests/test_arrow.py::TestArrowHumanize::test_minutes", "tests/test_arrow.py::TestArrowHumanize::test_hour", "tests/test_arrow.py::TestArrowHumanize::test_hours", "tests/test_arrow.py::TestArrowHumanize::test_day", "tests/test_arrow.py::TestArrowHumanize::test_days", "tests/test_arrow.py::TestArrowHumanize::test_week", "tests/test_arrow.py::TestArrowHumanize::test_weeks", "tests/test_arrow.py::TestArrowHumanize::test_month_plus_4_days", "tests/test_arrow.py::TestArrowHumanize::test_year", "tests/test_arrow.py::TestArrowHumanize::test_arrow", "tests/test_arrow.py::TestArrowHumanize::test_datetime_tzinfo", "tests/test_arrow.py::TestArrowHumanize::test_other", "tests/test_arrow.py::TestArrowHumanize::test_invalid_locale", "tests/test_arrow.py::TestArrowHumanize::test_none", "tests/test_arrow.py::TestArrowHumanize::test_week_limit", "tests/test_arrow.py::TestArrowHumanize::test_untranslated_granularity", "tests/test_arrow.py::TestArrowHumanizeTestsWithLocale::test_now", "tests/test_arrow.py::TestArrowHumanizeTestsWithLocale::test_seconds", "tests/test_arrow.py::TestArrowIsBetween::test_start_before_end", "tests/test_arrow.py::TestArrowIsBetween::test_exclusive_exclusive_bounds", "tests/test_arrow.py::TestArrowIsBetween::test_exclusive_exclusive_bounds_same_date", "tests/test_arrow.py::TestArrowIsBetween::test_inclusive_exclusive_bounds", "tests/test_arrow.py::TestArrowIsBetween::test_exclusive_inclusive_bounds", "tests/test_arrow.py::TestArrowIsBetween::test_inclusive_inclusive_bounds_same_date", "tests/test_arrow.py::TestArrowIsBetween::test_type_error_exception", "tests/test_arrow.py::TestArrowIsBetween::test_value_error_exception", "tests/test_arrow.py::TestArrowUtil::test_get_datetime", 
"tests/test_arrow.py::TestArrowUtil::test_get_tzinfo", "tests/test_arrow.py::TestArrowUtil::test_get_iteration_params" ]
[ "tests/test_arrow.py::TestArrowHumanize::test_months" ]
Apache License 2.0
8,718
1,073
[ "arrow/arrow.py" ]
googleapis__python-storage-298
3bf5c5213a3fac2058a07539e0a1e1c4497d5f07
2020-10-19 14:26:40
3bf5c5213a3fac2058a07539e0a1e1c4497d5f07
frankyn: @andrewsg PTAL when you have a moment. HemangChothani: @tseaver PTAL!
diff --git a/google/cloud/storage/blob.py b/google/cloud/storage/blob.py index f63303a..d7303e0 100644 --- a/google/cloud/storage/blob.py +++ b/google/cloud/storage/blob.py @@ -1608,6 +1608,8 @@ class Blob(_PropertyMixin): raise ValueError(msg) transport = self._get_transport(client) + if "metadata" in self._properties and "metadata" not in self._changes: + self._changes.add("metadata") info = self._get_upload_arguments(content_type) headers, object_metadata, content_type = info @@ -1775,6 +1777,8 @@ class Blob(_PropertyMixin): chunk_size = _DEFAULT_CHUNKSIZE transport = self._get_transport(client) + if "metadata" in self._properties and "metadata" not in self._changes: + self._changes.add("metadata") info = self._get_upload_arguments(content_type) headers, object_metadata, content_type = info if extra_headers is not None:
Blob.metadata not preserved when uploading new file content `blob.metadata` is not preserved when uploading new file content on an existing Blob instance with `blob.upload_from_string(...)` unless you first reset it with `blob.metadata = blob.metadata`. I suspect this bug / surprising behaviour is the result of the somewhat awkward mapping of the REST API onto mutable objects. It feels like setting metadata should really be a parameter of the blob upload methods instead of a mutable field on the instance. #### Environment details - OS type and version: Both locally on my Mac and within Google Cloud Functions - Python version: `python --version` Python 3.8.5 - pip version: `pip --version` pip 20.1.1 - `google-cloud-storage` version: `pip show google-cloud-storage` google-cloud-storage==1.31.2 #### Steps to reproduce See code example. #### Code example ```python >>> b = bucket.blob('testme1') >>> b.metadata = {'test': 'me'} >>> b.upload_from_string('foo') >>> b.metadata {'test': 'me'} >>> b.upload_from_string('bar') >>> b.metadata >>> # Note how b.metadata is now blank >>> b = bucket.blob('testme2') >>> b.metadata = {'test': 'me'} >>> b.upload_from_string('foo') >>> b.metadata {'test': 'me'} >>> b.metadata = b.metadata # <-- workaround >>> b.upload_from_string('bar') >>> b.metadata {'test': 'me'} >>> # Note how b.metadata is now preserved ```
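A minimal sketch of the behaviour the fix restores (the bucket and blob names are placeholders; running it needs Google Cloud credentials): custom metadata set on the blob should survive a second upload without the reset workaround.

```python
from google.cloud import storage

client = storage.Client()
bucket = client.bucket("my-bucket")   # placeholder bucket name

blob = bucket.blob("testme")
blob.metadata = {"test": "me"}
blob.upload_from_string("foo")
blob.upload_from_string("bar")        # re-upload new content

# With the fix, the stored metadata is re-sent on every upload.
print(blob.metadata)                  # expected: {'test': 'me'}
```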
googleapis/python-storage
diff --git a/tests/unit/test_blob.py b/tests/unit/test_blob.py index f713861..63f98eb 100644 --- a/tests/unit/test_blob.py +++ b/tests/unit/test_blob.py @@ -1828,12 +1828,17 @@ class Test_Blob(unittest.TestCase): if_metageneration_not_match=None, kms_key_name=None, timeout=None, + metadata=None, ): from six.moves.urllib.parse import urlencode bucket = _Bucket(name="w00t", user_project=user_project) blob = self._make_one(u"blob-name", bucket=bucket, kms_key_name=kms_key_name) self.assertIsNone(blob.chunk_size) + if metadata: + self.assertIsNone(blob.metadata) + blob._properties["metadata"] = metadata + self.assertEqual(len(blob._changes), 0) # Create mocks to be checked for doing transport. transport = self._mock_transport(http_client.OK, {}) @@ -1906,10 +1911,18 @@ class Test_Blob(unittest.TestCase): upload_url += "?" + urlencode(qs_params) + blob_data = b'{"name": "blob-name"}\r\n' + if metadata: + blob_data = ( + b'{"name": "blob-name", "metadata": ' + + json.dumps(metadata).encode("utf-8") + + b"}\r\n" + ) + self.assertEqual(blob._changes, set(["metadata"])) payload = ( b"--==0==\r\n" + b"content-type: application/json; charset=UTF-8\r\n\r\n" - + b'{"name": "blob-name"}\r\n' + + blob_data + b"--==0==\r\n" + b"content-type: application/xml\r\n\r\n" + data_read @@ -1974,6 +1987,10 @@ class Test_Blob(unittest.TestCase): mock_get_boundary, if_generation_not_match=4, if_metageneration_not_match=4 ) + @mock.patch(u"google.resumable_media._upload.get_boundary", return_value=b"==0==") + def test__do_multipart_upload_with_metadata(self, mock_get_boundary): + self._do_multipart_success(mock_get_boundary, metadata={"test": "test"}) + def test__do_multipart_upload_bad_size(self): blob = self._make_one(u"blob-name", bucket=None) @@ -2006,6 +2023,7 @@ class Test_Blob(unittest.TestCase): blob_chunk_size=786432, kms_key_name=None, timeout=None, + metadata=None, ): from six.moves.urllib.parse import urlencode from google.resumable_media.requests import ResumableUpload @@ -2013,7 +2031,12 @@ class Test_Blob(unittest.TestCase): bucket = _Bucket(name="whammy", user_project=user_project) blob = self._make_one(u"blob-name", bucket=bucket, kms_key_name=kms_key_name) - blob.metadata = {"rook": "takes knight"} + if metadata: + self.assertIsNone(blob.metadata) + blob._properties["metadata"] = metadata + self.assertEqual(len(blob._changes), 0) + else: + blob.metadata = {"rook": "takes knight"} blob.chunk_size = blob_chunk_size if blob_chunk_size is not None: self.assertIsNotNone(blob.chunk_size) @@ -2022,8 +2045,11 @@ class Test_Blob(unittest.TestCase): # Need to make sure **same** dict is used because ``json.dumps()`` # will depend on the hash order. - object_metadata = blob._get_writable_metadata() - blob._get_writable_metadata = mock.Mock(return_value=object_metadata, spec=[]) + if not metadata: + object_metadata = blob._get_writable_metadata() + blob._get_writable_metadata = mock.Mock( + return_value=object_metadata, spec=[] + ) # Create mocks to be checked for doing transport. resumable_url = "http://test.invalid?upload_id=hey-you" @@ -2107,6 +2133,8 @@ class Test_Blob(unittest.TestCase): self.assertNotEqual(blob.chunk_size, chunk_size) self.assertEqual(upload._chunk_size, chunk_size) self.assertIs(upload._stream, stream) + if metadata: + self.assertEqual(blob._changes, set(["metadata"])) if size is None: self.assertIsNone(upload._total_bytes) else: @@ -2125,8 +2153,11 @@ class Test_Blob(unittest.TestCase): # Make sure we never read from the stream. self.assertEqual(stream.tell(), 0) - # Check the mocks. 
- blob._get_writable_metadata.assert_called_once_with() + if metadata: + object_metadata = {"name": u"blob-name", "metadata": metadata} + else: + # Check the mocks. + blob._get_writable_metadata.assert_called_once_with() payload = json.dumps(object_metadata).encode("utf-8") expected_headers = { "content-type": "application/json; charset=UTF-8", @@ -2144,6 +2175,9 @@ class Test_Blob(unittest.TestCase): timeout=expected_timeout, ) + def test__initiate_resumable_upload_with_metadata(self): + self._initiate_resumable_helper(metadata={"test": "test"}) + def test__initiate_resumable_upload_with_custom_timeout(self): self._initiate_resumable_helper(timeout=9.58)
{ "commit_name": "merge_commit", "failed_lite_validators": [], "has_test_patch": true, "is_lite": true, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 0 }, "num_modified_files": 1 }
1.32
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "mock" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "requirements/base.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
cachetools==4.2.4 certifi==2025.1.31 charset-normalizer==3.4.1 coverage==7.8.0 exceptiongroup==1.2.2 google-api-core==2.10.2 google-auth==1.35.0 google-cloud-core==1.7.3 -e git+https://github.com/googleapis/python-storage.git@3bf5c5213a3fac2058a07539e0a1e1c4497d5f07#egg=google_cloud_storage google-crc32c==1.7.1 google-resumable-media==1.3.3 googleapis-common-protos==1.69.2 idna==3.10 iniconfig==2.1.0 mock==5.2.0 packaging==24.2 pluggy==1.5.0 protobuf==4.25.6 pyasn1==0.6.1 pyasn1_modules==0.4.2 pytest==8.3.5 pytest-cov==6.0.0 requests==2.32.3 rsa==4.9 six==1.17.0 tomli==2.2.1 urllib3==2.3.0
name: python-storage channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - cachetools==4.2.4 - certifi==2025.1.31 - charset-normalizer==3.4.1 - coverage==7.8.0 - exceptiongroup==1.2.2 - google-api-core==2.10.2 - google-auth==1.35.0 - google-cloud-core==1.7.3 - google-crc32c==1.7.1 - google-resumable-media==1.3.3 - googleapis-common-protos==1.69.2 - idna==3.10 - iniconfig==2.1.0 - mock==5.2.0 - packaging==24.2 - pluggy==1.5.0 - protobuf==4.25.6 - pyasn1==0.6.1 - pyasn1-modules==0.4.2 - pytest==8.3.5 - pytest-cov==6.0.0 - requests==2.32.3 - rsa==4.9 - six==1.17.0 - tomli==2.2.1 - urllib3==2.3.0 prefix: /opt/conda/envs/python-storage
[ "tests/unit/test_blob.py::Test_Blob::test__do_multipart_upload_with_metadata", "tests/unit/test_blob.py::Test_Blob::test__initiate_resumable_upload_with_metadata" ]
[]
[ "tests/unit/test_blob.py::Test_Blob::test__do_download_w_chunks_w_checksum", "tests/unit/test_blob.py::Test_Blob::test__do_download_w_chunks_w_custom_timeout", "tests/unit/test_blob.py::Test_Blob::test__do_download_w_chunks_w_range_w_raw", "tests/unit/test_blob.py::Test_Blob::test__do_download_w_chunks_w_range_wo_raw", "tests/unit/test_blob.py::Test_Blob::test__do_download_w_chunks_wo_checksum", "tests/unit/test_blob.py::Test_Blob::test__do_download_w_chunks_wo_range_w_raw", "tests/unit/test_blob.py::Test_Blob::test__do_download_w_chunks_wo_range_wo_raw", "tests/unit/test_blob.py::Test_Blob::test__do_download_wo_chunks_w_custom_timeout", "tests/unit/test_blob.py::Test_Blob::test__do_download_wo_chunks_w_range_w_raw", "tests/unit/test_blob.py::Test_Blob::test__do_download_wo_chunks_w_range_wo_raw", "tests/unit/test_blob.py::Test_Blob::test__do_download_wo_chunks_wo_range_w_raw", "tests/unit/test_blob.py::Test_Blob::test__do_download_wo_chunks_wo_range_wo_raw", "tests/unit/test_blob.py::Test_Blob::test__do_multipart_upload_bad_size", "tests/unit/test_blob.py::Test_Blob::test__do_multipart_upload_no_size", "tests/unit/test_blob.py::Test_Blob::test__do_multipart_upload_with_custom_timeout", "tests/unit/test_blob.py::Test_Blob::test__do_multipart_upload_with_generation_match", "tests/unit/test_blob.py::Test_Blob::test__do_multipart_upload_with_generation_not_match", "tests/unit/test_blob.py::Test_Blob::test__do_multipart_upload_with_kms", "tests/unit/test_blob.py::Test_Blob::test__do_multipart_upload_with_kms_with_version", "tests/unit/test_blob.py::Test_Blob::test__do_multipart_upload_with_retry", "tests/unit/test_blob.py::Test_Blob::test__do_multipart_upload_with_size", "tests/unit/test_blob.py::Test_Blob::test__do_multipart_upload_with_user_project", "tests/unit/test_blob.py::Test_Blob::test__do_resumable_upload_no_size", "tests/unit/test_blob.py::Test_Blob::test__do_resumable_upload_with_custom_timeout", "tests/unit/test_blob.py::Test_Blob::test__do_resumable_upload_with_data_corruption", "tests/unit/test_blob.py::Test_Blob::test__do_resumable_upload_with_predefined_acl", "tests/unit/test_blob.py::Test_Blob::test__do_resumable_upload_with_retry", "tests/unit/test_blob.py::Test_Blob::test__do_resumable_upload_with_size", "tests/unit/test_blob.py::Test_Blob::test__do_upload_uses_multipart", "tests/unit/test_blob.py::Test_Blob::test__do_upload_uses_multipart_w_custom_timeout", "tests/unit/test_blob.py::Test_Blob::test__do_upload_uses_resumable", "tests/unit/test_blob.py::Test_Blob::test__do_upload_uses_resumable_w_custom_timeout", "tests/unit/test_blob.py::Test_Blob::test__do_upload_with_retry", "tests/unit/test_blob.py::Test_Blob::test__encryption_headers_w_encryption_key", "tests/unit/test_blob.py::Test_Blob::test__encryption_headers_wo_encryption_key", "tests/unit/test_blob.py::Test_Blob::test__get_content_type_default", "tests/unit/test_blob.py::Test_Blob::test__get_content_type_explicit", "tests/unit/test_blob.py::Test_Blob::test__get_content_type_from_blob", "tests/unit/test_blob.py::Test_Blob::test__get_content_type_from_filename", "tests/unit/test_blob.py::Test_Blob::test__get_download_url_on_the_fly", "tests/unit/test_blob.py::Test_Blob::test__get_download_url_on_the_fly_with_generation", "tests/unit/test_blob.py::Test_Blob::test__get_download_url_on_the_fly_with_kms_key_name", "tests/unit/test_blob.py::Test_Blob::test__get_download_url_on_the_fly_with_user_project", "tests/unit/test_blob.py::Test_Blob::test__get_download_url_with_generation_match", 
"tests/unit/test_blob.py::Test_Blob::test__get_download_url_with_media_link", "tests/unit/test_blob.py::Test_Blob::test__get_download_url_with_media_link_w_user_project", "tests/unit/test_blob.py::Test_Blob::test__get_transport", "tests/unit/test_blob.py::Test_Blob::test__get_upload_arguments", "tests/unit/test_blob.py::Test_Blob::test__get_writable_metadata_no_changes", "tests/unit/test_blob.py::Test_Blob::test__get_writable_metadata_unwritable_field", "tests/unit/test_blob.py::Test_Blob::test__get_writable_metadata_with_changes", "tests/unit/test_blob.py::Test_Blob::test__initiate_resumable_upload_no_size", "tests/unit/test_blob.py::Test_Blob::test__initiate_resumable_upload_with_chunk_size", "tests/unit/test_blob.py::Test_Blob::test__initiate_resumable_upload_with_custom_timeout", "tests/unit/test_blob.py::Test_Blob::test__initiate_resumable_upload_with_extra_headers", "tests/unit/test_blob.py::Test_Blob::test__initiate_resumable_upload_with_generation_match", "tests/unit/test_blob.py::Test_Blob::test__initiate_resumable_upload_with_generation_not_match", "tests/unit/test_blob.py::Test_Blob::test__initiate_resumable_upload_with_kms", "tests/unit/test_blob.py::Test_Blob::test__initiate_resumable_upload_with_kms_with_version", "tests/unit/test_blob.py::Test_Blob::test__initiate_resumable_upload_with_predefined_acl", "tests/unit/test_blob.py::Test_Blob::test__initiate_resumable_upload_with_retry", "tests/unit/test_blob.py::Test_Blob::test__initiate_resumable_upload_with_size", "tests/unit/test_blob.py::Test_Blob::test__initiate_resumable_upload_with_user_project", "tests/unit/test_blob.py::Test_Blob::test__initiate_resumable_upload_without_chunk_size", "tests/unit/test_blob.py::Test_Blob::test__query_params_default", "tests/unit/test_blob.py::Test_Blob::test__query_params_w_generation", "tests/unit/test_blob.py::Test_Blob::test__query_params_w_user_project", "tests/unit/test_blob.py::Test_Blob::test__set_metadata_to_none", "tests/unit/test_blob.py::Test_Blob::test__set_properties_w_kms_key_name", "tests/unit/test_blob.py::Test_Blob::test__set_properties_wo_kms_key_name", "tests/unit/test_blob.py::Test_Blob::test_acl_property", "tests/unit/test_blob.py::Test_Blob::test_bucket_readonly_property", "tests/unit/test_blob.py::Test_Blob::test_cache_control_getter", "tests/unit/test_blob.py::Test_Blob::test_cache_control_setter", "tests/unit/test_blob.py::Test_Blob::test_chunk_size_ctor", "tests/unit/test_blob.py::Test_Blob::test_chunk_size_getter", "tests/unit/test_blob.py::Test_Blob::test_chunk_size_setter", "tests/unit/test_blob.py::Test_Blob::test_chunk_size_setter_bad_value", "tests/unit/test_blob.py::Test_Blob::test_client", "tests/unit/test_blob.py::Test_Blob::test_component_count", "tests/unit/test_blob.py::Test_Blob::test_component_count_string_val", "tests/unit/test_blob.py::Test_Blob::test_component_count_unset", "tests/unit/test_blob.py::Test_Blob::test_compose_minimal_w_user_project", "tests/unit/test_blob.py::Test_Blob::test_compose_w_additional_property_changes", "tests/unit/test_blob.py::Test_Blob::test_compose_w_generation_match", "tests/unit/test_blob.py::Test_Blob::test_compose_w_generation_match_bad_length", "tests/unit/test_blob.py::Test_Blob::test_compose_w_generation_match_nones", "tests/unit/test_blob.py::Test_Blob::test_compose_wo_content_type_set", "tests/unit/test_blob.py::Test_Blob::test_content_disposition_getter", "tests/unit/test_blob.py::Test_Blob::test_content_disposition_setter", "tests/unit/test_blob.py::Test_Blob::test_content_encoding_getter", 
"tests/unit/test_blob.py::Test_Blob::test_content_encoding_setter", "tests/unit/test_blob.py::Test_Blob::test_content_language_getter", "tests/unit/test_blob.py::Test_Blob::test_content_language_setter", "tests/unit/test_blob.py::Test_Blob::test_content_type_getter", "tests/unit/test_blob.py::Test_Blob::test_content_type_setter", "tests/unit/test_blob.py::Test_Blob::test_crc32c_getter", "tests/unit/test_blob.py::Test_Blob::test_crc32c_setter", "tests/unit/test_blob.py::Test_Blob::test_create_resumable_upload_session", "tests/unit/test_blob.py::Test_Blob::test_create_resumable_upload_session_with_custom_timeout", "tests/unit/test_blob.py::Test_Blob::test_create_resumable_upload_session_with_failure", "tests/unit/test_blob.py::Test_Blob::test_create_resumable_upload_session_with_origin", "tests/unit/test_blob.py::Test_Blob::test_ctor_w_encryption_key", "tests/unit/test_blob.py::Test_Blob::test_ctor_w_kms_key_name", "tests/unit/test_blob.py::Test_Blob::test_ctor_w_kms_key_name_and_encryption_key", "tests/unit/test_blob.py::Test_Blob::test_ctor_with_encoded_unicode", "tests/unit/test_blob.py::Test_Blob::test_ctor_with_generation", "tests/unit/test_blob.py::Test_Blob::test_ctor_wo_encryption_key", "tests/unit/test_blob.py::Test_Blob::test_custom_time_getter", "tests/unit/test_blob.py::Test_Blob::test_custom_time_setter", "tests/unit/test_blob.py::Test_Blob::test_custom_time_setter_none_value", "tests/unit/test_blob.py::Test_Blob::test_custom_time_unset", "tests/unit/test_blob.py::Test_Blob::test_delete_w_generation", "tests/unit/test_blob.py::Test_Blob::test_delete_w_generation_match", "tests/unit/test_blob.py::Test_Blob::test_delete_wo_generation", "tests/unit/test_blob.py::Test_Blob::test_download_as_byte_w_custom_timeout", "tests/unit/test_blob.py::Test_Blob::test_download_as_bytes_w_generation_match", "tests/unit/test_blob.py::Test_Blob::test_download_as_bytes_w_raw", "tests/unit/test_blob.py::Test_Blob::test_download_as_bytes_wo_raw", "tests/unit/test_blob.py::Test_Blob::test_download_as_string", "tests/unit/test_blob.py::Test_Blob::test_download_as_string_w_hash_response_header_none", "tests/unit/test_blob.py::Test_Blob::test_download_as_string_w_response_headers", "tests/unit/test_blob.py::Test_Blob::test_download_as_text_w_custom_timeout", "tests/unit/test_blob.py::Test_Blob::test_download_as_text_w_encoding", "tests/unit/test_blob.py::Test_Blob::test_download_as_text_w_generation_match", "tests/unit/test_blob.py::Test_Blob::test_download_as_text_w_raw", "tests/unit/test_blob.py::Test_Blob::test_download_as_text_wo_raw", "tests/unit/test_blob.py::Test_Blob::test_download_to_file_w_chunks_w_raw", "tests/unit/test_blob.py::Test_Blob::test_download_to_file_w_chunks_wo_raw", "tests/unit/test_blob.py::Test_Blob::test_download_to_file_w_custom_timeout", "tests/unit/test_blob.py::Test_Blob::test_download_to_file_w_generation_match", "tests/unit/test_blob.py::Test_Blob::test_download_to_file_with_failure", "tests/unit/test_blob.py::Test_Blob::test_download_to_file_wo_chunks_w_raw", "tests/unit/test_blob.py::Test_Blob::test_download_to_file_wo_chunks_wo_raw", "tests/unit/test_blob.py::Test_Blob::test_download_to_file_wo_media_link", "tests/unit/test_blob.py::Test_Blob::test_download_to_filename_corrupted", "tests/unit/test_blob.py::Test_Blob::test_download_to_filename_w_custom_timeout", "tests/unit/test_blob.py::Test_Blob::test_download_to_filename_w_generation_match", "tests/unit/test_blob.py::Test_Blob::test_download_to_filename_w_key", 
"tests/unit/test_blob.py::Test_Blob::test_download_to_filename_w_updated_w_raw", "tests/unit/test_blob.py::Test_Blob::test_download_to_filename_w_updated_wo_raw", "tests/unit/test_blob.py::Test_Blob::test_download_to_filename_wo_updated_w_raw", "tests/unit/test_blob.py::Test_Blob::test_download_to_filename_wo_updated_wo_raw", "tests/unit/test_blob.py::Test_Blob::test_etag", "tests/unit/test_blob.py::Test_Blob::test_event_based_hold_getter_false", "tests/unit/test_blob.py::Test_Blob::test_event_based_hold_getter_missing", "tests/unit/test_blob.py::Test_Blob::test_event_based_hold_getter_true", "tests/unit/test_blob.py::Test_Blob::test_event_based_hold_setter", "tests/unit/test_blob.py::Test_Blob::test_exists_hit_w_generation", "tests/unit/test_blob.py::Test_Blob::test_exists_hit_w_user_project", "tests/unit/test_blob.py::Test_Blob::test_exists_miss", "tests/unit/test_blob.py::Test_Blob::test_exists_w_generation_match", "tests/unit/test_blob.py::Test_Blob::test_from_string_w_domain_name_bucket", "tests/unit/test_blob.py::Test_Blob::test_from_string_w_invalid_uri", "tests/unit/test_blob.py::Test_Blob::test_from_string_w_valid_uri", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_no_version_passed_warning", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v2_w_content_md5", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v2_w_content_type", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v2_w_credentials", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v2_w_csek", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v2_w_csek_and_headers", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v2_w_defaults", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v2_w_endpoint", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v2_w_expiration", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v2_w_generation", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v2_w_headers", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v2_w_lowercase_method", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v2_w_method", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v2_w_non_ascii_name", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v2_w_response_disposition", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v2_w_response_type", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v2_w_slash_in_name", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v2_w_tilde_in_name", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v4_w_bucket_bound_hostname_w_bare_hostname", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v4_w_bucket_bound_hostname_w_scheme", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v4_w_content_md5", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v4_w_content_type", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v4_w_credentials", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v4_w_csek", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v4_w_csek_and_headers", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v4_w_defaults", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v4_w_endpoint", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v4_w_generation", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v4_w_headers", 
"tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v4_w_lowercase_method", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v4_w_method", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v4_w_non_ascii_name", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v4_w_response_disposition", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v4_w_response_type", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v4_w_slash_in_name", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v4_w_tilde_in_name", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v4_w_virtual_hostname", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_w_invalid_version", "tests/unit/test_blob.py::Test_Blob::test_generation", "tests/unit/test_blob.py::Test_Blob::test_generation_string_val", "tests/unit/test_blob.py::Test_Blob::test_generation_unset", "tests/unit/test_blob.py::Test_Blob::test_get_iam_policy", "tests/unit/test_blob.py::Test_Blob::test_get_iam_policy_w_requested_policy_version", "tests/unit/test_blob.py::Test_Blob::test_get_iam_policy_w_user_project", "tests/unit/test_blob.py::Test_Blob::test_id", "tests/unit/test_blob.py::Test_Blob::test_make_private", "tests/unit/test_blob.py::Test_Blob::test_make_public", "tests/unit/test_blob.py::Test_Blob::test_md5_hash_getter", "tests/unit/test_blob.py::Test_Blob::test_md5_hash_setter", "tests/unit/test_blob.py::Test_Blob::test_media_link", "tests/unit/test_blob.py::Test_Blob::test_metadata_getter", "tests/unit/test_blob.py::Test_Blob::test_metadata_setter", "tests/unit/test_blob.py::Test_Blob::test_metadata_setter_w_nan", "tests/unit/test_blob.py::Test_Blob::test_metageneration", "tests/unit/test_blob.py::Test_Blob::test_metageneration_string_val", "tests/unit/test_blob.py::Test_Blob::test_metageneration_unset", "tests/unit/test_blob.py::Test_Blob::test_owner", "tests/unit/test_blob.py::Test_Blob::test_path_bad_bucket", "tests/unit/test_blob.py::Test_Blob::test_path_no_name", "tests/unit/test_blob.py::Test_Blob::test_path_normal", "tests/unit/test_blob.py::Test_Blob::test_path_w_slash_in_name", "tests/unit/test_blob.py::Test_Blob::test_path_with_non_ascii", "tests/unit/test_blob.py::Test_Blob::test_public_url", "tests/unit/test_blob.py::Test_Blob::test_public_url_w_slash_in_name", "tests/unit/test_blob.py::Test_Blob::test_public_url_w_tilde_in_name", "tests/unit/test_blob.py::Test_Blob::test_public_url_with_non_ascii", "tests/unit/test_blob.py::Test_Blob::test_retention_expiration_time", "tests/unit/test_blob.py::Test_Blob::test_retention_expiration_time_unset", "tests/unit/test_blob.py::Test_Blob::test_rewrite_other_bucket_other_name_no_encryption_partial", "tests/unit/test_blob.py::Test_Blob::test_rewrite_response_without_resource", "tests/unit/test_blob.py::Test_Blob::test_rewrite_same_name_no_key_new_key_w_token", "tests/unit/test_blob.py::Test_Blob::test_rewrite_same_name_no_old_key_new_key_done_w_user_project", "tests/unit/test_blob.py::Test_Blob::test_rewrite_same_name_w_old_key_new_kms_key", "tests/unit/test_blob.py::Test_Blob::test_rewrite_w_generation_match", "tests/unit/test_blob.py::Test_Blob::test_rewrite_w_generations", "tests/unit/test_blob.py::Test_Blob::test_self_link", "tests/unit/test_blob.py::Test_Blob::test_set_iam_policy", "tests/unit/test_blob.py::Test_Blob::test_set_iam_policy_w_user_project", "tests/unit/test_blob.py::Test_Blob::test_size", "tests/unit/test_blob.py::Test_Blob::test_size_string_val", 
"tests/unit/test_blob.py::Test_Blob::test_size_unset", "tests/unit/test_blob.py::Test_Blob::test_storage_class_getter", "tests/unit/test_blob.py::Test_Blob::test_storage_class_setter", "tests/unit/test_blob.py::Test_Blob::test_temporary_hold_getter_false", "tests/unit/test_blob.py::Test_Blob::test_temporary_hold_getter_missing", "tests/unit/test_blob.py::Test_Blob::test_temporary_hold_getter_true", "tests/unit/test_blob.py::Test_Blob::test_temporary_hold_setter", "tests/unit/test_blob.py::Test_Blob::test_test_iam_permissions", "tests/unit/test_blob.py::Test_Blob::test_test_iam_permissions_w_user_project", "tests/unit/test_blob.py::Test_Blob::test_time_created", "tests/unit/test_blob.py::Test_Blob::test_time_created_unset", "tests/unit/test_blob.py::Test_Blob::test_time_deleted", "tests/unit/test_blob.py::Test_Blob::test_time_deleted_unset", "tests/unit/test_blob.py::Test_Blob::test_update_storage_class_invalid", "tests/unit/test_blob.py::Test_Blob::test_update_storage_class_large_file", "tests/unit/test_blob.py::Test_Blob::test_update_storage_class_w_encryption_key_w_user_project", "tests/unit/test_blob.py::Test_Blob::test_update_storage_class_w_generation_match", "tests/unit/test_blob.py::Test_Blob::test_update_storage_class_with_custom_timeout", "tests/unit/test_blob.py::Test_Blob::test_update_storage_class_wo_encryption_key", "tests/unit/test_blob.py::Test_Blob::test_updated", "tests/unit/test_blob.py::Test_Blob::test_updated_unset", "tests/unit/test_blob.py::Test_Blob::test_upload_from_file_failure", "tests/unit/test_blob.py::Test_Blob::test_upload_from_file_success", "tests/unit/test_blob.py::Test_Blob::test_upload_from_file_with_custom_timeout", "tests/unit/test_blob.py::Test_Blob::test_upload_from_file_with_retries", "tests/unit/test_blob.py::Test_Blob::test_upload_from_file_with_rewind", "tests/unit/test_blob.py::Test_Blob::test_upload_from_filename", "tests/unit/test_blob.py::Test_Blob::test_upload_from_filename_w_custom_timeout", "tests/unit/test_blob.py::Test_Blob::test_upload_from_string_w_bytes", "tests/unit/test_blob.py::Test_Blob::test_upload_from_string_w_custom_timeout", "tests/unit/test_blob.py::Test_Blob::test_upload_from_string_w_text", "tests/unit/test_blob.py::Test_Blob::test_user_project", "tests/unit/test_blob.py::Test__quote::test_bad_type", "tests/unit/test_blob.py::Test__quote::test_bytes", "tests/unit/test_blob.py::Test__quote::test_unicode", "tests/unit/test_blob.py::Test__quote::test_w_slash_default", "tests/unit/test_blob.py::Test__quote::test_w_slash_w_safe", "tests/unit/test_blob.py::Test__quote::test_w_tilde", "tests/unit/test_blob.py::Test__maybe_rewind::test_default", "tests/unit/test_blob.py::Test__maybe_rewind::test_do_not_rewind", "tests/unit/test_blob.py::Test__maybe_rewind::test_do_rewind", "tests/unit/test_blob.py::Test__raise_from_invalid_response::test_default", "tests/unit/test_blob.py::Test__raise_from_invalid_response::test_w_206_and_args", "tests/unit/test_blob.py::Test__add_query_parameters::test_w_empty_list", "tests/unit/test_blob.py::Test__add_query_parameters::test_w_existing_qs", "tests/unit/test_blob.py::Test__add_query_parameters::test_wo_existing_qs" ]
[]
Apache License 2.0
8,746
243
[ "google/cloud/storage/blob.py" ]
bridgecrewio__checkov-616
171e6a200bc9317b574e6e5aeef52049370a1cf8
2020-10-19 19:20:52
25b466be980e420e64519fbf74c93a35a0c94203
diff --git a/checkov/terraform/checks/resource/gcp/GKEBasicAuth.py b/checkov/terraform/checks/resource/gcp/GKEBasicAuth.py index b84c8740a..0aa7c847b 100644 --- a/checkov/terraform/checks/resource/gcp/GKEBasicAuth.py +++ b/checkov/terraform/checks/resource/gcp/GKEBasicAuth.py @@ -1,5 +1,5 @@ -from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck from checkov.common.models.enums import CheckResult, CheckCategories +from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck class GKEBasicAuth(BaseResourceCheck): @@ -18,7 +18,13 @@ class GKEBasicAuth(BaseResourceCheck): :return: <CheckResult> """ if 'master_auth' in conf.keys(): - if conf['master_auth'][0].get('username') or conf['master_auth'][0].get('password'): + username = conf['master_auth'][0].get('username') + password = conf['master_auth'][0].get('password') + if username or password: + # only if both are set to the empty string it is fine + # https://www.terraform.io/docs/providers/google/r/container_cluster.html + if len(username) == 1 and len(password) == 1 and username[0] == '' and password[0] == '': + return CheckResult.PASSED return CheckResult.FAILED return CheckResult.PASSED return CheckResult.FAILED
Check: CKV_GCP_19 False positive **Describe the bug** Checkov Will flag your code even if the `basic-auth` is already disabled on your cluster. **To Reproduce** Steps to reproduce the behavior: 1. Have a file as follows: ``` resource "google_container_cluster" "cluster-test" { name = "cluster-test" location = "europe-west1-c" provider = google-beta remove_default_node_pool = true initial_node_count = 1 enable_shielded_nodes = true release_channel { channel = "RAPID" } pod_security_policy_config { enabled = true } master_auth { username = "" password = "" client_certificate_config { issue_client_certificate = false } } } ``` 2. Run cli command 'checkov -d path/to/your/terraform/folder.' 3. See error: ``` Check: CKV_GCP_19: "Ensure GKE basic auth is disabled" FAILED for resource: google_container_cluster.cluster-test File: /cluster.tf:1-27 Guide: https://docs.bridgecrew.io/docs/bc_gcp_kubernetes_11 1 | resource "google_container_cluster" "cluster-test" { 2 | name = "cluster-test" 3 | location = "europe-west1-c" 4 | provider = google-beta 5 | 6 | remove_default_node_pool = true 7 | initial_node_count = 1 8 | 9 | enable_shielded_nodes = true 10 | 11 | release_channel { 12 | channel = "RAPID" 13 | } 14 | 15 | pod_security_policy_config { 16 | enabled = true 17 | } 18 | 19 | master_auth { 20 | username = "" 21 | password = "" 22 | 23 | client_certificate_config { 24 | issue_client_certificate = false 25 | } 26 | } 27 | } ``` **Expected behavior** ``` Check: CKV_GCP_19: "Ensure GKE basic auth is disabled" PASSED for resource: google_container_cluster.cluster-test File: /cluster.tf:1-27 Guide: https://docs.bridgecrew.io/docs/bc_gcp_kubernetes_7 ``` The `basic-auth` is already supposed to be disabled using this bit of code as: ``` master_auth { username = "" password = "" client_certificate_config { issue_client_certificate = false } } ``` **Environment:** - CI: Github Actions - OS: Ubuntu-lastest - Checkov Version [latest] - Terraform v0.12.24 **Additional context** Tested and installed today following documentation available here: https://www.checkov.io/1.Introduction/Getting%20Started.html
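As an illustration of the behaviour this record describes, the sketch below mirrors the corrected CKV_GCP_19 logic from the patch above in a standalone function: basic auth counts as disabled only when both `username` and `password` are present and set to the empty string. This is a minimal sketch for clarity; the function name and plain-dict input are illustrative, not checkov's actual `BaseResourceCheck` API (the list-wrapped values follow the parsed-HCL shape used in the test_patch below).

```python
# Standalone sketch of the corrected CKV_GCP_19 logic -- illustrative only, not
# checkov's actual check class. Parsed HCL wraps every value in a list, so
# username = "" arrives as [''].

PASSED, FAILED = "PASSED", "FAILED"


def scan_gke_basic_auth(conf):
    """Return PASSED when GKE basic auth is disabled, FAILED otherwise."""
    if 'master_auth' not in conf:
        # Matches the original check: no master_auth block at all is a failure.
        return FAILED
    master_auth = conf['master_auth'][0]
    username = master_auth.get('username')
    password = master_auth.get('password')
    if username or password:
        # Both explicitly set to "" is the documented way to disable basic auth.
        if username == [''] and password == ['']:
            return PASSED
        return FAILED
    return PASSED


print(scan_gke_basic_auth({'master_auth': [{'username': [''], 'password': ['']}]}))       # PASSED
print(scan_gke_basic_auth({'master_auth': [{'username': ['test'], 'password': ['pw']}]}))  # FAILED
```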
bridgecrewio/checkov
diff --git a/tests/terraform/checks/resource/gcp/test_GKEBasicAuth.py b/tests/terraform/checks/resource/gcp/test_GKEBasicAuth.py index 5278ee8b4..1469e3d5b 100644 --- a/tests/terraform/checks/resource/gcp/test_GKEBasicAuth.py +++ b/tests/terraform/checks/resource/gcp/test_GKEBasicAuth.py @@ -1,23 +1,80 @@ import unittest -from checkov.terraform.checks.resource.gcp.GKEBasicAuth import check from checkov.common.models.enums import CheckResult +from checkov.terraform.checks.resource.gcp.GKEBasicAuth import check class TestGKEBasicAuth(unittest.TestCase): def test_failure(self): - resource_conf = {'name': ['google_cluster_bad'], 'monitoring_service': ['none'], 'enable_legacy_abac': [True], 'master_authorized_networks_config': [{'cidr_blocks': [{'cidr_block': ['0.0.0.0/0'], 'display_name': ['The world']}]}], 'master_auth': [{'username': ['test'], 'password': ['password']}]} + resource_conf = { + 'name': ['google_cluster_bad'], + 'monitoring_service': ['none'], + 'enable_legacy_abac': [True], + 'master_authorized_networks_config': [ + { + 'cidr_blocks': [ + { + 'cidr_block': ['0.0.0.0/0'], + 'display_name': ['The world'], + } + ] + } + ], + 'master_auth': [ + { + 'username': ['test'], + 'password': ['password'], + } + ], + } scan_result = check.scan_resource_conf(conf=resource_conf) self.assertEqual(CheckResult.FAILED, scan_result) def test_failure2(self): - resource_conf = {'name': ['google_cluster'], 'monitoring_service': ['monitoring.googleapis.com'], 'master_authorized_networks_config': [{}]} + resource_conf = { + 'name': ['google_cluster'], + 'monitoring_service': ['monitoring.googleapis.com'], + 'master_authorized_networks_config': [{}], + } scan_result = check.scan_resource_conf(conf=resource_conf) self.assertEqual(CheckResult.FAILED, scan_result) def test_success(self): - resource_conf = {'name': ['google_cluster'], 'monitoring_service': ['monitoring.googleapis.com'], 'master_authorized_networks_config': [{}], 'master_auth': [{'client_certificate_config': [{'issue_client_certificate': [False]}]}]} + resource_conf = { + 'name': ['google_cluster'], + 'monitoring_service': ['monitoring.googleapis.com'], + 'master_authorized_networks_config': [{}], + 'master_auth': [ + { + 'client_certificate_config': [ + { + 'issue_client_certificate': [False], + } + ], + } + ], + } + scan_result = check.scan_resource_conf(conf=resource_conf) + self.assertEqual(CheckResult.PASSED, scan_result) + + def test_success_no_basic_out_if_username_and_password_are_present_but_empty(self): + resource_conf = { + 'name': ['google_cluster'], + 'monitoring_service': ['monitoring.googleapis.com'], + 'master_authorized_networks_config': [{}], + 'master_auth': [ + { + 'username': [''], + 'password': [''], + 'client_certificate_config': [ + { + 'issue_client_certificate': [False], + } + ], + } + ] + } scan_result = check.scan_resource_conf(conf=resource_conf) self.assertEqual(CheckResult.PASSED, scan_result)
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_hyperlinks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 0 }, "num_modified_files": 1 }
1.0
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y git" ], "python": "3.7", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///croot/attrs_1668696182826/work bc-python-hcl2==0.3.51 boto3==1.12.43 botocore==1.15.49 certifi @ file:///croot/certifi_1671487769961/work/certifi chardet==3.0.4 -e git+https://github.com/bridgecrewio/checkov.git@171e6a200bc9317b574e6e5aeef52049370a1cf8#egg=checkov colorama==0.4.3 docopt==0.6.2 docutils==0.15.2 dpath==1.5.0 flit_core @ file:///opt/conda/conda-bld/flit-core_1644941570762/work/source/flit_core gitdb==4.0.5 GitPython==3.1.7 idna==2.8 importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1648562407465/work importlib-resources==5.12.0 iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work jmespath==0.10.0 junit-xml==1.8 lark==1.1.9 lark-parser==0.7.8 packaging @ file:///croot/packaging_1671697413597/work pluggy @ file:///tmp/build/80754af9/pluggy_1648042572264/work py @ file:///opt/conda/conda-bld/py_1644396412707/work pytest==7.1.2 python-dateutil==2.9.0.post0 PyYAML==5.3.1 requests==2.22.0 s3transfer==0.3.7 six==1.15.0 smmap==3.0.5 tabulate==0.8.6 termcolor==1.1.0 tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work tqdm==4.49.0 typing_extensions @ file:///croot/typing_extensions_1669924550328/work update-checker==0.18.0 urllib3==1.25.10 zipp @ file:///croot/zipp_1672387121353/work
name: checkov channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=22.1.0=py37h06a4308_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2022.12.7=py37h06a4308_0 - flit-core=3.6.0=pyhd3eb1b0_0 - importlib-metadata=4.11.3=py37h06a4308_0 - importlib_metadata=4.11.3=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=22.0=py37h06a4308_0 - pip=22.3.1=py37h06a4308_0 - pluggy=1.0.0=py37h06a4308_1 - py=1.11.0=pyhd3eb1b0_0 - pytest=7.1.2=py37h06a4308_0 - python=3.7.16=h7a1cb2a_0 - readline=8.2=h5eee18b_0 - setuptools=65.6.3=py37h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tomli=2.0.1=py37h06a4308_0 - typing_extensions=4.4.0=py37h06a4308_0 - wheel=0.38.4=py37h06a4308_0 - xz=5.6.4=h5eee18b_1 - zipp=3.11.0=py37h06a4308_0 - zlib=1.2.13=h5eee18b_1 - pip: - bc-python-hcl2==0.3.51 - boto3==1.12.43 - botocore==1.15.49 - chardet==3.0.4 - colorama==0.4.3 - docopt==0.6.2 - docutils==0.15.2 - dpath==1.5.0 - gitdb==4.0.5 - gitpython==3.1.7 - idna==2.8 - importlib-resources==5.12.0 - jmespath==0.10.0 - junit-xml==1.8 - lark==1.1.9 - lark-parser==0.7.8 - python-dateutil==2.9.0.post0 - pyyaml==5.3.1 - requests==2.22.0 - s3transfer==0.3.7 - six==1.15.0 - smmap==3.0.5 - tabulate==0.8.6 - termcolor==1.1.0 - tqdm==4.49.0 - update-checker==0.18.0 - urllib3==1.25.10 prefix: /opt/conda/envs/checkov
[ "tests/terraform/checks/resource/gcp/test_GKEBasicAuth.py::TestGKEBasicAuth::test_success_no_basic_out_if_username_and_password_are_present_but_empty" ]
[]
[ "tests/terraform/checks/resource/gcp/test_GKEBasicAuth.py::TestGKEBasicAuth::test_failure", "tests/terraform/checks/resource/gcp/test_GKEBasicAuth.py::TestGKEBasicAuth::test_failure2", "tests/terraform/checks/resource/gcp/test_GKEBasicAuth.py::TestGKEBasicAuth::test_success" ]
[]
Apache License 2.0
8,750
362
[ "checkov/terraform/checks/resource/gcp/GKEBasicAuth.py" ]
mir-dataset-loaders__mirdata-295
f8636612821caa78706c0b56def9084ae5ffd711
2020-10-20 04:00:38
82c024c4c1214f8e0bc9adf80d3e6289c85a1241
codecov[bot]: # [Codecov](https://codecov.io/gh/mir-dataset-loaders/mirdata/pull/295?src=pr&el=h1) Report > Merging [#295](https://codecov.io/gh/mir-dataset-loaders/mirdata/pull/295?src=pr&el=desc) into [master](https://codecov.io/gh/mir-dataset-loaders/mirdata/commit/1eedb64932c593b213e8c2218c1f35a0ee59f0b1?el=desc) will **decrease** coverage by `0.04%`. > The diff coverage is `100.00%`. ```diff @@ Coverage Diff @@ ## master #295 +/- ## ========================================== - Coverage 99.03% 98.98% -0.05% ========================================== Files 22 21 -1 Lines 2373 2263 -110 ========================================== - Hits 2350 2240 -110 Misses 23 23 ``` nkundiushuti: > @nkundiushuti should be ready to go, let me know if this will work for #270 or if there's something missing thanks! I will check this! rabitt: Merging!
diff --git a/mirdata/track.py b/mirdata/track.py index a503ea7..208174d 100644 --- a/mirdata/track.py +++ b/mirdata/track.py @@ -1,18 +1,18 @@ # -*- coding: utf-8 -*- """track object utility functions """ - - import types +import numpy as np + MAX_STR_LEN = 100 class Track(object): def __repr__(self): - properties = [v for v in dir(self.__class__) if not v.startswith('_')] + properties = [v for v in dir(self.__class__) if not v.startswith("_")] attributes = [ - v for v in dir(self) if not v.startswith('_') and v not in properties + v for v in dir(self) if not v.startswith("_") and v not in properties ] repr_str = "Track(\n" @@ -21,7 +21,7 @@ class Track(object): val = getattr(self, attr) if isinstance(val, str): if len(val) > MAX_STR_LEN: - val = '...{}'.format(val[-MAX_STR_LEN:]) + val = "...{}".format(val[-MAX_STR_LEN:]) val = '"{}"'.format(val) repr_str += " {}={},\n".format(attr, val) @@ -33,7 +33,7 @@ class Track(object): if val.__doc__ is None: raise ValueError("{} has no documentation".format(prop)) - val_type_str = val.__doc__.split(':')[0] + val_type_str = val.__doc__.split(":")[0] repr_str += " {}: {},\n".format(prop, val_type_str) repr_str += ")" @@ -41,3 +41,119 @@ class Track(object): def to_jams(self): raise NotImplementedError + + +class MultiTrack(Track): + """MultiTrack class. + + A multitrack class is a collection of track objects and their associated audio + that can be mixed together. + A multitrack is iteslf a Track, and can have its own associated audio (such as + a mastered mix), its own metadata and its own annotations. + + Attributes: + tracks (dict): {track_id: Track} + track_audio_attribute (str): the name of the attribute of Track which + returns the audio to be mixed + """ + + def __init__(self, tracks, track_audio_attribute): + """Inits MultiTrack with tracks and audio attribute""" + self.tracks = tracks + self.track_audio_attribute = track_audio_attribute + + def get_target(self, track_keys, weights=None, average=True, enforce_length=True): + """Get target which is a linear mixture of tracks + + Args: + track_keys (list): list of track keys to mix together + weights (list or None): list of positive scalars to be used in the average + average (bool): if True, computes a weighted average of the tracks + if False, computes a weighted sum of the tracks + enforce_length (bool): If True, raises ValueError if the tracks are + not the same length. If False, pads audio with zeros to match the length + of the longest track + + Returns: + target (np.ndarray): target audio with shape (n_channels, n_samples) + + Raises: + ValueError: + if sample rates of the tracks are not equal + if enforce_length=True and lengths are not equal + + """ + signals = [] + lengths = [] + sample_rates = [] + for k in track_keys: + audio, sample_rate = getattr(self.tracks[k], self.track_audio_attribute)() + # ensure all signals are shape (n_channels, n_samples) + if len(audio.shape) == 1: + audio = audio[np.newaxis, :] + signals.append(audio) + lengths.append(audio.shape[1]) + sample_rates.append(sample_rate) + + if len(set(sample_rates)) > 1: + raise ValueError( + "Sample rates for tracks {} are not equal: {}".format( + track_keys, sample_rates + ) + ) + + max_length = np.max(lengths) + if any([l != max_length for l in lengths]): + if enforce_length: + raise ValueError( + "Track's {} audio are not the same length {}. 
Use enforce_length=False to pad with zeros.".format( + track_keys, lengths + ) + ) + else: + # pad signals to the max length + signals = [ + np.pad(signal, ((0, 0), (0, max_length - signal.shape[1]))) + for signal in signals + ] + + if weights is None: + weights = np.ones((len(track_keys),)) + + target = np.average(signals, axis=0, weights=weights) + if not average: + target *= np.sum(weights) + + return target + + def get_random_target(self, n_tracks=None, min_weight=0.3, max_weight=1.0): + """Get a random target by combining a random selection of tracks with random weights + + Args: + n_tracks (int or None): number of tracks to randomly mix. If None, uses all tracks + min_weight (float): minimum possible weight when mixing + max_weight (float): maximum possible weight when mixing + + Returns: + target (np.ndarray): mixture audio with shape (n_samples, n_channels) + tracks (list): list of keys of included tracks + weights (list): list of weights used to mix tracks + """ + tracks = list(self.tracks.keys()) + if n_tracks is not None and n_tracks < len(tracks): + tracks = np.random.choice(tracks, n_tracks, replace=False) + + weights = np.random.uniform(low=min_weight, high=max_weight, size=len(tracks)) + target = self.get_target(tracks, weights=weights) + return target, tracks, weights + + def get_mix(self): + """Create a linear mixture given a subset of tracks. + + Args: + track_keys (list): list of track keys to mix together + + Returns: + target (np.ndarray): mixture audio with shape (n_samples, n_channels) + """ + return self.get_target(list(self.tracks.keys())) diff --git a/mirdata/version.py b/mirdata/version.py index bc032fb..f3b35f0 100644 --- a/mirdata/version.py +++ b/mirdata/version.py @@ -2,5 +2,5 @@ # -*- coding: utf-8 -*- """Version info""" -short_version = '0.2' -version = '0.2.0' +short_version = "0.2" +version = "0.2.1"
Multitrack Datasets How can we best support multitrack datasets? The current solution is to have each 'Track' be a multitrack with a ton of attributes, but it's clunky and difficult to take full advantage of the multitracks themselves. We're also loosely tying a `Track` to something that can be mapped to a `jams` file/object, and jams isn't built for multiple audio files. My current thinking is we could index at the stem level, and support a new base class `MultiTrack` which would group multiple `Track` objects. The grouping could be stored as part of the index, and any mixture-level annotations could themselves be stored as a `Track` object, and be a part of the multitrack object. Thoughts? cc @nkundiushuti - we could test this out with Phenicx-Anechoic #270
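To make the mixing behaviour of the proposed `MultiTrack` concrete, here is a minimal standalone sketch of the weighted stem-mixing idea that `get_target` in the patch above implements. It assumes all stems share a sample rate and length; the function name and signature are illustrative, not mirdata's actual API.

```python
# Sketch of the stem-mixing idea behind MultiTrack.get_target (not mirdata itself).
import numpy as np


def mix_stems(stems, weights=None, average=True):
    """Mix a list of (n_channels, n_samples) arrays into one target signal."""
    signals = [s[np.newaxis, :] if s.ndim == 1 else s for s in stems]
    lengths = {s.shape[1] for s in signals}
    if len(lengths) != 1:
        raise ValueError("all stems must be the same length: {}".format(lengths))
    if weights is None:
        weights = np.ones(len(signals))
    target = np.average(signals, axis=0, weights=weights)
    if not average:
        target *= np.sum(weights)  # weighted sum instead of weighted average
    return target


rng = np.random.default_rng(0)
stems = [rng.uniform(-1, 1, (2, 100)) for _ in range(3)]
mix = mix_stems(stems, weights=[0.5, 0.2, 1.0])
print(mix.shape)                  # (2, 100)
print(np.abs(mix).max() <= 1.0)   # a weighted average of signals in [-1, 1] stays in range
```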
mir-dataset-loaders/mirdata
diff --git a/tests/test_track.py b/tests/test_track.py index b15104e..a25c750 100644 --- a/tests/test_track.py +++ b/tests/test_track.py @@ -2,24 +2,25 @@ import sys import pytest +import numpy as np from mirdata import track if sys.version_info.major == 3: - builtin_module_name = 'builtins' + builtin_module_name = "builtins" else: - builtin_module_name = '__builtin__' + builtin_module_name = "__builtin__" def test_track_repr(): class TestTrack(track.Track): def __init__(self): - self.a = 'asdf' + self.a = "asdf" self.b = 1.2345678 - self.c = {1: 'a', 'b': 2} - self._d = 'hidden' + self.c = {1: "a", "b": 2} + self._d = "hidden" self.e = None - self.long = 'a' + 'b' * 50 + 'c' * 50 + self.long = "a" + "b" * 50 + "c" * 50 @property def f(self): @@ -36,7 +37,7 @@ def test_track_repr(): expected1 = """Track(\n a="asdf",\n b=1.2345678,\n """ expected2 = """c={1: 'a', 'b': 2},\n e=None,\n """ - expected3 = """long="...{}",\n """.format('b' * 50 + 'c' * 50) + expected3 = """long="...{}",\n """.format("b" * 50 + "c" * 50) expected4 = """f: ThisObjectType,\n g: I have an improper docstring,\n)""" test_track = TestTrack() @@ -54,3 +55,160 @@ def test_track_repr(): bad_track = NoDocsTrack() with pytest.raises(ValueError): bad_track.__repr__() + + +def test_multitrack(): + class TestTrack(track.Track): + def __init__(self, key): + self.key = key + + def f(self): + return np.random.uniform(-1, 1, (2, 100)), 1000 + + track_keys = ["a", "b", "c"] + tracks = {k: TestTrack(k) for k in track_keys} + + mtrack = track.MultiTrack(tracks, "f") + + target1 = mtrack.get_target(["a", "c"]) + assert target1.shape == (2, 100) + assert np.max(np.abs(target1)) <= 1 + + target2 = mtrack.get_target(["b", "c"], weights=[0.5, 0.2]) + assert target2.shape == (2, 100) + assert np.max(np.abs(target2)) <= 1 + + target3 = mtrack.get_target(["b", "c"], weights=[0.5, 5]) + assert target3.shape == (2, 100) + assert np.max(np.abs(target3)) <= 1 + + target4 = mtrack.get_target(["a", "c"], average=False) + assert target4.shape == (2, 100) + assert np.max(np.abs(target4)) <= 2 + + target5 = mtrack.get_target(["a", "c"], average=False, weights=[0.1, 0.5]) + assert target5.shape == (2, 100) + assert np.max(np.abs(target5)) <= 0.6 + + random_target1, t1, w1 = mtrack.get_random_target(n_tracks=2) + assert random_target1.shape == (2, 100) + assert np.max(np.abs(random_target1)) <= 1 + assert len(t1) == 2 + assert len(w1) == 2 + assert np.all(w1 >= 0.3) + assert np.all(w1 <= 1.0) + + random_target2, t2, w2 = mtrack.get_random_target(n_tracks=5) + assert random_target2.shape == (2, 100) + assert np.max(np.abs(random_target2)) <= 1 + assert len(t2) == 3 + assert len(w2) == 3 + assert np.all(w2 >= 0.3) + assert np.all(w2 <= 1.0) + + random_target3, t3, w3 = mtrack.get_random_target() + assert random_target3.shape == (2, 100) + assert np.max(np.abs(random_target3)) <= 1 + assert len(t3) == 3 + assert len(w3) == 3 + assert np.all(w3 >= 0.3) + assert np.all(w3 <= 1.0) + + random_target4, t4, w4 = mtrack.get_random_target( + n_tracks=2, min_weight=0.1, max_weight=0.4 + ) + assert random_target4.shape == (2, 100) + assert np.max(np.abs(random_target4)) <= 1 + assert len(t4) == 2 + assert len(w4) == 2 + assert np.all(w4 >= 0.1) + assert np.all(w4 <= 0.4) + + mix = mtrack.get_mix() + assert mix.shape == (2, 100) + + +def test_multitrack_unequal_len(): + class TestTrack(track.Track): + def __init__(self, key): + self.key = key + + def f(self): + return np.random.uniform(-1, 1, (2, np.random.randint(50, 100))), 1000 + + track_keys = ["a", "b", "c"] + 
tracks = {k: TestTrack(k) for k in track_keys} + + mtrack = track.MultiTrack(tracks, "f") + + with pytest.raises(ValueError): + mtrack.get_target(["a", "b", "c"]) + + target1 = mtrack.get_target(["a", "b", "c"], enforce_length=False) + assert target1.shape[0] == 2 + assert np.max(np.abs(target1)) <= 1 + + target2 = mtrack.get_target(["a", "b", "c"], average=False, enforce_length=False) + assert target2.shape[0] == 2 + assert np.max(np.abs(target2)) <= 3 + + +def test_multitrack_unequal_sr(): + class TestTrack(track.Track): + def __init__(self, key): + self.key = key + + def f(self): + return np.random.uniform(-1, 1, (2, 100)), np.random.randint(10, 1000) + + track_keys = ["a", "b", "c"] + tracks = {k: TestTrack(k) for k in track_keys} + + mtrack = track.MultiTrack(tracks, "f") + + with pytest.raises(ValueError): + mtrack.get_target(["a", "b", "c"]) + + +def test_multitrack_mono(): + ### no first channel - audio shapes (100,) + class TestTrack(track.Track): + def __init__(self, key): + self.key = key + + def f(self): + return np.random.uniform(-1, 1, (100)), 1000 + + track_keys = ["a", "b", "c"] + tracks = {k: TestTrack(k) for k in track_keys} + + mtrack = track.MultiTrack(tracks, "f") + + target1 = mtrack.get_target(["a", "c"]) + assert target1.shape == (1, 100) + assert np.max(np.abs(target1)) <= 1 + + target1 = mtrack.get_target(["a", "c"], average=False) + assert target1.shape == (1, 100) + assert np.max(np.abs(target1)) <= 2 + + ### one channel mono shape (1, 100) + class TestTrack1(track.Track): + def __init__(self, key): + self.key = key + + def f(self): + return np.random.uniform(-1, 1, (1, 100)), 1000 + + track_keys = ["a", "b", "c"] + tracks = {k: TestTrack1(k) for k in track_keys} + + mtrack = track.MultiTrack(tracks, "f") + + target1 = mtrack.get_target(["a", "c"]) + assert target1.shape == (1, 100) + assert np.max(np.abs(target1)) <= 1 + + target1 = mtrack.get_target(["a", "c"], average=False) + assert target1.shape == (1, 100) + assert np.max(np.abs(target1)) <= 2
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 2 }, "num_modified_files": 2 }
0.2
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "pytest-pep8", "pytest-mock", "pytest-localserver" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.7", "reqs_path": [ "docs/requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==24.2.0 audioread==3.0.1 certifi @ file:///croot/certifi_1671487769961/work/certifi cffi==1.15.1 charset-normalizer==3.4.1 coverage==7.2.7 DALI-dataset==1.1 decorator==5.1.1 exceptiongroup==1.2.2 execnet==2.0.2 idna==3.10 importlib-metadata==6.7.0 importlib-resources==5.12.0 iniconfig==2.0.0 jams==0.3.4 joblib==1.3.2 jsonschema==4.17.3 lazy_loader==0.4 librosa==0.10.2.post1 llvmlite==0.39.1 MarkupSafe==2.1.5 mido==1.3.3 mir_eval==0.8.2 -e git+https://github.com/mir-dataset-loaders/mirdata.git@f8636612821caa78706c0b56def9084ae5ffd711#egg=mirdata msgpack==1.0.5 numba==0.56.4 numpy==1.21.6 packaging==24.0 pandas==1.3.5 pep8==1.7.1 pkgutil_resolve_name==1.3.10 platformdirs==4.0.0 pluggy==1.2.0 pooch==1.8.2 pretty-midi==0.2.10 pycparser==2.21 pyrsistent==0.19.3 pytest==7.4.4 pytest-cache==1.0 pytest-cov==4.1.0 pytest-localserver==0.9.0.post0 pytest-mock==3.11.1 pytest-pep8==1.0.6 python-dateutil==2.9.0.post0 pytz==2025.2 requests==2.31.0 scikit-learn==1.0.2 scipy==1.7.3 six==1.17.0 sortedcontainers==2.4.0 soundfile==0.13.1 soxr==0.3.7 threadpoolctl==3.1.0 tomli==2.0.1 tqdm==4.67.1 typing_extensions==4.7.1 urllib3==2.0.7 Werkzeug==2.2.3 youtube-dl==2021.12.17 zipp==3.15.0
name: mirdata channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2022.12.7=py37h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=22.3.1=py37h06a4308_0 - python=3.7.16=h7a1cb2a_0 - readline=8.2=h5eee18b_0 - setuptools=65.6.3=py37h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.38.4=py37h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==24.2.0 - audioread==3.0.1 - cffi==1.15.1 - charset-normalizer==3.4.1 - coverage==7.2.7 - dali-dataset==1.1 - decorator==5.1.1 - exceptiongroup==1.2.2 - execnet==2.0.2 - idna==3.10 - importlib-metadata==6.7.0 - importlib-resources==5.12.0 - iniconfig==2.0.0 - jams==0.3.4 - joblib==1.3.2 - jsonschema==4.17.3 - lazy-loader==0.4 - librosa==0.10.2.post1 - llvmlite==0.39.1 - markupsafe==2.1.5 - mido==1.3.3 - mir-eval==0.8.2 - msgpack==1.0.5 - numba==0.56.4 - numpy==1.21.6 - packaging==24.0 - pandas==1.3.5 - pep8==1.7.1 - pkgutil-resolve-name==1.3.10 - platformdirs==4.0.0 - pluggy==1.2.0 - pooch==1.8.2 - pretty-midi==0.2.10 - pycparser==2.21 - pyrsistent==0.19.3 - pytest==7.4.4 - pytest-cache==1.0 - pytest-cov==4.1.0 - pytest-localserver==0.9.0.post0 - pytest-mock==3.11.1 - pytest-pep8==1.0.6 - python-dateutil==2.9.0.post0 - pytz==2025.2 - requests==2.31.0 - scikit-learn==1.0.2 - scipy==1.7.3 - six==1.17.0 - sortedcontainers==2.4.0 - soundfile==0.13.1 - soxr==0.3.7 - threadpoolctl==3.1.0 - tomli==2.0.1 - tqdm==4.67.1 - typing-extensions==4.7.1 - urllib3==2.0.7 - werkzeug==2.2.3 - youtube-dl==2021.12.17 - zipp==3.15.0 prefix: /opt/conda/envs/mirdata
[ "tests/test_track.py::test_multitrack", "tests/test_track.py::test_multitrack_unequal_len", "tests/test_track.py::test_multitrack_unequal_sr", "tests/test_track.py::test_multitrack_mono" ]
[]
[ "tests/test_track.py::test_track_repr" ]
[]
BSD 3-Clause "New" or "Revised" License
8,755
1,574
[ "mirdata/track.py", "mirdata/version.py" ]
rohitsanj__doex-16
1ad6b4d894ffc57a5e8fda66548d4806bdc314b9
2020-10-20 09:53:01
1ad6b4d894ffc57a5e8fda66548d4806bdc314b9
diff --git a/doex/latin_square.py b/doex/latin_square.py index ee3453b..7209dc5 100644 --- a/doex/latin_square.py +++ b/doex/latin_square.py @@ -65,7 +65,9 @@ class LatinSquare: self.dof_columns = n_cols - 1 self.dof_treatments = len(self.treatments) - 1 self.dof_total = N - 1 - self.dof_error = self.dof_total - (self.dof_rows + self.dof_columns + self.dof_treatments) + self.dof_error = self.dof_total - ( + self.dof_rows + self.dof_columns + self.dof_treatments + num_missing + ) # Calculate Mean Sum of Squares self.mss_rows = self.ss_rows / self.dof_rows diff --git a/doex/rcbd.py b/doex/rcbd.py index ff83010..f7ff63d 100644 --- a/doex/rcbd.py +++ b/doex/rcbd.py @@ -9,6 +9,11 @@ class RandomizedCompleteBlockDesign: n_treatments, n_blocks = self.data.shape + if hasattr(self, "num_missing"): + num_missing = self.num_missing + else: + num_missing = 0 + N = 0 for entry in self.data: N += len(entry) @@ -32,7 +37,7 @@ class RandomizedCompleteBlockDesign: self.dof_treatments = n_treatments - 1 self.dof_blocks = n_blocks - 1 self.dof_total = N - 1 - self.dof_error = self.dof_total - (self.dof_treatments + self.dof_blocks) + self.dof_error = self.dof_total - (self.dof_treatments + self.dof_blocks + num_missing) # Calculate Mean Sum of Squares self.mss_treatments = self.ss_treatments / self.dof_treatments @@ -101,11 +106,11 @@ class RandomizedCompleteBlockDesign_MissingValues(RandomizedCompleteBlockDesign) n_treatments, n_blocks = self.data.shape - num_missing = np.count_nonzero(np.isnan(self.data)) + self.num_missing = np.count_nonzero(np.isnan(self.data)) missing_locations = np.argwhere(np.isnan(self.data)) self.handle_missing(self.data, missing_locations) - print("Data after adjusting for {} missing value(s)".format(num_missing)) + print("Data after adjusting for {} missing value(s)".format(self.num_missing)) print(self.data) # Continue with RCBD analysis
BUG: reduce the degrees of freedom of the error term by the number of missing values. Reported by @nitin-kamath
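The fix in the patch above encodes a standard ANOVA rule: every missing cell that is estimated before the analysis costs one error degree of freedom. Below is a small self-contained sketch of that bookkeeping for the RCBD case; it is plain arithmetic mirroring the patched formula, not the doex package itself.

```python
# Degrees-of-freedom bookkeeping for an RCBD with estimated missing cells
# (mirrors the patched formula above; standalone arithmetic, not doex).
def rcbd_error_dof(n_treatments, n_blocks, num_missing=0):
    N = n_treatments * n_blocks          # total observations, including estimated cells
    dof_total = N - 1
    dof_treatments = n_treatments - 1
    dof_blocks = n_blocks - 1
    # Each estimated missing value consumes one error degree of freedom.
    return dof_total - (dof_treatments + dof_blocks + num_missing)


print(rcbd_error_dof(3, 3))                 # 4 for a complete 3x3 design
print(rcbd_error_dof(3, 3, num_missing=2))  # 2 after estimating two missing cells
```

For the 3x3 example with two missing cells in the test_patch above, the error degrees of freedom drop from 4 to 2, which matches the halved expected F statistics in the updated test.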
rohitsanj/doex
diff --git a/doex/tests/test_latin_square.py b/doex/tests/test_latin_square.py index ae0d36d..4a2daa2 100644 --- a/doex/tests/test_latin_square.py +++ b/doex/tests/test_latin_square.py @@ -84,9 +84,9 @@ class TestLatinSquare: ) abs_tol = 10 ** -3 - assert math.isclose(exp.f_treatments, 15.0143, abs_tol=abs_tol) - assert math.isclose(exp.f_rows, 2.5857, abs_tol=abs_tol) - assert math.isclose(exp.f_columns, 1.3714, abs_tol=abs_tol) + assert math.isclose(exp.f_treatments, 12.5119, abs_tol=abs_tol) + assert math.isclose(exp.f_rows, 2.1548, abs_tol=abs_tol) + assert math.isclose(exp.f_columns, 1.1429, abs_tol=abs_tol) def test_latin_square_multiple_comparisons(self): exp = LatinSquare( diff --git a/doex/tests/test_rcbd.py b/doex/tests/test_rcbd.py index c6ceb4f..134fe49 100644 --- a/doex/tests/test_rcbd.py +++ b/doex/tests/test_rcbd.py @@ -48,16 +48,30 @@ class TestRCBDMissing: ] ) abs_tol = 10 ** -3 - assert math.isclose(exp.f_treatments, 0.8102, abs_tol=abs_tol) - assert math.isclose(exp.f_blocks, 2.2349, abs_tol=abs_tol) + assert math.isclose(exp.f_treatments, 0.7561, abs_tol=abs_tol) + assert math.isclose(exp.f_blocks, 2.0859, abs_tol=abs_tol) def test_rcbd_missing_2(self): exp = RandomizedCompleteBlockDesign_MissingValues( [[12, 14, 12], [10, float("nan"), 8], [float("nan"), 15, 10]] ) - assert math.isclose(exp.f_treatments, 9.5, abs_tol=10 ** -3) - assert math.isclose(exp.f_blocks, 15.5, abs_tol=10 ** -3) + assert math.isclose(exp.f_treatments, 4.7500, abs_tol=10 ** -3) + assert math.isclose(exp.f_blocks, 7.7500, abs_tol=10 ** -3) + + def test_rcbd_missing_3(self): + exp = RandomizedCompleteBlockDesign_MissingValues( + [ + [90.3, 89.2, 98.2, 93.9, 87.4, 97.9], + [92.5, 89.5, 90.6, float("nan"), 87, 95.8], + [85.5, 90.8, 89.6, 86.2, 88, 93.4], + [82.5, 89.5, 85.6, 87.4, 78.9, 90.7], + ] + ) + + abs_tol = 10 ** -3 + assert math.isclose(exp.f_treatments, 7.6241, abs_tol=abs_tol) + assert math.isclose(exp.f_blocks, 5.2181, abs_tol=abs_tol) def test_rcbd_missing_throw_error(self): with pytest.raises(Exception):
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 3, "test_score": 2 }, "num_modified_files": 2 }
0.0
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.8", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
backports.tarfile==1.2.0 black==24.8.0 build==1.2.2.post1 bump2version==1.0.1 bumpversion==0.6.0 cachetools==5.5.2 certifi==2025.1.31 cffi==1.17.1 chardet==5.2.0 charset-normalizer==3.4.1 check-manifest==0.50 click==8.1.8 codecov==2.1.13 colorama==0.4.6 coverage==7.6.1 cryptography==44.0.2 distlib==0.3.9 docutils==0.20.1 -e git+https://github.com/rohitsanj/doex.git@1ad6b4d894ffc57a5e8fda66548d4806bdc314b9#egg=doex exceptiongroup==1.2.2 filelock==3.16.1 flake8==7.1.2 id==1.5.0 idna==3.10 importlib_metadata==8.5.0 importlib_resources==6.4.5 iniconfig==2.1.0 jaraco.classes==3.4.0 jaraco.context==6.0.1 jaraco.functools==4.1.0 jeepney==0.9.0 keyring==25.5.0 markdown-it-py==3.0.0 mccabe==0.7.0 mdurl==0.1.2 more-itertools==10.5.0 mypy-extensions==1.0.0 nh3==0.2.21 numpy==1.24.4 packaging==24.2 pandas==2.0.3 pathspec==0.12.1 patsy==1.0.1 platformdirs==4.3.6 pluggy==1.5.0 prettytable==3.11.0 pycodestyle==2.12.1 pycparser==2.22 pyflakes==3.2.0 Pygments==2.19.1 pyproject-api==1.8.0 pyproject_hooks==1.2.0 pytest==8.3.5 pytest-cov==5.0.0 python-dateutil==2.9.0.post0 pytz==2025.2 readme_renderer==43.0 requests==2.32.3 requests-toolbelt==1.0.0 rfc3986==2.0.0 rich==14.0.0 scipy==1.10.1 SecretStorage==3.3.3 six==1.17.0 statsmodels==0.14.1 tomli==2.2.1 tox==4.25.0 twine==6.1.0 typing_extensions==4.13.0 tzdata==2025.2 urllib3==2.2.3 virtualenv==20.29.3 wcwidth==0.2.13 xmltodict==0.14.2 zipp==3.20.2
name: doex channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=24.2=py38h06a4308_0 - python=3.8.20=he870216_0 - readline=8.2=h5eee18b_0 - setuptools=75.1.0=py38h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.44.0=py38h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - backports-tarfile==1.2.0 - black==24.8.0 - build==1.2.2.post1 - bump2version==1.0.1 - bumpversion==0.6.0 - cachetools==5.5.2 - certifi==2025.1.31 - cffi==1.17.1 - chardet==5.2.0 - charset-normalizer==3.4.1 - check-manifest==0.50 - click==8.1.8 - codecov==2.1.13 - colorama==0.4.6 - coverage==7.6.1 - cryptography==44.0.2 - distlib==0.3.9 - docutils==0.20.1 - doex==0.0.6 - exceptiongroup==1.2.2 - filelock==3.16.1 - flake8==7.1.2 - id==1.5.0 - idna==3.10 - importlib-metadata==8.5.0 - importlib-resources==6.4.5 - iniconfig==2.1.0 - jaraco-classes==3.4.0 - jaraco-context==6.0.1 - jaraco-functools==4.1.0 - jeepney==0.9.0 - keyring==25.5.0 - markdown-it-py==3.0.0 - mccabe==0.7.0 - mdurl==0.1.2 - more-itertools==10.5.0 - mypy-extensions==1.0.0 - nh3==0.2.21 - numpy==1.24.4 - packaging==24.2 - pandas==2.0.3 - pathspec==0.12.1 - patsy==1.0.1 - platformdirs==4.3.6 - pluggy==1.5.0 - prettytable==3.11.0 - pycodestyle==2.12.1 - pycparser==2.22 - pyflakes==3.2.0 - pygments==2.19.1 - pyproject-api==1.8.0 - pyproject-hooks==1.2.0 - pytest==8.3.5 - pytest-cov==5.0.0 - python-dateutil==2.9.0.post0 - pytz==2025.2 - readme-renderer==43.0 - requests==2.32.3 - requests-toolbelt==1.0.0 - rfc3986==2.0.0 - rich==14.0.0 - scipy==1.10.1 - secretstorage==3.3.3 - six==1.17.0 - statsmodels==0.14.1 - tomli==2.2.1 - tox==4.25.0 - twine==6.1.0 - typing-extensions==4.13.0 - tzdata==2025.2 - urllib3==2.2.3 - virtualenv==20.29.3 - wcwidth==0.2.13 - xmltodict==0.14.2 - zipp==3.20.2 prefix: /opt/conda/envs/doex
[ "doex/tests/test_latin_square.py::TestLatinSquare::test_latin_square_missing_1", "doex/tests/test_rcbd.py::TestRCBDMissing::test_rcbd_missing_1", "doex/tests/test_rcbd.py::TestRCBDMissing::test_rcbd_missing_2", "doex/tests/test_rcbd.py::TestRCBDMissing::test_rcbd_missing_3" ]
[]
[ "doex/tests/test_latin_square.py::TestLatinSquare::test_latin_square", "doex/tests/test_latin_square.py::TestLatinSquare::test_latin_square_raises_error_shape_mismatch", "doex/tests/test_latin_square.py::TestLatinSquare::test_latin_square_raises_error_treatments", "doex/tests/test_latin_square.py::TestLatinSquare::test_latin_square_multiple_comparisons", "doex/tests/test_rcbd.py::TestRCBD::test_rcbd_1", "doex/tests/test_rcbd.py::TestRCBD::test_rcbd_2", "doex/tests/test_rcbd.py::TestRCBDMissing::test_rcbd_missing_throw_error", "doex/tests/test_rcbd.py::TestRCBDMissing::test_rcbd_multiple_comparisons" ]
[]
BSD 3-Clause "New" or "Revised" License
8,757
633
[ "doex/latin_square.py", "doex/rcbd.py" ]
cloud-custodian__cloud-custodian-6235
894367fbc3c8fc1417c175586f09f46c90929246
2020-10-21 15:05:16
ba7c6540540bac8215ac7b96b1fe50485da140ff
kapilt: would like to see a cli test here
diff --git a/c7n/policy.py b/c7n/policy.py index c0baaa1a7..40d40ea62 100644 --- a/c7n/policy.py +++ b/c7n/policy.py @@ -927,10 +927,13 @@ class PolicyConditions: self.session_factory = rm.session_factory # used by c7n-org to extend evaluation conditions self.env_vars = {} + self.initialized = False def validate(self): - self.filters.extend(self.convert_deprecated()) - self.filters = self.filter_registry.parse(self.filters, self) + if not self.initialized: + self.filters.extend(self.convert_deprecated()) + self.filters = self.filter_registry.parse(self.filters, self) + self.initialized = True def evaluate(self, event=None): policy_vars = dict(self.env_vars) diff --git a/c7n/resources/mq.py b/c7n/resources/mq.py index 6df25f9d7..4b24f312c 100644 --- a/c7n/resources/mq.py +++ b/c7n/resources/mq.py @@ -7,7 +7,7 @@ from c7n.filters.vpc import SecurityGroupFilter, SubnetFilter from c7n.manager import resources from c7n.query import QueryResourceManager, TypeInfo from c7n.utils import local_session, type_schema -from c7n.tags import RemoveTag, Tag, TagDelayedAction, TagActionFilter +from c7n.tags import RemoveTag, Tag, TagDelayedAction, TagActionFilter, universal_augment @resources.register('message-broker') @@ -157,3 +157,19 @@ class MarkForOpMessageBroker(TagDelayedAction): op: delete days: 7 """ + + [email protected]('message-config') +class MessageConfig(QueryResourceManager): + + class resource_type(TypeInfo): + service = 'mq' + enum_spec = ('list_configurations', 'Configurations', None) + cfn_type = 'AWS::AmazonMQ::Configuration' + id = 'Id' + arn = 'Arn' + arn_type = 'configuration' + name = 'Name' + universal_taggable = object() + + augment = universal_augment diff --git a/c7n/resources/resource_map.py b/c7n/resources/resource_map.py index 74b8b7c56..a39c253f1 100644 --- a/c7n/resources/resource_map.py +++ b/c7n/resources/resource_map.py @@ -114,6 +114,7 @@ ResourceMap = { "aws.lightsail-instance": "c7n.resources.lightsail.Instance", "aws.log-group": "c7n.resources.cw.LogGroup", "aws.message-broker": "c7n.resources.mq.MessageBroker", + "aws.message-config": "c7n.resources.mq.MessageConfig", "aws.ml-model": "c7n.resources.ml.MLModel", "aws.nat-gateway": "c7n.resources.vpc.NATGateway", "aws.network-acl": "c7n.resources.vpc.NetworkAcl",
Custodian errors when using example conditional policy with a gcp resource **Describe the bug** Attempting to run the [example conditional execution policy ](https://cloudcustodian.io/docs/quickstart/advanced.html#conditional-policy-execution) in combination with a gcp resource results in an error when validating the condition. **To Reproduce** Steps to reproduce the behavior: 1. Create a policy using the condition of the example for a gcp resource, I used `gcp.function`. 2. `custodian run` the policy **Expected behavior** The example policy runs successfully. **Background Information:** - Policy: ```yaml policies: - name: cloudfunction resource: gcp.function conditions: - type: value key: now op: greater-than value_type: date value: "2018-12-15" - type: value key: now op: less-than value_type: date value: "2018-12-31" ``` - Traceback: ``` custodian run ~/Projects/policies/policy.yaml -v -s ~/Projects/policies/.custodian 2020-10-15 16:44:26,694: custodian.commands:DEBUG Loaded file /Users/kevinkessels/Projects/policies/policy.yaml. Contains 1 policies Traceback (most recent call last): File "/Users/kevinkessels/Projects/custodian/venv/bin/custodian", line 8, in <module> sys.exit(main()) File "/Users/kevinkessels/Projects/custodian/venv/lib/python3.8/site-packages/c7n/cli.py", line 352, in main command(config) File "/Users/kevinkessels/Projects/custodian/venv/lib/python3.8/site-packages/c7n/commands.py", line 124, in _load_policies p.validate() File "/Users/kevinkessels/Projects/custodian/venv/lib/python3.8/site-packages/c7n/policy.py", line 1046, in validate self.conditions.validate() File "/Users/kevinkessels/Projects/custodian/venv/lib/python3.8/site-packages/c7n/policy.py", line 929, in validate self.filters = self.filter_registry.parse(self.filters, self) File "/Users/kevinkessels/Projects/custodian/venv/lib/python3.8/site-packages/c7n/filters/core.py", line 122, in parse results.append(self.factory(d, manager)) File "/Users/kevinkessels/Projects/custodian/venv/lib/python3.8/site-packages/c7n/filters/core.py", line 146, in factory filter_type = data.get('type') AttributeError: 'ValueFilter' object has no attribute 'get' ``` - `custodian version --debug` output ``` Custodian: 0.9.6 Python: 3.8.5 (default, Jul 21 2020, 10:48:26) [Clang 11.0.3 (clang-1103.0.32.62)] Platform: posix.uname_result(sysname='Darwin', nodename='M22252.fritz.box', release='19.6.0', version='Darwin Kernel Version 19.6.0: Mon Aug 31 22:12:52 PDT 2020; root:xnu-6153.141.2~1/RELEASE_X86_64', machine='x86_64') Using venv: True Docker: False Installed: argcomplete==1.12.1 attrs==20.2.0 boto3==1.15.12 botocore==1.18.12 c7n==0.9.6 cachetools==4.1.1 certifi==2020.6.20 chardet==3.0.4 docutils==0.15.2 google-api-core==1.22.2 google-api-python-client==1.12.3 google-auth==1.21.2 google-auth-httplib2==0.0.4 google-cloud-core==1.4.1 google-cloud-logging==1.15.1 google-cloud-monitoring==0.34.0 google-cloud-storage==1.31.0 google-crc32c==1.0.0 google-resumable-media==1.0.0 googleapis-common-protos==1.52.0 httplib2==0.18.1 idna==2.10 importlib-metadata==1.7.0 jmespath==0.10.0 jsonschema==3.2.0 protobuf==3.13.0 pyasn1==0.4.8 pyasn1-modules==0.2.8 pyrsistent==0.16.1 python-dateutil==2.8.1 pytz==2020.1 pyyaml==5.3.1 ratelimiter==1.2.0.post0 requests==2.24.0 retrying==1.3.3 rsa==4.6 s3transfer==0.3.3 setuptools==50.0.0 six==1.15.0 tabulate==0.8.7 uritemplate==3.0.1 urllib3==1.25.10 zipp==3.3.0 ``` Note that validating the policy with `custodian validate` succeeds.
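A minimal sketch of the guard pattern the patch above applies: `validate()` replaces `self.filters` with parsed filter objects in place, so a second call (validate at load time, then again at run time) must become a no-op. The `Conditions` and `ParsedFilter` classes below are simplified stand-ins, not Cloud Custodian's actual `PolicyConditions` or filter registry.

```python
class ParsedFilter:
    def __init__(self, type_, data):
        self.type = type_
        self.data = data


class Conditions:
    """Simplified stand-in for a conditions holder whose validate() parses filters in place."""

    def __init__(self, raw_filters):
        self.filters = list(raw_filters)   # raw dicts, e.g. {"type": "value", "key": "now", ...}
        self.initialized = False

    def _parse(self, raw):
        # Stand-in for the filter registry's parse(): it only understands raw dicts,
        # so feeding it already-parsed objects blows up, analogous to the real crash.
        return [ParsedFilter(f["type"], f) for f in raw]

    def validate(self):
        if not self.initialized:           # guard equivalent to the patch's `initialized` flag
            self.filters = self._parse(self.filters)
            self.initialized = True


conds = Conditions([{"type": "value", "key": "now"}])
conds.validate()
conds.validate()   # second call is now a no-op instead of re-parsing parsed objects
print(conds.filters)
```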
cloud-custodian/cloud-custodian
diff --git a/tests/data/placebo/test_mq_config_tagging/mq.ListConfigurations_1.json b/tests/data/placebo/test_mq_config_tagging/mq.ListConfigurations_1.json new file mode 100644 index 000000000..b32569dd2 --- /dev/null +++ b/tests/data/placebo/test_mq_config_tagging/mq.ListConfigurations_1.json @@ -0,0 +1,15 @@ +{ + "status_code": 200, + "data": { + "ResponseMetadata": {}, + "Configurations": [ + { + "Arn": "arn:aws:mq:us-east-1:644160558196:configuration:c-1cd76ad8-81bc-46f6-b43f-56fb118b426c", + "EngineType": "ActiveMQ", + "EngineVersion": "5.15.12", + "Id": "c-1cd76ad8-81bc-46f6-b43f-56fb118b426c", + "Name": "test-config" + } + ] + } +} \ No newline at end of file diff --git a/tests/data/placebo/test_mq_config_tagging/mq.ListTags_1.json b/tests/data/placebo/test_mq_config_tagging/mq.ListTags_1.json new file mode 100644 index 000000000..0c7b233ed --- /dev/null +++ b/tests/data/placebo/test_mq_config_tagging/mq.ListTags_1.json @@ -0,0 +1,9 @@ +{ + "status_code": 200, + "data": { + "ResponseMetadata": {}, + "Tags": { + "Env": "Dev" + } + } +} \ No newline at end of file diff --git a/tests/data/placebo/test_mq_config_tagging/tagging.GetResources_1.json b/tests/data/placebo/test_mq_config_tagging/tagging.GetResources_1.json new file mode 100644 index 000000000..d6743bbbc --- /dev/null +++ b/tests/data/placebo/test_mq_config_tagging/tagging.GetResources_1.json @@ -0,0 +1,18 @@ +{ + "status_code": 200, + "data": { + "PaginationToken": "", + "ResourceTagMappingList": [ + { + "ResourceARN": "arn:aws:mq:us-east-1:644160558196:configuration:c-1cd76ad8-81bc-46f6-b43f-56fb118b426c", + "Tags": [ + { + "Key": "Role", + "Value": "Dev" + } + ] + } + ], + "ResponseMetadata": {} + } +} \ No newline at end of file diff --git a/tests/data/placebo/test_mq_config_tagging/tagging.TagResources_1.json b/tests/data/placebo/test_mq_config_tagging/tagging.TagResources_1.json new file mode 100644 index 000000000..bd2dead85 --- /dev/null +++ b/tests/data/placebo/test_mq_config_tagging/tagging.TagResources_1.json @@ -0,0 +1,7 @@ +{ + "status_code": 200, + "data": { + "FailedResourcesMap": {}, + "ResponseMetadata": {} + } +} \ No newline at end of file diff --git a/tests/data/placebo/test_mq_config_tagging/tagging.UntagResources_1.json b/tests/data/placebo/test_mq_config_tagging/tagging.UntagResources_1.json new file mode 100644 index 000000000..bd2dead85 --- /dev/null +++ b/tests/data/placebo/test_mq_config_tagging/tagging.UntagResources_1.json @@ -0,0 +1,7 @@ +{ + "status_code": 200, + "data": { + "FailedResourcesMap": {}, + "ResponseMetadata": {} + } +} \ No newline at end of file diff --git a/tests/test_mq.py b/tests/test_mq.py index 9bd8cb33b..c03c09768 100644 --- a/tests/test_mq.py +++ b/tests/test_mq.py @@ -96,3 +96,31 @@ class MessageQueue(BaseTest): tags, {'Env': 'Dev', 'maid_status': 'Resource does not meet policy: delete@2019/01/31'}) + + def test_mq_config_tagging(self): + factory = self.replay_flight_data("test_mq_config_tagging") + p = self.load_policy( + { + "name": "mark-unused-mq-delete", + "resource": "message-config", + 'filters': [{'tag:Role': 'Dev'}], + "actions": [ + {'type': 'tag', + 'tags': {'Env': 'Dev'}}, + {'type': 'remove-tag', + 'tags': ['Role']}]}, + config={'region': 'us-east-1'}, + session_factory=factory, + ) + resources = p.run() + self.assertTrue(len(resources), 1) + client = factory().client("mq") + if self.recording: + time.sleep(1) + tags = client.list_tags(ResourceArn=resources[0]["Arn"])["Tags"] + self.assertEqual( + {t['Key']: t['Value'] for t in resources[0]['Tags']}, + 
{'Role': 'Dev'}) + self.assertEqual( + tags, + {'Env': 'Dev'})
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 3 }, "num_modified_files": 3 }
0.9
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "pytest-xdist", "pytest-mock", "pytest-asyncio" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.8", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
apipkg==1.5 appdirs==1.4.4 argcomplete==1.12.1 attrs==20.2.0 aws-xray-sdk==2.6.0 bleach==3.2.1 boto3==1.15.11 botocore==1.18.11 -e git+https://github.com/cloud-custodian/cloud-custodian.git@894367fbc3c8fc1417c175586f09f46c90929246#egg=c7n certifi==2020.6.20 cffi==1.14.3 chardet==3.0.4 colorama==0.4.3 coverage==5.3 cryptography==3.1.1 distlib==0.3.1 docutils==0.16 exceptiongroup==1.2.2 execnet==1.7.1 filelock==3.0.12 flake8==3.8.4 future==0.18.2 idna==2.10 importlib-metadata==1.7.0 iniconfig==1.0.1 jeepney==0.4.3 jmespath==0.10.0 jsonpatch==1.26 jsonpickle==1.3 jsonpointer==2.0 jsonschema==3.2.0 keyring==21.4.0 mccabe==0.6.1 mock==4.0.2 more-itertools==8.5.0 multidict==4.7.6 packaging==20.4 pkginfo==1.5.0.1 placebo==0.9.0 pluggy==1.5.0 portalocker==1.7.1 psutil==5.7.2 py==1.9.0 pycodestyle==2.6.0 pycparser==2.20 pyflakes==2.2.0 Pygments==2.7.1 pyparsing==2.4.7 pyrsistent==0.17.3 pytest==8.3.5 pytest-asyncio==0.24.0 pytest-cov==2.10.1 pytest-forked==1.3.0 pytest-mock==3.14.0 pytest-sugar==0.9.4 pytest-terraform==0.5.1 pytest-xdist==1.34.0 python-dateutil==2.8.1 PyYAML==5.3.1 readme-renderer==26.0 requests==2.24.0 requests-toolbelt==0.9.1 rfc3986==1.4.0 s3transfer==0.3.3 SecretStorage==3.1.2 six==1.15.0 tabulate==0.8.7 termcolor==1.1.0 toml==0.10.1 tomli==2.2.1 tox==3.20.0 tqdm==4.50.0 twine==3.2.0 urllib3==1.25.10 vcrpy==4.1.0 virtualenv==20.0.33 webencodings==0.5.1 wrapt==1.12.1 yarl==1.6.0 zipp==3.3.0
name: cloud-custodian channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=24.2=py38h06a4308_0 - python=3.8.20=he870216_0 - readline=8.2=h5eee18b_0 - setuptools=75.1.0=py38h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.44.0=py38h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - apipkg==1.5 - appdirs==1.4.4 - argcomplete==1.12.1 - attrs==20.2.0 - aws-xray-sdk==2.6.0 - bleach==3.2.1 - boto3==1.15.11 - botocore==1.18.11 - c7n==0.9.7 - certifi==2020.6.20 - cffi==1.14.3 - chardet==3.0.4 - colorama==0.4.3 - coverage==5.3 - cryptography==3.1.1 - distlib==0.3.1 - docutils==0.16 - exceptiongroup==1.2.2 - execnet==1.7.1 - filelock==3.0.12 - flake8==3.8.4 - future==0.18.2 - idna==2.10 - importlib-metadata==1.7.0 - iniconfig==1.0.1 - jeepney==0.4.3 - jmespath==0.10.0 - jsonpatch==1.26 - jsonpickle==1.3 - jsonpointer==2.0 - jsonschema==3.2.0 - keyring==21.4.0 - mccabe==0.6.1 - mock==4.0.2 - more-itertools==8.5.0 - multidict==4.7.6 - packaging==20.4 - pkginfo==1.5.0.1 - placebo==0.9.0 - pluggy==1.5.0 - portalocker==1.7.1 - psutil==5.7.2 - py==1.9.0 - pycodestyle==2.6.0 - pycparser==2.20 - pyflakes==2.2.0 - pygments==2.7.1 - pyparsing==2.4.7 - pyrsistent==0.17.3 - pytest==8.3.5 - pytest-asyncio==0.24.0 - pytest-cov==2.10.1 - pytest-forked==1.3.0 - pytest-mock==3.14.0 - pytest-sugar==0.9.4 - pytest-terraform==0.5.1 - pytest-xdist==1.34.0 - python-dateutil==2.8.1 - pyyaml==5.3.1 - readme-renderer==26.0 - requests==2.24.0 - requests-toolbelt==0.9.1 - rfc3986==1.4.0 - s3transfer==0.3.3 - secretstorage==3.1.2 - six==1.15.0 - tabulate==0.8.7 - termcolor==1.1.0 - toml==0.10.1 - tomli==2.2.1 - tox==3.20.0 - tqdm==4.50.0 - twine==3.2.0 - urllib3==1.25.10 - vcrpy==4.1.0 - virtualenv==20.0.33 - webencodings==0.5.1 - wrapt==1.12.1 - yarl==1.6.0 - zipp==3.3.0 prefix: /opt/conda/envs/cloud-custodian
[ "tests/test_mq.py::MessageQueue::test_mq_config_tagging" ]
[]
[ "tests/test_mq.py::MessageQueue::test_delete_mq", "tests/test_mq.py::MessageQueue::test_metrics", "tests/test_mq.py::MessageQueue::test_mq_tag_untag_markforop", "tests/test_mq.py::MessageQueue::test_query_with_subnet_sg_filter" ]
[]
Apache License 2.0
8,763
737
[ "c7n/policy.py", "c7n/resources/mq.py", "c7n/resources/resource_map.py" ]
asottile__pyupgrade-347
1510dc5bf302e4218f6202185b6725e6a2653e07
2020-10-22 18:23:32
5e4e0ddfdc0e24443e1d12111c57127c3caf90c1
diff --git a/pyupgrade.py b/pyupgrade.py index 8fe8d11..79f2bb2 100644 --- a/pyupgrade.py +++ b/pyupgrade.py @@ -2233,7 +2233,7 @@ def _fix_py3_plus( victims = _victims(tokens, i, call, gen=False) del tokens[victims.starts[0] + 1:victims.ends[-1]] elif token.offset in visitor.encode_calls: - i = _find_open_paren(tokens, i) + i = _find_open_paren(tokens, i + 1) call = visitor.encode_calls[token.offset] victims = _victims(tokens, i, call, gen=False) del tokens[victims.starts[0] + 1:victims.ends[-1]]
pyupgrade incorrect output before ```python expectedResult = ( ":example.com \u0442\u0435\u0441\u0442 " "\u043d\u0438\u043a\r\n" ).encode("utf-8") ``` expected ```python b':example.com \xd1\x82\xd0\xb5\xd1\x81\xd1\x82 \xd0\xbd\xd0\xb8\xd0\xba\r\n' ``` or maybe this? ```python ':example.com тест ник\r\n'.encode() ``` actual ```python expectedResult = ().encode("utf-8") ```
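A toy illustration of the off-by-one fixed by the one-character patch above. The token list and `find_open_paren` helper are simplified stand-ins for pyupgrade's real token handling: for the multi-line parenthesized string in the report, the token at the matched offset is itself a `(`, so a search starting *at* that index returns the wrong paren and the rewrite deletes the whole parenthesized string, leaving `().encode("utf-8")`.

```python
# Simplified token stream for:  x = ( "s" ).encode("utf-8")
tokens = ["x", "=", "(", '"s"', ")", ".", "encode", "(", '"utf-8"', ")"]


def find_open_paren(toks, start):
    """Return the index of the first '(' at or after `start` (toy version)."""
    i = start
    while toks[i] != "(":
        i += 1
    return i


offset = 2  # the matched token is the outer "(" wrapping the string literal
print(find_open_paren(tokens, offset))      # 2 -> paren of the wrapped string: the buggy match
print(find_open_paren(tokens, offset + 1))  # 7 -> paren of the .encode(...) call: the fix
```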
asottile/pyupgrade
diff --git a/tests/default_encoding_test.py b/tests/default_encoding_test.py index 4385583..f39548d 100644 --- a/tests/default_encoding_test.py +++ b/tests/default_encoding_test.py @@ -13,6 +13,14 @@ from pyupgrade import _fix_py3_plus 'sys.stdout.buffer.write(\n "a"\n "b".encode("utf-8")\n)', 'sys.stdout.buffer.write(\n "a"\n "b".encode()\n)', ), + ( + 'x = (\n' + ' "y\\u2603"\n' + ').encode("utf-8")\n', + 'x = (\n' + ' "y\\u2603"\n' + ').encode()\n', + ), ), ) def test_fix_encode(s, expected):
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 0 }, "num_modified_files": 1 }
2.7
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "covdefaults", "coverage", "pytest" ], "pre_install": null, "python": "3.9", "reqs_path": [ "requirements-dev.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
covdefaults==2.3.0 coverage==7.8.0 exceptiongroup==1.2.2 iniconfig==2.1.0 packaging==24.2 pluggy==1.5.0 pytest==8.3.5 -e git+https://github.com/asottile/pyupgrade.git@1510dc5bf302e4218f6202185b6725e6a2653e07#egg=pyupgrade tokenize_rt==6.1.0 tomli==2.2.1
name: pyupgrade channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - covdefaults==2.3.0 - coverage==7.8.0 - exceptiongroup==1.2.2 - iniconfig==2.1.0 - packaging==24.2 - pluggy==1.5.0 - pytest==8.3.5 - tokenize-rt==6.1.0 - tomli==2.2.1 prefix: /opt/conda/envs/pyupgrade
[ "tests/default_encoding_test.py::test_fix_encode[x" ]
[]
[ "tests/default_encoding_test.py::test_fix_encode[\"asd\".encode(\"utf-8\")-\"asd\".encode()]", "tests/default_encoding_test.py::test_fix_encode[\"asd\".encode(\"utf8\")-\"asd\".encode()]", "tests/default_encoding_test.py::test_fix_encode[\"asd\".encode(\"UTF-8\")-\"asd\".encode()]", "tests/default_encoding_test.py::test_fix_encode[sys.stdout.buffer.write(\\n", "tests/default_encoding_test.py::test_fix_encode_noop[\"asd\".encode(\"unknown-codec\")]", "tests/default_encoding_test.py::test_fix_encode_noop[\"asd\".encode(\"ascii\")]", "tests/default_encoding_test.py::test_fix_encode_noop[x=\"asd\"\\nx.encode(\"utf-8\")]", "tests/default_encoding_test.py::test_fix_encode_noop[\"asd\".encode(\"utf-8\",", "tests/default_encoding_test.py::test_fix_encode_noop[\"asd\".encode(encoding=\"utf-8\")]" ]
[]
MIT License
8,768
191
[ "pyupgrade.py" ]
Picterra__picterra-python-39
e53b36171ceb9a921b5dbfc50665c14b91cdd7bc
2020-10-23 15:25:30
e53b36171ceb9a921b5dbfc50665c14b91cdd7bc
diff --git a/examples/detectors_management.py b/examples/detectors_management.py new file mode 100644 index 0000000..56ff82f --- /dev/null +++ b/examples/detectors_management.py @@ -0,0 +1,14 @@ +import json +from picterra import APIClient + +# Set the PICTERRA_API_KEY environment variable to define your API key +client = APIClient() + +# Create a new detector (its type is 'count' by default) +detector_id = client.create_detector('My first detector') + + +# List existing detectors +for d in client.list_detectors(): + print('detector id=%s, name=%s, detection_type=%s, output_type=%s, training_steps=%d' % ( + d['id'], d['name'], d['detection_type'], d['output_type'], d['training_steps'])) \ No newline at end of file diff --git a/src/picterra/client.py b/src/picterra/client.py index 45d8adf..50b68bc 100644 --- a/src/picterra/client.py +++ b/src/picterra/client.py @@ -58,6 +58,19 @@ class APIClient(): raise APIError('Operation %s failed' % operation_id) time.sleep(poll_interval) + def _paginate_through_list(self, resource_endpoint: str): + url = self._api_url('%s/?page_number=1' % resource_endpoint) + data = [] + while url: + logger.debug('Paginating through %s list at page %s' % (resource_endpoint, url)) + resp = self.sess.get(url) + r = resp.json() + url = r['next'] + count = r['count'] + data += r['results'] + assert len(data) == count + return data + def upload_raster(self, filename, name, folder_id=None): """ Upload a raster to picterra. @@ -122,17 +135,7 @@ class APIClient(): } """ - url = self._api_url('rasters/?page_number=1') - data = [] - while url: - logger.debug('Paginating through rasters list at page %s' % url) - resp = self.sess.get(url) - r = resp.json() - url = r['next'] - count = r['count'] - data += r['results'] - assert len(data) == count - return data + return self._paginate_through_list('rasters') def delete_raster(self, raster_id): """ @@ -238,6 +241,33 @@ class APIClient(): raise APIError(resp.text) return resp.json()['id'] + def list_detectors(self): + """ + Returns: + A list of detectors dictionaries + + Example: + + :: + + { + 'id': '42', + 'detection_type': 'count', + 'name': 'cow detector', + 'output_type': 'bbox', + 'training_steps': 787 + }, + { + 'id': '43', + 'detection_type': 'segmentation', + 'name': 'test5', + 'output_type': 'polygon', + 'training_steps': 500 + } + + """ + return self._paginate_through_list('detectors') + def run_detector(self, detector_id, raster_id): """ Runs a detector on a raster
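A hedged sketch of the "follow the `next` URL" pagination pattern that the patch above factors into `_paginate_through_list()`. The function name, session handling, and URL below are illustrative stand-ins, not the Picterra client's real API surface.

```python
import requests


def paginate(session: requests.Session, first_page_url: str) -> list:
    """Collect all items from an endpoint returning
    {"count": ..., "next": <url or None>, "results": [...]} per page."""
    items = []
    url = first_page_url
    while url:
        payload = session.get(url).json()
        items.extend(payload["results"])
        url = payload["next"]  # None on the last page, which ends the loop
    return items


# Hypothetical usage (URL is a placeholder, not the real API endpoint):
# detectors = paginate(requests.Session(), "https://example.com/api/detectors/?page_number=1")
```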
Add list detectors endpoint
Picterra/picterra-python
diff --git a/tests/test_client.py b/tests/test_client.py index bb7145e..89d5f51 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -38,6 +38,25 @@ def add_mock_rasters_list_response(): responses.add(responses.GET, api_url('rasters/?page_number=2'), json=data2, status=200) +def add_mock_detectors_list_response(): + data1 = { + "count": 4, "next": api_url('detectors/?page_number=2'), "previous": None, "page_size": 2, + "results": [ + {"id": "40", "type": "count", "name": "detector1"}, + {"id": "41", "type": "count", "name": "detector2"} + ] + } + data2 = { + "count": 4, "next": None, "previous": api_url('detectors/?page_number=1'), "page_size": 2, + "results": [ + {"id": "42", "type": "count", "name": "detector3"}, + {"id": "43", "type": "count", "name": "detector4"} + ] + } + responses.add(responses.GET, api_url('detectors/?page_number=1'), json=data1, status=200) + responses.add(responses.GET, api_url('detectors/?page_number=2'), json=data2, status=200) + + def add_mock_detector_creation_response(): responses.add(responses.POST, api_url('detectors/'), json={'id': 'foobar'}, status=201) @@ -270,6 +289,15 @@ def test_detector_creation(): assert detector == 'foobar' [email protected] +def test_list_detectors(): + client = _client() + add_mock_detectors_list_response() + detectors = client.list_detectors() + assert detectors[0]['name'] == 'detector1' + assert detectors[1]['name'] == 'detector2' + + @responses.activate def test_set_raster_detection_areas_from_file(): add_mock_detection_areas_upload_responses(1)
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_added_files" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 3, "test_score": 2 }, "num_modified_files": 1 }
unknown
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "responses" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "docs/requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
alabaster==0.7.16 anyio==4.9.0 babel==2.17.0 certifi==2025.1.31 charset-normalizer==3.4.1 click==8.1.8 colorama==0.4.6 docutils==0.21.2 exceptiongroup==1.2.2 h11==0.14.0 idna==3.10 imagesize==1.4.1 importlib_metadata==8.6.1 iniconfig==2.1.0 Jinja2==3.1.6 MarkupSafe==3.0.2 packaging==24.2 -e git+https://github.com/Picterra/picterra-python.git@e53b36171ceb9a921b5dbfc50665c14b91cdd7bc#egg=picterra pluggy==1.5.0 Pygments==2.19.1 pytest==8.3.5 PyYAML==6.0.2 requests==2.32.3 responses==0.25.7 sniffio==1.3.1 snowballstemmer==2.2.0 Sphinx==7.4.7 sphinx-autobuild==2024.10.3 sphinx-rtd-theme==3.0.2 sphinxcontrib-applehelp==2.0.0 sphinxcontrib-devhelp==2.0.0 sphinxcontrib-htmlhelp==2.1.0 sphinxcontrib-jquery==4.1 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==2.0.0 sphinxcontrib-serializinghtml==2.0.0 starlette==0.46.1 tomli==2.2.1 typing_extensions==4.13.0 urllib3==2.3.0 uvicorn==0.34.0 watchfiles==1.0.4 websockets==15.0.1 zipp==3.21.0
name: picterra-python channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - alabaster==0.7.16 - anyio==4.9.0 - babel==2.17.0 - certifi==2025.1.31 - charset-normalizer==3.4.1 - click==8.1.8 - colorama==0.4.6 - docutils==0.21.2 - exceptiongroup==1.2.2 - h11==0.14.0 - idna==3.10 - imagesize==1.4.1 - importlib-metadata==8.6.1 - iniconfig==2.1.0 - jinja2==3.1.6 - markupsafe==3.0.2 - packaging==24.2 - pluggy==1.5.0 - pygments==2.19.1 - pytest==8.3.5 - pyyaml==6.0.2 - requests==2.32.3 - responses==0.25.7 - sniffio==1.3.1 - snowballstemmer==2.2.0 - sphinx==7.4.7 - sphinx-autobuild==2024.10.3 - sphinx-rtd-theme==3.0.2 - sphinxcontrib-applehelp==2.0.0 - sphinxcontrib-devhelp==2.0.0 - sphinxcontrib-htmlhelp==2.1.0 - sphinxcontrib-jquery==4.1 - sphinxcontrib-jsmath==1.0.1 - sphinxcontrib-qthelp==2.0.0 - sphinxcontrib-serializinghtml==2.0.0 - starlette==0.46.1 - tomli==2.2.1 - typing-extensions==4.13.0 - urllib3==2.3.0 - uvicorn==0.34.0 - watchfiles==1.0.4 - websockets==15.0.1 - zipp==3.21.0 prefix: /opt/conda/envs/picterra-python
[ "tests/test_client.py::test_list_detectors" ]
[]
[ "tests/test_client.py::test_upload_raster", "tests/test_client.py::test_delete_raster", "tests/test_client.py::test_list_rasters", "tests/test_client.py::test_detector_creation", "tests/test_client.py::test_set_raster_detection_areas_from_file", "tests/test_client.py::test_run_detector", "tests/test_client.py::test_download_result_to_file", "tests/test_client.py::test_upload_annotations", "tests/test_client.py::test_train_detector" ]
[]
MIT License
8,776
823
[ "src/picterra/client.py" ]
Parquery__pyicontract-lint-28
76ece692dac02dd926a5c1c30fd5933899ffeefc
2020-10-23 18:36:04
64af429573debeb5709394ae0c6d7c67cd91b972
coveralls: ## Pull Request Test Coverage Report for [Build 102](https://coveralls.io/builds/34419591) * **8** of **9** **(88.89%)** changed or added relevant lines in **1** file are covered. * No unchanged relevant lines lost coverage. * Overall coverage remained the same at **91.525%** --- | Changes Missing Coverage | Covered Lines | Changed/Added Lines | % | | :-----|--------------|--------|---: | | [icontract_lint/__init__.py](https://coveralls.io/builds/34419591/source?filename=icontract_lint%2F__init__.py#L392) | 8 | 9 | 88.89% <!-- | **Total:** | **8** | **9** | **88.89%** | --> | Totals | [![Coverage Status](https://coveralls.io/builds/34419591/badge)](https://coveralls.io/builds/34419591) | | :-- | --: | | Change from base [Build 99](https://coveralls.io/builds/33110514): | 0.0% | | Covered Lines: | 270 | | Relevant Lines: | 295 | --- ##### 💛 - [Coveralls](https://coveralls.io)
diff --git a/icontract_lint/__init__.py b/icontract_lint/__init__.py index 8639113..9dced7a 100644 --- a/icontract_lint/__init__.py +++ b/icontract_lint/__init__.py @@ -351,25 +351,6 @@ class _LintVisitor(_AstroidVisitor): else: raise NotImplementedError("Unhandled pytype: {}".format(pytype)) - def _infer_decorator(self, node: astroid.nodes.Call) -> Optional[astroid.bases.Instance]: - """ - Try to infer the decorator as instance of a class. - - :param node: decorator AST node - :return: instance of the decorator or None if decorator instance could not be inferred - """ - # While this function does not use ``self``, keep it close to the usage to improve the readability. - # pylint: disable=no-self-use - try: - decorator = next(node.infer()) - except astroid.exceptions.NameInferenceError: - return None - - if decorator is astroid.Uninferable: - return None - - return decorator - def visit_FunctionDef(self, node: astroid.nodes.FunctionDef) -> None: # pylint: disable=invalid-name """Lint the function definition.""" if node.decorators is None: @@ -394,7 +375,25 @@ class _LintVisitor(_AstroidVisitor): pass # Infer the decorator instances - decorators = [self._infer_decorator(node=decorator_node) for decorator_node in node.decorators.nodes] + + def infer_decorator(a_node: astroid.nodes.Call) -> Optional[astroid.bases.Instance]: + """ + Try to infer the decorator as instance of a class. + + :param a_node: decorator AST node + :return: instance of the decorator or None if decorator instance could not be inferred + """ + try: + decorator = next(a_node.infer()) + except (astroid.exceptions.NameInferenceError, astroid.exceptions.InferenceError): + return None + + if decorator is astroid.Uninferable: + return None + + return decorator + + decorators = [infer_decorator(a_node=decorator_node) for decorator_node in node.decorators.nodes] # Check the decorators individually for decorator, decorator_node in zip(decorators, node.decorators.nodes):
Error linting file w/o icontract I'm trying out icontract and this linter. I get a fatal error in [this file](https://github.com/pymor/pymor/blob/contracts_type-hints/src/pymor/algorithms/rules.py) [pip freeze output](https://github.com/Parquery/pyicontract-lint/files/5429910/freeze.txt) ``` pyicontract-lint ./src/pymor/algorithms/rules.py Traceback (most recent call last): File "/usr/local/lib/python3.7/site-packages/astroid/decorators.py", line 132, in raise_if_nothing_inferred yield next(generator) StopIteration: {'node': <Attribute.instancemethod l.209 at 0x7f2f85040350>, 'context': <astroid.context.InferenceContext object at 0x7f2f850a8470>} During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/usr/local/bin/pyicontract-lint", line 8, in <module> sys.exit(main()) File "/usr/local/lib/python3.7/site-packages/icontract_lint/main.py", line 70, in main return _main(args=args, stream=sys.stdout) File "/usr/local/lib/python3.7/site-packages/icontract_lint/main.py", line 49, in _main errors = icontract_lint.check_paths(paths=args.paths) File "/usr/local/lib/python3.7/site-packages/icontract/_checkers.py", line 396, in wrapper result = func(*args, **kwargs) File "/usr/local/lib/python3.7/site-packages/icontract_lint/__init__.py", line 576, in check_paths errs.extend(check_recursively(path=pth)) File "/usr/local/lib/python3.7/site-packages/icontract/_checkers.py", line 396, in wrapper result = func(*args, **kwargs) File "/usr/local/lib/python3.7/site-packages/icontract_lint/__init__.py", line 556, in check_recursively errs.extend(check_file(pth)) File "/usr/local/lib/python3.7/site-packages/icontract/_checkers.py", line 396, in wrapper result = func(*args, **kwargs) File "/usr/local/lib/python3.7/site-packages/icontract_lint/__init__.py", line 541, in check_file lint_visitor.visit(node=tree) File "/usr/local/lib/python3.7/site-packages/icontract_lint/__init__.py", line 109, in visit return func(node) File "/usr/local/lib/python3.7/site-packages/icontract_lint/__init__.py", line 114, in visit_generic self.visit(child) File "/usr/local/lib/python3.7/site-packages/icontract_lint/__init__.py", line 109, in visit return func(node) File "/usr/local/lib/python3.7/site-packages/icontract_lint/__init__.py", line 483, in visit_ClassDef self.visit(child) File "/usr/local/lib/python3.7/site-packages/icontract_lint/__init__.py", line 109, in visit return func(node) File "/usr/local/lib/python3.7/site-packages/icontract_lint/__init__.py", line 397, in visit_FunctionDef decorators = [self._infer_decorator(node=decorator_node) for decorator_node in node.decorators.nodes] File "/usr/local/lib/python3.7/site-packages/icontract_lint/__init__.py", line 397, in <listcomp> decorators = [self._infer_decorator(node=decorator_node) for decorator_node in node.decorators.nodes] File "/usr/local/lib/python3.7/site-packages/icontract_lint/__init__.py", line 364, in _infer_decorator decorator = next(node.infer()) File "/usr/local/lib/python3.7/site-packages/astroid/decorators.py", line 137, in raise_if_nothing_inferred raise exceptions.InferenceError(**error.args[0]) astroid.exceptions.InferenceError: Inference failed for <Attribute.instancemethod l.209 at 0x7f2f85040350>. ``` It's reproducable on mybinder if you click `new -> terminal` and run `pyicontract-lint /pymor/src/pymor/algorithms/rules.py` [![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/pymor/pymor/contracts_type-hints)
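A minimal sketch of the defensive inference the patch above introduces: any astroid inference failure (not only `NameInferenceError`, the only exception caught before) is treated as "decorator unknown" instead of escaping as a crash. This assumes the `astroid` package; the standalone helper name is illustrative, and the parsed snippet is adapted from the new unit test.

```python
import astroid


def infer_decorator(node):
    """Return the inferred decorator instance, or None if astroid cannot infer it."""
    try:
        inferred = next(node.infer())
    except (astroid.exceptions.NameInferenceError, astroid.exceptions.InferenceError):
        return None                      # uninferrable decorator: skip it, don't crash
    if inferred is astroid.Uninferable:
        return None
    return inferred


module = astroid.parse("""
class RuleTable:
    @classinstancemethod
    def insert_rule(cls, index, rule_):
        pass

    @insert_rule.instancemethod
    def insert_rule(self, index, rule_):
        pass
""")
for func in module.body[0].body:
    for dec in func.decorators.nodes:
        print(infer_decorator(dec))      # None for both: neither decorator can be inferred
```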
Parquery/pyicontract-lint
diff --git a/tests/test_icontract_lint.py b/tests/test_icontract_lint.py index be91d62..d09c398 100644 --- a/tests/test_icontract_lint.py +++ b/tests/test_icontract_lint.py @@ -82,22 +82,33 @@ class TestCheckUnreadableFile(unittest.TestCase): self.assertEqual(str(pth), errors[0].filename) -class TestCheckFile(unittest.TestCase): - def test_wo_contracts(self): +class TestUninferrableDecorator(unittest.TestCase): + def test_astroid_name_inference_error(self): text = textwrap.dedent("""\ + @some_uninferrable_decorator def some_func(x: int) -> int: pass - - class SomeClass: - def some_method(self, x: int) -> int: - pass - - @classmethod - def some_class_method(self, x: int) -> int: + """) + + with tempfile.TemporaryDirectory() as tmp: + tmp_path = pathlib.Path(tmp) + pth = tmp_path / "some_module.py" + pth.write_text(text) + + with sys_path_with(tmp_path): + errors = icontract_lint.check_file(path=pth) + self.assertListEqual([], errors) + + def test_astroid_inferrence_error(self): + # This example was adapted from the issue https://github.com/Parquery/pyicontract-lint/issues/27. + text = textwrap.dedent("""\ + class RuleTable: + @classinstancemethod + def insert_rule(cls, index, rule_): pass - - @staticmethod - def some_static_method(self, x: int) -> int: + + @insert_rule.instancemethod + def insert_rule(self, index, rule_): pass """) @@ -108,14 +119,26 @@ class TestCheckFile(unittest.TestCase): with sys_path_with(tmp_path): errors = icontract_lint.check_file(path=pth) - self.assertListEqual([], errors) - def test_uninferrable_decorator(self): + +class TestCheckFile(unittest.TestCase): + def test_wo_contracts(self): text = textwrap.dedent("""\ - @some_uninferrable_decorator def some_func(x: int) -> int: pass + + class SomeClass: + def some_method(self, x: int) -> int: + pass + + @classmethod + def some_class_method(self, x: int) -> int: + pass + + @staticmethod + def some_static_method(self, x: int) -> int: + pass """) with tempfile.TemporaryDirectory() as tmp:
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_hyperlinks", "has_media" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 1 }, "num_modified_files": 1 }
2.1
{ "env_vars": null, "env_yml_path": [], "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": [], "python": "3.7", "reqs_path": [ "requirements-doc.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
alabaster==0.7.13 astroid==2.5 asttokens==2.4.1 Babel==2.14.0 cachetools==5.5.2 certifi @ file:///croot/certifi_1671487769961/work/certifi chardet==5.2.0 charset-normalizer==3.4.1 colorama==0.4.6 coverage==4.5.4 distlib==0.3.9 docutils==0.16 exceptiongroup==1.2.2 filelock==3.12.2 icontract==2.7.1 idna==3.10 imagesize==1.4.1 importlib-metadata==6.7.0 iniconfig==2.0.0 isort==5.11.5 Jinja2==3.1.6 lazy-object-proxy==1.9.0 MarkupSafe==2.1.5 mccabe==0.6.1 mypy==0.782 mypy-extensions==0.4.4 packaging==24.0 platformdirs==4.0.0 pluggy==1.2.0 pydocstyle==2.1.1 Pygments==2.17.2 -e git+https://github.com/Parquery/pyicontract-lint.git@76ece692dac02dd926a5c1c30fd5933899ffeefc#egg=pyicontract_lint pylint==2.6.0 pyproject-api==1.5.3 pytest==7.4.4 pytz==2025.2 requests==2.31.0 six==1.17.0 snowballstemmer==2.2.0 Sphinx==1.8.6 sphinx-autodoc-typehints==1.6.0 sphinx-rtd-theme==0.5.2 sphinxcontrib-serializinghtml==1.1.5 sphinxcontrib-websupport==1.2.4 toml==0.10.2 tomli==2.0.1 tox==4.8.0 typed-ast==1.4.3 typing_extensions==4.7.1 urllib3==2.0.7 virtualenv==20.26.6 wrapt==1.12.1 yapf==0.20.2 zipp==3.15.0
name: pyicontract-lint channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2022.12.7=py37h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=22.3.1=py37h06a4308_0 - python=3.7.16=h7a1cb2a_0 - readline=8.2=h5eee18b_0 - setuptools=65.6.3=py37h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.38.4=py37h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - alabaster==0.7.13 - astroid==2.5 - asttokens==2.4.1 - babel==2.14.0 - cachetools==5.5.2 - chardet==5.2.0 - charset-normalizer==3.4.1 - colorama==0.4.6 - coverage==4.5.4 - distlib==0.3.9 - docutils==0.16 - exceptiongroup==1.2.2 - filelock==3.12.2 - icontract==2.7.1 - idna==3.10 - imagesize==1.4.1 - importlib-metadata==6.7.0 - iniconfig==2.0.0 - isort==5.11.5 - jinja2==3.1.6 - lazy-object-proxy==1.9.0 - markupsafe==2.1.5 - mccabe==0.6.1 - mypy==0.782 - mypy-extensions==0.4.4 - packaging==24.0 - platformdirs==4.0.0 - pluggy==1.2.0 - pydocstyle==2.1.1 - pygments==2.17.2 - pylint==2.6.0 - pyproject-api==1.5.3 - pytest==7.4.4 - pytz==2025.2 - requests==2.31.0 - six==1.17.0 - snowballstemmer==2.2.0 - sphinx==1.8.6 - sphinx-autodoc-typehints==1.6.0 - sphinx-rtd-theme==0.5.2 - sphinxcontrib-serializinghtml==1.1.5 - sphinxcontrib-websupport==1.2.4 - toml==0.10.2 - tomli==2.0.1 - tox==4.8.0 - typed-ast==1.4.3 - typing-extensions==4.7.1 - urllib3==2.0.7 - virtualenv==20.26.6 - wrapt==1.12.1 - yapf==0.20.2 - zipp==3.15.0 prefix: /opt/conda/envs/pyicontract-lint
[ "tests/test_icontract_lint.py::TestUninferrableDecorator::test_astroid_inferrence_error" ]
[]
[ "tests/test_icontract_lint.py::TestCheckUnreadableFile::test_parse_failure", "tests/test_icontract_lint.py::TestCheckUnreadableFile::test_read_failure", "tests/test_icontract_lint.py::TestUninferrableDecorator::test_astroid_name_inference_error", "tests/test_icontract_lint.py::TestCheckFile::test_disabled", "tests/test_icontract_lint.py::TestCheckFile::test_inv_invalid_arg", "tests/test_icontract_lint.py::TestCheckFile::test_inv_ok", "tests/test_icontract_lint.py::TestCheckFile::test_missing_condition", "tests/test_icontract_lint.py::TestCheckFile::test_no_condition_in_inv", "tests/test_icontract_lint.py::TestCheckFile::test_post_invalid_args", "tests/test_icontract_lint.py::TestCheckFile::test_post_old_conflict", "tests/test_icontract_lint.py::TestCheckFile::test_post_result_conflict", "tests/test_icontract_lint.py::TestCheckFile::test_post_result_none", "tests/test_icontract_lint.py::TestCheckFile::test_post_valid", "tests/test_icontract_lint.py::TestCheckFile::test_post_valid_without_returns", "tests/test_icontract_lint.py::TestCheckFile::test_pre_invalid_arg", "tests/test_icontract_lint.py::TestCheckFile::test_pre_valid", "tests/test_icontract_lint.py::TestCheckFile::test_snapshot_invalid_arg", "tests/test_icontract_lint.py::TestCheckFile::test_snapshot_valid", "tests/test_icontract_lint.py::TestCheckFile::test_snapshot_wo_post", "tests/test_icontract_lint.py::TestCheckFile::test_syntax_error", "tests/test_icontract_lint.py::TestCheckFile::test_uninferrable_returns", "tests/test_icontract_lint.py::TestCheckFile::test_wo_contracts", "tests/test_icontract_lint.py::TestCheckPaths::test_directory", "tests/test_icontract_lint.py::TestCheckPaths::test_empty", "tests/test_icontract_lint.py::TestCheckPaths::test_file", "tests/test_icontract_lint.py::TestOutputVerbose::test_empty", "tests/test_icontract_lint.py::TestOutputVerbose::test_errors", "tests/test_icontract_lint.py::TestOutputJson::test_empty", "tests/test_icontract_lint.py::TestOutputJson::test_errors" ]
[]
MIT License
8,778
546
[ "icontract_lint/__init__.py" ]
googleapis__python-bigtable-157
02783630c28de3d1bc3e17be67c6fa87e8e64ef0
2020-10-23 20:34:33
02783630c28de3d1bc3e17be67c6fa87e8e64ef0
diff --git a/google/cloud/bigtable/backup.py b/google/cloud/bigtable/backup.py index 03a1c894..291ac783 100644 --- a/google/cloud/bigtable/backup.py +++ b/google/cloud/bigtable/backup.py @@ -21,6 +21,7 @@ from google.cloud.bigtable_admin_v2.gapic.bigtable_table_admin_client import ( BigtableTableAdminClient, ) from google.cloud.bigtable_admin_v2.types import table_pb2 +from google.cloud.bigtable.policy import Policy from google.cloud.exceptions import NotFound from google.protobuf import field_mask_pb2 @@ -392,3 +393,54 @@ class Backup(object): """ api = self._instance._client.table_admin_client return api.restore_table(self._instance.name, table_id, self.name) + + def get_iam_policy(self): + """Gets the IAM access control policy for this backup. + + :rtype: :class:`google.cloud.bigtable.policy.Policy` + :returns: The current IAM policy of this backup. + """ + table_api = self._instance._client.table_admin_client + args = {"resource": self.name} + response = table_api.get_iam_policy(**args) + return Policy.from_pb(response) + + def set_iam_policy(self, policy): + """Sets the IAM access control policy for this backup. Replaces any + existing policy. + + For more information about policy, please see documentation of + class `google.cloud.bigtable.policy.Policy` + + :type policy: :class:`google.cloud.bigtable.policy.Policy` + :param policy: A new IAM policy to replace the current IAM policy + of this backup. + + :rtype: :class:`google.cloud.bigtable.policy.Policy` + :returns: The current IAM policy of this backup. + """ + table_api = self._instance._client.table_admin_client + response = table_api.set_iam_policy(resource=self.name, policy=policy.to_pb()) + return Policy.from_pb(response) + + def test_iam_permissions(self, permissions): + """Tests whether the caller has the given permissions for this backup. + Returns the permissions that the caller has. + + :type permissions: list + :param permissions: The set of permissions to check for + the ``resource``. Permissions with wildcards (such as '*' + or 'storage.*') are not allowed. For more information see + `IAM Overview + <https://cloud.google.com/iam/docs/overview#permissions>`_. + `Bigtable Permissions + <https://cloud.google.com/bigtable/docs/access-control>`_. + + :rtype: list + :returns: A List(string) of permissions allowed on the backup. + """ + table_api = self._instance._client.table_admin_client + response = table_api.test_iam_permissions( + resource=self.name, permissions=permissions + ) + return list(response.permissions) diff --git a/google/cloud/bigtable/table.py b/google/cloud/bigtable/table.py index 950a8c3f..35ca43d2 100644 --- a/google/cloud/bigtable/table.py +++ b/google/cloud/bigtable/table.py @@ -20,9 +20,9 @@ from google.api_core.exceptions import DeadlineExceeded from google.api_core.exceptions import NotFound from google.api_core.exceptions import RetryError from google.api_core.exceptions import ServiceUnavailable +from google.api_core.gapic_v1.method import DEFAULT from google.api_core.retry import if_exception_type from google.api_core.retry import Retry -from google.api_core.gapic_v1.method import wrap_method from google.cloud._helpers import _to_bytes from google.cloud.bigtable.backup import Backup from google.cloud.bigtable.column_family import _gc_rule_from_pb @@ -625,7 +625,7 @@ class Table(object): ) return self.read_rows(**kwargs) - def mutate_rows(self, rows, retry=DEFAULT_RETRY): + def mutate_rows(self, rows, retry=DEFAULT_RETRY, timeout=DEFAULT): """Mutates multiple rows in bulk. 
For example: @@ -656,17 +656,23 @@ class Table(object): the :meth:`~google.api_core.retry.Retry.with_delay` method or the :meth:`~google.api_core.retry.Retry.with_deadline` method. + :type timeout: float + :param timeout: number of seconds bounding retries for the call + :rtype: list :returns: A list of response statuses (`google.rpc.status_pb2.Status`) corresponding to success or failure of each row mutation sent. These will be in the same order as the `rows`. """ + if timeout is DEFAULT: + timeout = self.mutation_timeout + retryable_mutate_rows = _RetryableMutateRowsWorker( self._instance._client, self.name, rows, app_profile_id=self._app_profile_id, - timeout=self.mutation_timeout, + timeout=timeout, ) return retryable_mutate_rows(retry=retry) @@ -1058,27 +1064,20 @@ class _RetryableMutateRowsWorker(object): # All mutations are either successful or non-retryable now. return self.responses_statuses - mutate_rows_request = _mutate_rows_request( - self.table_name, retryable_rows, app_profile_id=self.app_profile_id - ) + entries = _compile_mutation_entries(self.table_name, retryable_rows) data_client = self.client.table_data_client - inner_api_calls = data_client._inner_api_calls - if "mutate_rows" not in inner_api_calls: - default_retry = (data_client._method_configs["MutateRows"].retry,) - if self.timeout is None: - default_timeout = data_client._method_configs["MutateRows"].timeout - else: - default_timeout = timeout.ExponentialTimeout(deadline=self.timeout) - data_client._inner_api_calls["mutate_rows"] = wrap_method( - data_client.transport.mutate_rows, - default_retry=default_retry, - default_timeout=default_timeout, - client_info=data_client._client_info, - ) + + kwargs = {} + if self.timeout is not None: + kwargs["timeout"] = timeout.ExponentialTimeout(deadline=self.timeout) try: - responses = data_client._inner_api_calls["mutate_rows"]( - mutate_rows_request, retry=None + responses = data_client.mutate_rows( + self.table_name, + entries, + app_profile_id=self.app_profile_id, + retry=None, + **kwargs ) except (ServiceUnavailable, DeadlineExceeded, Aborted): # If an exception, considered retryable by `RETRY_CODES`, is @@ -1260,8 +1259,8 @@ def _create_row_request( return message -def _mutate_rows_request(table_name, rows, app_profile_id=None): - """Creates a request to mutate rows in a table. +def _compile_mutation_entries(table_name, rows): + """Create list of mutation entries :type table_name: str :param table_name: The name of the table to write to. @@ -1269,29 +1268,29 @@ def _mutate_rows_request(table_name, rows, app_profile_id=None): :type rows: list :param rows: List or other iterable of :class:`.DirectRow` instances. - :type: app_profile_id: str - :param app_profile_id: (Optional) The unique name of the AppProfile. - - :rtype: :class:`data_messages_v2_pb2.MutateRowsRequest` - :returns: The ``MutateRowsRequest`` protobuf corresponding to the inputs. + :rtype: List[:class:`data_messages_v2_pb2.MutateRowsRequest.Entry`] + :returns: entries corresponding to the inputs. 
:raises: :exc:`~.table.TooManyMutationsError` if the number of mutations is - greater than 100,000 - """ - request_pb = data_messages_v2_pb2.MutateRowsRequest( - table_name=table_name, app_profile_id=app_profile_id + greater than the max ({}) + """.format( + _MAX_BULK_MUTATIONS ) + entries = [] mutations_count = 0 + entry_klass = data_messages_v2_pb2.MutateRowsRequest.Entry + for row in rows: _check_row_table_name(table_name, row) _check_row_type(row) mutations = row._get_mutations() - request_pb.entries.add(row_key=row.row_key, mutations=mutations) + entries.append(entry_klass(row_key=row.row_key, mutations=mutations)) mutations_count += len(mutations) + if mutations_count > _MAX_BULK_MUTATIONS: raise TooManyMutationsError( "Maximum number of mutations is %s" % (_MAX_BULK_MUTATIONS,) ) - return request_pb + return entries def _check_row_table_name(table_name, row): diff --git a/synth.py b/synth.py index 8a2fed1c..21100c74 100644 --- a/synth.py +++ b/synth.py @@ -45,40 +45,28 @@ library = gapic.py_library( s.move(library / "google/cloud/bigtable_admin_v2") s.move(library / "tests") -s.replace( - [ - "google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py", - "google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py", - ], - "'google-cloud-bigtable-admin'", - "'google-cloud-bigtable'", -) - -s.replace( - "google/**/*.py", - "from google\.cloud\.bigtable\.admin_v2.proto", - "from google.cloud.bigtable_admin_v2.proto", -) +# Work around non-standard installations -s.replace( - ["google/cloud/bigtable_admin_v2/__init__.py"], - " __doc__ = bigtable_instance_admin_client." - "BigtableInstanceAdminClient.__doc__\n", - " __doc__ = (\n" - " bigtable_instance_admin_client.BigtableInstanceAdminClient." - "__doc__)\n", -) - -s.replace( - ["google/cloud/bigtable_v2/gapic/bigtable_client.py"], - "if ``true_mutations`` is empty, and at most\n\n\s*100000.", - "if ``true_mutations`` is empty, and at most 100000.", -) +admin_clients = [ + "google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py", + "google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py", +] s.replace( - ["google/cloud/bigtable_v2/gapic/bigtable_client.py"], - "if ``false_mutations`` is empty, and at most\n\n\s*100000.", - "if ``false_mutations`` is empty, and at most 100000.", + admin_clients, + """\ +_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution\( + 'google-cloud-bigtable-admin', +\).version +""", + """\ +try: + _GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution( + "google-cloud-bigtable" + ).version +except pkg_resources.DistributionNotFound: # pragma: NO COVER + _GAPIC_LIBRARY_VERSION = None +""" ) # ----------------------------------------------------------------------------
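A hedged usage sketch of the Backup IAM helpers the patch above adds (`get_iam_policy`, `set_iam_policy`, `test_iam_permissions`). Project, instance, cluster, backup IDs, and the email are placeholders, and constructing `Backup` directly mirrors the unit tests rather than a documented entry point.

```python
from google.cloud import bigtable
from google.cloud.bigtable.backup import Backup
from google.cloud.bigtable.policy import Policy, BIGTABLE_ADMIN_ROLE

client = bigtable.Client(project="my-project", admin=True)
instance = client.instance("my-instance")
backup = Backup("my-backup", instance, cluster_id="my-cluster")

policy = backup.get_iam_policy()                      # current policy for this backup
policy[BIGTABLE_ADMIN_ROLE] = [Policy.user("[email protected]")]
updated = backup.set_iam_policy(policy)               # replaces the existing policy

permissions = backup.test_iam_permissions(
    ["bigtable.backups.list", "bigtable.backups.create"]
)
print(permissions)                                    # subset the caller actually holds
```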
BigTable: 'Table.mutate_rows' deadline exceeded for large mutations Deadline exceeded for large mutation of BigQuery table; not necessarily a bug perhaps it would be helpful to other to add this to the docs, automatically batch mutations by size, make the timeout configurable, or document large data transfers as a potential cause of exceeding the deadline. #### Environment details * Cloud BigTable * Ubuntu 18.04 * Python 3.6.4 * google-cloud-bigtable==0.31.1 #### Steps to reproduce and code example Changing the value of `mutation_batch_size` in the following, too large and the deadline is exceeded; with tfexample serialized video examples with 4 frames of size 224x224. Not necessarily a bug if this is the expected behavior and users should handle this kind of batching themselves. ```python def iterable_dataset_from_file(filename): dataset = tf.data.TFRecordDataset(filename) iterator = dataset.make_initializable_iterator() next_element = iterator.get_next() with tf.Session() as sess: sess.run(iterator.initializer) i = 0 while True: try: if i % 1000 == 0: print("Processed %s examples..." % i) yield sess.run(next_element) i += 1 except tf.errors.OutOfRangeError: print("Ran out of examples (processed %s), exiting..." % i) break def tfrecord_files_to_cbt_table(glob, table, selection, max_records=100000000, mutation_batch_size=250): mutation_index = 0 def new_mutation_batch(): return [None for _ in range(mutation_batch_size)] files = tf.gfile.Glob(glob) for file_path in files: row_mutation_batch = new_mutation_batch() for i, example in enumerate(iterable_dataset_from_file(file_path)): idx = hashlib.md5(example).hexdigest() # DEV: To check "shuffle" effect add the id suffix idx = "_".join([selection.prefix, idx, str(i)]) row = table.row(idx) row.set_cell(column_family_id=selection.column_family, column=selection.column_qualifier, value=example, timestamp=datetime.datetime.utcnow()) row_mutation_batch[mutation_index] = row if mutation_index == (mutation_batch_size - 1): table.mutate_rows(row_mutation_batch) row_mutation_batch = new_mutation_batch() mutation_index = 0 else: mutation_index += 1 final_mutation = row_mutation_batch[:(mutation_index-1)] if final_mutation: table.mutate_rows(final_mutation) ``` #### Stack trace ``` Traceback (most recent call last): File "/home/jovyan/.local/lib/python3.6/site-packages/google/api_core/grpc_helpers.py", line 79, in next return six.next(self._wrapped) File "/opt/conda/lib/python3.6/site-packages/grpc/_channel.py", line 341, in __next__ return self._next() File "/opt/conda/lib/python3.6/site-packages/grpc/_channel.py", line 335, in _next raise self grpc._channel._Rendezvous: <_Rendezvous of RPC that terminated with (StatusCode.DEADLINE_EXCEEDED, Deadline Exceeded)> The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/opt/conda/lib/python3.6/runpy.py", line 193, in _run_module_as_main "__main__", mod_spec) File "/opt/conda/lib/python3.6/runpy.py", line 85, in _run_code exec(code, run_globals) File "/home/jovyan/work/pcml/pcml/operations/tfrecord2bigtable.py", line 330, in <module> tf.app.run() File "/opt/conda/lib/python3.6/site-packages/tensorflow/python/platform/app.py", line 125, in run _sys.exit(main(argv)) File "/home/jovyan/work/pcml/pcml/operations/tfrecord2bigtable.py", line 300, in main max_records=FLAGS.max_records) File "/home/jovyan/work/pcml/pcml/operations/tfrecord2bigtable.py", line 228, in tfrecord_files_to_cbt_table table.mutate_rows(row_mutation_batch) File 
"/home/jovyan/.local/lib/python3.6/site-packages/google/cloud/bigtable/table.py", line 423, in mutate_rows return retryable_mutate_rows(retry=retry) File "/home/jovyan/.local/lib/python3.6/site-packages/google/cloud/bigtable/table.py", line 571, in __call__ mutate_rows() File "/home/jovyan/.local/lib/python3.6/site-packages/google/api_core/retry.py", line 270, in retry_wrapped_func on_error=on_error, File "/home/jovyan/.local/lib/python3.6/site-packages/google/api_core/retry.py", line 179, in retry_target return target() File "/home/jovyan/.local/lib/python3.6/site-packages/google/cloud/bigtable/table.py", line 634, in _do_mutate_retryable_rows for response in responses: File "/home/jovyan/.local/lib/python3.6/site-packages/google/api_core/grpc_helpers.py", line 81, in next six.raise_from(exceptions.from_grpc_error(exc), exc) File "<string>", line 3, in raise_from google.api_core.exceptions.DeadlineExceeded: 504 Deadline Exceeded ```
googleapis/python-bigtable
diff --git a/tests/unit/test_backup.py b/tests/unit/test_backup.py index 2f263dff..0285d668 100644 --- a/tests/unit/test_backup.py +++ b/tests/unit/test_backup.py @@ -734,6 +734,113 @@ class TestBackup(unittest.TestCase): backup=self.BACKUP_NAME, ) + def test_get_iam_policy(self): + from google.cloud.bigtable.client import Client + from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client + from google.iam.v1 import policy_pb2 + from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE + + credentials = _make_credentials() + client = Client(project=self.PROJECT_ID, credentials=credentials, admin=True) + + instance = client.instance(instance_id=self.INSTANCE_ID) + backup = self._make_one(self.BACKUP_ID, instance, cluster_id=self.CLUSTER_ID) + + version = 1 + etag = b"etag_v1" + members = ["serviceAccount:[email protected]", "user:[email protected]"] + bindings = [{"role": BIGTABLE_ADMIN_ROLE, "members": members}] + iam_policy = policy_pb2.Policy(version=version, etag=etag, bindings=bindings) + + table_api = mock.create_autospec( + bigtable_table_admin_client.BigtableTableAdminClient + ) + client._table_admin_client = table_api + table_api.get_iam_policy.return_value = iam_policy + + result = backup.get_iam_policy() + + table_api.get_iam_policy.assert_called_once_with(resource=backup.name) + self.assertEqual(result.version, version) + self.assertEqual(result.etag, etag) + + admins = result.bigtable_admins + self.assertEqual(len(admins), len(members)) + for found, expected in zip(sorted(admins), sorted(members)): + self.assertEqual(found, expected) + + def test_set_iam_policy(self): + from google.cloud.bigtable.client import Client + from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client + from google.iam.v1 import policy_pb2 + from google.cloud.bigtable.policy import Policy + from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE + + credentials = _make_credentials() + client = Client(project=self.PROJECT_ID, credentials=credentials, admin=True) + + instance = client.instance(instance_id=self.INSTANCE_ID) + backup = self._make_one(self.BACKUP_ID, instance, cluster_id=self.CLUSTER_ID) + + version = 1 + etag = b"etag_v1" + members = ["serviceAccount:[email protected]", "user:[email protected]"] + bindings = [{"role": BIGTABLE_ADMIN_ROLE, "members": sorted(members)}] + iam_policy_pb = policy_pb2.Policy(version=version, etag=etag, bindings=bindings) + + table_api = mock.create_autospec( + bigtable_table_admin_client.BigtableTableAdminClient + ) + client._table_admin_client = table_api + table_api.set_iam_policy.return_value = iam_policy_pb + + iam_policy = Policy(etag=etag, version=version) + iam_policy[BIGTABLE_ADMIN_ROLE] = [ + Policy.user("[email protected]"), + Policy.service_account("[email protected]"), + ] + + result = backup.set_iam_policy(iam_policy) + + table_api.set_iam_policy.assert_called_once_with( + resource=backup.name, policy=iam_policy_pb + ) + self.assertEqual(result.version, version) + self.assertEqual(result.etag, etag) + + admins = result.bigtable_admins + self.assertEqual(len(admins), len(members)) + for found, expected in zip(sorted(admins), sorted(members)): + self.assertEqual(found, expected) + + def test_test_iam_permissions(self): + from google.cloud.bigtable.client import Client + from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client + from google.iam.v1 import iam_policy_pb2 + + credentials = _make_credentials() + client = Client(project=self.PROJECT_ID, credentials=credentials, admin=True) + + 
instance = client.instance(instance_id=self.INSTANCE_ID) + backup = self._make_one(self.BACKUP_ID, instance, cluster_id=self.CLUSTER_ID) + + permissions = ["bigtable.backups.create", "bigtable.backups.list"] + + response = iam_policy_pb2.TestIamPermissionsResponse(permissions=permissions) + + table_api = mock.create_autospec( + bigtable_table_admin_client.BigtableTableAdminClient + ) + table_api.test_iam_permissions.return_value = response + client._table_admin_client = table_api + + result = backup.test_iam_permissions(permissions) + + self.assertEqual(result, permissions) + table_api.test_iam_permissions.assert_called_once_with( + resource=backup.name, permissions=permissions + ) + class _Client(object): def __init__(self, project=TestBackup.PROJECT_ID): diff --git a/tests/unit/test_table.py b/tests/unit/test_table.py index c99cd659..4469846b 100644 --- a/tests/unit/test_table.py +++ b/tests/unit/test_table.py @@ -20,14 +20,14 @@ from ._testing import _make_credentials from google.api_core.exceptions import DeadlineExceeded -class Test___mutate_rows_request(unittest.TestCase): +class Test__compile_mutation_entries(unittest.TestCase): def _call_fut(self, table_name, rows): - from google.cloud.bigtable.table import _mutate_rows_request + from google.cloud.bigtable.table import _compile_mutation_entries - return _mutate_rows_request(table_name, rows) + return _compile_mutation_entries(table_name, rows) @mock.patch("google.cloud.bigtable.table._MAX_BULK_MUTATIONS", new=3) - def test__mutate_rows_too_many_mutations(self): + def test_w_too_many_mutations(self): from google.cloud.bigtable.row import DirectRow from google.cloud.bigtable.table import TooManyMutationsError @@ -41,13 +41,15 @@ class Test___mutate_rows_request(unittest.TestCase): rows[0].set_cell("cf1", b"c1", 2) rows[1].set_cell("cf1", b"c1", 3) rows[1].set_cell("cf1", b"c1", 4) + with self.assertRaises(TooManyMutationsError): self._call_fut("table", rows) - def test__mutate_rows_request(self): + def test_normal(self): from google.cloud.bigtable.row import DirectRow + from google.cloud.bigtable_v2.proto import bigtable_pb2 - table = mock.Mock(name="table", spec=["name"]) + table = mock.Mock(spec=["name"]) table.name = "table" rows = [ DirectRow(row_key=b"row_key", table=table), @@ -55,25 +57,26 @@ class Test___mutate_rows_request(unittest.TestCase): ] rows[0].set_cell("cf1", b"c1", b"1") rows[1].set_cell("cf1", b"c1", b"2") + result = self._call_fut("table", rows) - expected_result = _mutate_rows_request_pb(table_name="table") - entry1 = expected_result.entries.add() - entry1.row_key = b"row_key" - mutations1 = entry1.mutations.add() - mutations1.set_cell.family_name = "cf1" - mutations1.set_cell.column_qualifier = b"c1" - mutations1.set_cell.timestamp_micros = -1 - mutations1.set_cell.value = b"1" - entry2 = expected_result.entries.add() - entry2.row_key = b"row_key_2" - mutations2 = entry2.mutations.add() - mutations2.set_cell.family_name = "cf1" - mutations2.set_cell.column_qualifier = b"c1" - mutations2.set_cell.timestamp_micros = -1 - mutations2.set_cell.value = b"2" + Entry = bigtable_pb2.MutateRowsRequest.Entry - self.assertEqual(result, expected_result) + entry_1 = Entry(row_key=b"row_key") + mutations_1 = entry_1.mutations.add() + mutations_1.set_cell.family_name = "cf1" + mutations_1.set_cell.column_qualifier = b"c1" + mutations_1.set_cell.timestamp_micros = -1 + mutations_1.set_cell.value = b"1" + + entry_2 = Entry(row_key=b"row_key_2") + mutations_2 = entry_2.mutations.add() + mutations_2.set_cell.family_name = "cf1" + 
mutations_2.set_cell.column_qualifier = b"c1" + mutations_2.set_cell.timestamp_micros = -1 + mutations_2.set_cell.value = b"2" + + self.assertEqual(result, [entry_1, entry_2]) class Test__check_row_table_name(unittest.TestCase): @@ -162,27 +165,49 @@ class TestTable(unittest.TestCase): def _make_client(self, *args, **kwargs): return self._get_target_client_class()(*args, **kwargs) - def test_constructor_w_admin(self): - credentials = _make_credentials() - client = self._make_client( - project=self.PROJECT_ID, credentials=credentials, admin=True - ) - instance = client.instance(instance_id=self.INSTANCE_ID) + def test_constructor_defaults(self): + instance = mock.Mock(spec=[]) + table = self._make_one(self.TABLE_ID, instance) + + self.assertEqual(table.table_id, self.TABLE_ID) + self.assertIs(table._instance, instance) + self.assertIsNone(table.mutation_timeout) + self.assertIsNone(table._app_profile_id) + + def test_constructor_explicit(self): + instance = mock.Mock(spec=[]) + mutation_timeout = 123 + app_profile_id = "profile-123" + + table = self._make_one( + self.TABLE_ID, + instance, + mutation_timeout=mutation_timeout, + app_profile_id=app_profile_id, + ) + self.assertEqual(table.table_id, self.TABLE_ID) - self.assertIs(table._instance._client, client) - self.assertEqual(table.name, self.TABLE_NAME) + self.assertIs(table._instance, instance) + self.assertEqual(table.mutation_timeout, mutation_timeout) + self.assertEqual(table._app_profile_id, app_profile_id) - def test_constructor_wo_admin(self): - credentials = _make_credentials() - client = self._make_client( - project=self.PROJECT_ID, credentials=credentials, admin=False + def test_name(self): + table_data_client = mock.Mock(spec=["table_path"]) + client = mock.Mock( + project=self.PROJECT_ID, + table_data_client=table_data_client, + spec=["project", "table_data_client"], ) - instance = client.instance(instance_id=self.INSTANCE_ID) + instance = mock.Mock( + _client=client, + instance_id=self.INSTANCE_ID, + spec=["_client", "instance_id"], + ) + table = self._make_one(self.TABLE_ID, instance) - self.assertEqual(table.table_id, self.TABLE_ID) - self.assertIs(table._instance._client, client) - self.assertEqual(table.name, self.TABLE_NAME) + + self.assertEqual(table.name, table_data_client.table_path.return_value) def _row_methods_helper(self): client = self._make_client( @@ -620,8 +645,11 @@ class TestTable(unittest.TestCase): with self.assertRaises(ValueError): self._read_row_helper(chunks, None) - def test_mutate_rows(self): + def _mutate_rows_helper( + self, mutation_timeout=None, app_profile_id=None, retry=None, timeout=None + ): from google.rpc.status_pb2 import Status + from google.cloud.bigtable.table import DEFAULT_RETRY from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client table_api = mock.create_autospec( @@ -633,21 +661,78 @@ class TestTable(unittest.TestCase): ) instance = client.instance(instance_id=self.INSTANCE_ID) client._table_admin_client = table_api - table = self._make_one(self.TABLE_ID, instance) + ctor_kwargs = {} - response = [Status(code=0), Status(code=1)] + if mutation_timeout is not None: + ctor_kwargs["mutation_timeout"] = mutation_timeout + + if app_profile_id is not None: + ctor_kwargs["app_profile_id"] = app_profile_id - mock_worker = mock.Mock(return_value=response) - with mock.patch( + table = self._make_one(self.TABLE_ID, instance, **ctor_kwargs) + + rows = [mock.MagicMock(), mock.MagicMock()] + response = [Status(code=0), Status(code=1)] + instance_mock = 
mock.Mock(return_value=response) + klass_mock = mock.patch( "google.cloud.bigtable.table._RetryableMutateRowsWorker", - new=mock.MagicMock(return_value=mock_worker), - ): - statuses = table.mutate_rows([mock.MagicMock(), mock.MagicMock()]) + new=mock.MagicMock(return_value=instance_mock), + ) + + call_kwargs = {} + + if retry is not None: + call_kwargs["retry"] = retry + + if timeout is not None: + expected_timeout = call_kwargs["timeout"] = timeout + else: + expected_timeout = mutation_timeout + + with klass_mock: + statuses = table.mutate_rows(rows, **call_kwargs) + result = [status.code for status in statuses] expected_result = [0, 1] - self.assertEqual(result, expected_result) + klass_mock.new.assert_called_once_with( + client, + self.TABLE_NAME, + rows, + app_profile_id=app_profile_id, + timeout=expected_timeout, + ) + + if retry is not None: + instance_mock.assert_called_once_with(retry=retry) + else: + instance_mock.assert_called_once_with(retry=DEFAULT_RETRY) + + def test_mutate_rows_w_default_mutation_timeout_app_profile_id(self): + self._mutate_rows_helper() + + def test_mutate_rows_w_mutation_timeout(self): + mutation_timeout = 123 + self._mutate_rows_helper(mutation_timeout=mutation_timeout) + + def test_mutate_rows_w_app_profile_id(self): + app_profile_id = "profile-123" + self._mutate_rows_helper(app_profile_id=app_profile_id) + + def test_mutate_rows_w_retry(self): + retry = mock.Mock() + self._mutate_rows_helper(retry=retry) + + def test_mutate_rows_w_timeout_arg(self): + timeout = 123 + self._mutate_rows_helper(timeout=timeout) + + def test_mutate_rows_w_mutation_timeout_and_timeout_arg(self): + mutation_timeout = 123 + timeout = 456 + self._mutate_rows_helper(mutation_timeout=mutation_timeout, timeout=timeout) + def test_read_rows(self): from google.cloud._testing import _Monkey from google.cloud.bigtable.row_data import PartialRowsData @@ -1424,21 +1509,18 @@ class Test__RetryableMutateRowsWorker(unittest.TestCase): row_3 = DirectRow(row_key=b"row_key_3", table=table) row_3.set_cell("cf", b"col", b"value3") - response = self._make_responses( - [self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE] - ) + worker = self._make_worker(client, table.name, [row_1, row_2, row_3]) - with mock.patch("google.cloud.bigtable.table.wrap_method") as patched: - patched.return_value = mock.Mock(return_value=[response]) + response_codes = [self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE] + response = self._make_responses(response_codes) + data_api.mutate_rows = mock.MagicMock(return_value=[response]) - worker = self._make_worker(client, table.name, [row_1, row_2, row_3]) - statuses = worker(retry=None) + statuses = worker(retry=None) result = [status.code for status in statuses] - expected_result = [self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE] + self.assertEqual(result, response_codes) - client._table_data_client._inner_api_calls["mutate_rows"].assert_called_once() - self.assertEqual(result, expected_result) + data_api.mutate_rows.assert_called_once() def test_callable_retry(self): from google.cloud.bigtable.row import DirectRow
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 2, "test_score": 3 }, "num_modified_files": 3 }
1.5
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "mock" ], "pre_install": null, "python": "3.8", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
cachetools==4.2.4 certifi==2025.1.31 charset-normalizer==3.4.1 coverage==7.6.1 exceptiongroup==1.2.2 google-api-core==1.34.1 google-auth==1.35.0 -e git+https://github.com/googleapis/python-bigtable.git@02783630c28de3d1bc3e17be67c6fa87e8e64ef0#egg=google_cloud_bigtable google-cloud-core==1.7.3 googleapis-common-protos==1.69.2 grpc-google-iam-v1==0.12.7 grpcio==1.70.0 grpcio-status==1.48.2 idna==3.10 iniconfig==2.1.0 mock==5.2.0 packaging==24.2 pluggy==1.5.0 protobuf==3.20.3 pyasn1==0.6.1 pyasn1_modules==0.4.2 pytest==8.3.5 pytest-cov==5.0.0 requests==2.32.3 rsa==4.9 six==1.17.0 tomli==2.2.1 urllib3==2.2.3
name: python-bigtable channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=24.2=py38h06a4308_0 - python=3.8.20=he870216_0 - readline=8.2=h5eee18b_0 - setuptools=75.1.0=py38h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.44.0=py38h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - cachetools==4.2.4 - certifi==2025.1.31 - charset-normalizer==3.4.1 - coverage==7.6.1 - exceptiongroup==1.2.2 - google-api-core==1.34.1 - google-auth==1.35.0 - google-cloud-core==1.7.3 - googleapis-common-protos==1.69.2 - grpc-google-iam-v1==0.12.7 - grpcio==1.70.0 - grpcio-status==1.48.2 - idna==3.10 - iniconfig==2.1.0 - mock==5.2.0 - packaging==24.2 - pluggy==1.5.0 - protobuf==3.20.3 - pyasn1==0.6.1 - pyasn1-modules==0.4.2 - pytest==8.3.5 - pytest-cov==5.0.0 - requests==2.32.3 - rsa==4.9 - six==1.17.0 - tomli==2.2.1 - urllib3==2.2.3 prefix: /opt/conda/envs/python-bigtable
[ "tests/unit/test_backup.py::TestBackup::test_get_iam_policy", "tests/unit/test_backup.py::TestBackup::test_set_iam_policy", "tests/unit/test_backup.py::TestBackup::test_test_iam_permissions", "tests/unit/test_table.py::Test__compile_mutation_entries::test_normal", "tests/unit/test_table.py::Test__compile_mutation_entries::test_w_too_many_mutations", "tests/unit/test_table.py::TestTable::test_mutate_rows_w_mutation_timeout_and_timeout_arg", "tests/unit/test_table.py::TestTable::test_mutate_rows_w_timeout_arg", "tests/unit/test_table.py::Test__RetryableMutateRowsWorker::test_callable_no_retry_strategy" ]
[ "tests/unit/test_table.py::TestTable::test_yield_retry_rows", "tests/unit/test_table.py::TestTable::test_yield_rows_with_row_set" ]
[ "tests/unit/test_backup.py::TestBackup::test___eq__", "tests/unit/test_backup.py::TestBackup::test___eq__different_types", "tests/unit/test_backup.py::TestBackup::test___ne__", "tests/unit/test_backup.py::TestBackup::test___ne__same_value", "tests/unit/test_backup.py::TestBackup::test_constructor_defaults", "tests/unit/test_backup.py::TestBackup::test_constructor_non_defaults", "tests/unit/test_backup.py::TestBackup::test_create_already_exists", "tests/unit/test_backup.py::TestBackup::test_create_cluster_not_set", "tests/unit/test_backup.py::TestBackup::test_create_expire_time_not_set", "tests/unit/test_backup.py::TestBackup::test_create_grpc_error", "tests/unit/test_backup.py::TestBackup::test_create_instance_not_found", "tests/unit/test_backup.py::TestBackup::test_create_success", "tests/unit/test_backup.py::TestBackup::test_create_table_not_set", "tests/unit/test_backup.py::TestBackup::test_delete_grpc_error", "tests/unit/test_backup.py::TestBackup::test_delete_not_found", "tests/unit/test_backup.py::TestBackup::test_delete_success", "tests/unit/test_backup.py::TestBackup::test_exists_grpc_error", "tests/unit/test_backup.py::TestBackup::test_exists_not_found", "tests/unit/test_backup.py::TestBackup::test_exists_success", "tests/unit/test_backup.py::TestBackup::test_from_pb_bad_name", "tests/unit/test_backup.py::TestBackup::test_from_pb_instance_mismatch", "tests/unit/test_backup.py::TestBackup::test_from_pb_project_mismatch", "tests/unit/test_backup.py::TestBackup::test_from_pb_success", "tests/unit/test_backup.py::TestBackup::test_get", "tests/unit/test_backup.py::TestBackup::test_property_cluster", "tests/unit/test_backup.py::TestBackup::test_property_cluster_setter", "tests/unit/test_backup.py::TestBackup::test_property_end_time", "tests/unit/test_backup.py::TestBackup::test_property_expire_time", "tests/unit/test_backup.py::TestBackup::test_property_expire_time_setter", "tests/unit/test_backup.py::TestBackup::test_property_name", "tests/unit/test_backup.py::TestBackup::test_property_parent_none", "tests/unit/test_backup.py::TestBackup::test_property_parent_w_cluster", "tests/unit/test_backup.py::TestBackup::test_property_size", "tests/unit/test_backup.py::TestBackup::test_property_source_table_none", "tests/unit/test_backup.py::TestBackup::test_property_source_table_valid", "tests/unit/test_backup.py::TestBackup::test_property_start_time", "tests/unit/test_backup.py::TestBackup::test_property_state", "tests/unit/test_backup.py::TestBackup::test_reload", "tests/unit/test_backup.py::TestBackup::test_restore_cluster_not_set", "tests/unit/test_backup.py::TestBackup::test_restore_grpc_error", "tests/unit/test_backup.py::TestBackup::test_restore_success", "tests/unit/test_backup.py::TestBackup::test_update_expire_time_grpc_error", "tests/unit/test_backup.py::TestBackup::test_update_expire_time_not_found", "tests/unit/test_backup.py::TestBackup::test_update_expire_time_success", "tests/unit/test_table.py::Test__check_row_table_name::test_right_table_name", "tests/unit/test_table.py::Test__check_row_table_name::test_wrong_table_name", "tests/unit/test_table.py::Test__check_row_type::test_right_row_type", "tests/unit/test_table.py::Test__check_row_type::test_test_wrong_row_type", "tests/unit/test_table.py::TestTable::test___eq__", "tests/unit/test_table.py::TestTable::test___eq__type_differ", "tests/unit/test_table.py::TestTable::test___ne__", "tests/unit/test_table.py::TestTable::test___ne__same_value", "tests/unit/test_table.py::TestTable::test_append_row", 
"tests/unit/test_table.py::TestTable::test_backup_factory_defaults", "tests/unit/test_table.py::TestTable::test_backup_factory_non_defaults", "tests/unit/test_table.py::TestTable::test_conditional_row", "tests/unit/test_table.py::TestTable::test_constructor_defaults", "tests/unit/test_table.py::TestTable::test_constructor_explicit", "tests/unit/test_table.py::TestTable::test_create", "tests/unit/test_table.py::TestTable::test_create_with_families", "tests/unit/test_table.py::TestTable::test_create_with_split_keys", "tests/unit/test_table.py::TestTable::test_delete", "tests/unit/test_table.py::TestTable::test_direct_row", "tests/unit/test_table.py::TestTable::test_drop_by_prefix", "tests/unit/test_table.py::TestTable::test_drop_by_prefix_w_timeout", "tests/unit/test_table.py::TestTable::test_exists", "tests/unit/test_table.py::TestTable::test_get_cluster_states", "tests/unit/test_table.py::TestTable::test_get_iam_policy", "tests/unit/test_table.py::TestTable::test_list_backups_defaults", "tests/unit/test_table.py::TestTable::test_list_backups_w_options", "tests/unit/test_table.py::TestTable::test_list_column_families", "tests/unit/test_table.py::TestTable::test_mutate_rows_w_app_profile_id", "tests/unit/test_table.py::TestTable::test_mutate_rows_w_default_mutation_timeout_app_profile_id", "tests/unit/test_table.py::TestTable::test_mutate_rows_w_mutation_timeout", "tests/unit/test_table.py::TestTable::test_mutate_rows_w_retry", "tests/unit/test_table.py::TestTable::test_mutations_batcher_factory", "tests/unit/test_table.py::TestTable::test_name", "tests/unit/test_table.py::TestTable::test_read_retry_rows", "tests/unit/test_table.py::TestTable::test_read_row_complete", "tests/unit/test_table.py::TestTable::test_read_row_miss_no__responses", "tests/unit/test_table.py::TestTable::test_read_row_miss_no_chunks_in_response", "tests/unit/test_table.py::TestTable::test_read_row_more_than_one_row_returned", "tests/unit/test_table.py::TestTable::test_read_row_still_partial", "tests/unit/test_table.py::TestTable::test_read_rows", "tests/unit/test_table.py::TestTable::test_restore_table_w_backup_id", "tests/unit/test_table.py::TestTable::test_restore_table_w_backup_name", "tests/unit/test_table.py::TestTable::test_row_factory_append", "tests/unit/test_table.py::TestTable::test_row_factory_conditional", "tests/unit/test_table.py::TestTable::test_row_factory_direct", "tests/unit/test_table.py::TestTable::test_row_factory_failure", "tests/unit/test_table.py::TestTable::test_sample_row_keys", "tests/unit/test_table.py::TestTable::test_set_iam_policy", "tests/unit/test_table.py::TestTable::test_test_iam_permissions", "tests/unit/test_table.py::TestTable::test_truncate", "tests/unit/test_table.py::TestTable::test_truncate_w_timeout", "tests/unit/test_table.py::Test__RetryableMutateRowsWorker::test_callable_empty_rows", "tests/unit/test_table.py::Test__RetryableMutateRowsWorker::test_callable_retry", "tests/unit/test_table.py::Test__RetryableMutateRowsWorker::test_do_mutate_retryable_rows", "tests/unit/test_table.py::Test__RetryableMutateRowsWorker::test_do_mutate_retryable_rows_empty_rows", "tests/unit/test_table.py::Test__RetryableMutateRowsWorker::test_do_mutate_retryable_rows_mismatch_num_responses", "tests/unit/test_table.py::Test__RetryableMutateRowsWorker::test_do_mutate_retryable_rows_retry", "tests/unit/test_table.py::Test__RetryableMutateRowsWorker::test_do_mutate_retryable_rows_second_retry", "tests/unit/test_table.py::Test__RetryableMutateRowsWorker::test_do_mutate_retryable_rows_second_try", 
"tests/unit/test_table.py::Test__RetryableMutateRowsWorker::test_do_mutate_retryable_rows_second_try_no_retryable", "tests/unit/test_table.py::Test__create_row_request::test_row_range_both_keys", "tests/unit/test_table.py::Test__create_row_request::test_row_range_both_keys_inclusive", "tests/unit/test_table.py::Test__create_row_request::test_row_range_end_key", "tests/unit/test_table.py::Test__create_row_request::test_row_range_row_set_conflict", "tests/unit/test_table.py::Test__create_row_request::test_row_range_start_key", "tests/unit/test_table.py::Test__create_row_request::test_table_name_only", "tests/unit/test_table.py::Test__create_row_request::test_with_app_profile_id", "tests/unit/test_table.py::Test__create_row_request::test_with_filter", "tests/unit/test_table.py::Test__create_row_request::test_with_limit", "tests/unit/test_table.py::Test__create_row_request::test_with_row_set", "tests/unit/test_table.py::Test_ClusterState::test___eq__", "tests/unit/test_table.py::Test_ClusterState::test___eq__type_differ", "tests/unit/test_table.py::Test_ClusterState::test___ne__", "tests/unit/test_table.py::Test_ClusterState::test___ne__same_value", "tests/unit/test_table.py::Test_ClusterState::test__repr__" ]
[]
Apache License 2.0
8,779
2,655
[ "google/cloud/bigtable/backup.py", "google/cloud/bigtable/table.py", "synth.py" ]
sqlfluff__sqlfluff-505
be885072abac897a72bd98af567bfe01a04caa65
2020-10-24 23:11:17
60dbf358e2023dfa09073c0f337e106db7f1f9e4
diff --git a/src/sqlfluff/cli/commands.py b/src/sqlfluff/cli/commands.py index 002507991..b2497e7d1 100644 --- a/src/sqlfluff/cli/commands.py +++ b/src/sqlfluff/cli/commands.py @@ -13,11 +13,14 @@ import pstats from io import StringIO from benchit import BenchIt +# To enable colour cross platform +import colorama + from .formatters import ( format_rules, format_violation, format_linting_result_header, - format_linting_result_footer, + format_linting_stats, colorize, format_dialect_warning, format_dialects, @@ -35,7 +38,7 @@ class RedWarningsFilter(logging.Filter): def filter(self, record): """Filter any warnings (or above) to turn them red.""" if record.levelno >= logging.WARNING: - record.msg = colorize(record.msg, "red") + record.msg = colorize(record.msg, "red") + " " return True @@ -56,9 +59,14 @@ def set_logging_level(verbosity, logger=None): # Don't propagate logging fluff_logger.propagate = False + # Enable colorama + colorama.init() + # Set up the log handler to log to stdout handler = logging.StreamHandler(stream=sys.stdout) - handler.setFormatter(logging.Formatter("%(levelname)-10s %(message)s")) + # NB: the unicode character at the beginning is to squash any badly + # tamed ANSI colour statements, and return us to normality. + handler.setFormatter(logging.Formatter("\u001b[0m%(levelname)-10s %(message)s")) # Set up a handler to colour warnings red. handler.addFilter(RedWarningsFilter()) if logger: @@ -294,14 +302,14 @@ def lint(paths, format, nofail, logger=None, **kwargs): # Set up logging. set_logging_level(verbosity=verbose, logger=logger) - # add stdin if specified via lone '-' if ("-",) == paths: # TODO: Remove verbose result = lnt.lint_string_wrapped(sys.stdin.read(), fname="stdin") else: # Output the results as we go - click.echo(format_linting_result_header(verbose=verbose)) + if verbose >= 1: + click.echo(format_linting_result_header()) try: # TODO: Remove verbose result = lnt.lint_paths(paths, ignore_non_existent_files=False) @@ -316,7 +324,8 @@ def lint(paths, format, nofail, logger=None, **kwargs): ) sys.exit(1) # Output the final stats - click.echo(format_linting_result_footer(result, verbose=verbose)) + if verbose >= 1: + click.echo(format_linting_stats(result, verbose=verbose)) if format == "json": click.echo(json.dumps(result.as_records())) diff --git a/src/sqlfluff/cli/formatters.py b/src/sqlfluff/cli/formatters.py index c185a70a1..014ea2596 100644 --- a/src/sqlfluff/cli/formatters.py +++ b/src/sqlfluff/cli/formatters.py @@ -89,50 +89,40 @@ def format_linting_stats(result, verbose=0): """Format a set of stats given a `LintingResult`.""" text_buffer = StringIO() all_stats = result.stats() - if verbose >= 1: - text_buffer.write("==== summary ====\n") - if verbose >= 2: - output_fields = [ - "files", - "violations", - "clean files", - "unclean files", - "avg per file", - "unclean rate", - "status", - ] - special_formats = {"unclean rate": "{0:.0%}"} - else: - output_fields = ["violations", "status"] - special_formats = {} - # Generate content tuples, applying special formats for some fields - summary_content = [ - ( - key, - special_formats[key].format(all_stats[key]) - if key in special_formats - else all_stats[key], - ) - for key in output_fields + text_buffer.write("==== summary ====\n") + if verbose >= 2: + output_fields = [ + "files", + "violations", + "clean files", + "unclean files", + "avg per file", + "unclean rate", + "status", ] - # Render it all as a table - text_buffer.write(cli_table(summary_content, max_label_width=14)) + special_formats = {"unclean 
rate": "{0:.0%}"} + else: + output_fields = ["violations", "status"] + special_formats = {} + # Generate content tuples, applying special formats for some fields + summary_content = [ + ( + key, + special_formats[key].format(all_stats[key]) + if key in special_formats + else all_stats[key], + ) + for key in output_fields + ] + # Render it all as a table + text_buffer.write(cli_table(summary_content, max_label_width=14)) return text_buffer.getvalue() -def format_linting_result_header(verbose=0): +def format_linting_result_header(): """Format the header of a linting result output.""" text_buffer = StringIO() - if verbose >= 1: - text_buffer.write("==== readout ====\n") - return text_buffer.getvalue() - - -def format_linting_result_footer(result, verbose=0): - """Format the footer of a linting result output given a `LintingResult`.""" - text_buffer = StringIO() - text_buffer.write("\n") - text_buffer.write(format_linting_stats(result, verbose=verbose)) + text_buffer.write("==== readout ====\n") return text_buffer.getvalue() diff --git a/src/sqlfluff/cli/helpers.py b/src/sqlfluff/cli/helpers.py index 11dd7212d..28f14b44b 100644 --- a/src/sqlfluff/cli/helpers.py +++ b/src/sqlfluff/cli/helpers.py @@ -3,16 +3,17 @@ from io import StringIO import sys import textwrap +from colorama import Fore, Style from .. import __version__ as pkg_version color_lookup = { # Unicode literals here are important for PY2 - "red": u"\u001b[31m", - "green": u"\u001b[32m", - "blue": u"\u001b[36m", - "lightgrey": u"\u001b[30;1m", + "red": Fore.RED, + "green": Fore.GREEN, + "blue": Fore.BLUE, + "lightgrey": Fore.BLACK + Style.BRIGHT, } @@ -23,7 +24,7 @@ def colorize(s, color=None): """ if color: start_tag = color_lookup[color] - end_tag = u"\u001b[0m" + end_tag = Style.RESET_ALL return start_tag + s + end_tag else: return s diff --git a/src/sqlfluff/core/parser/segments_base.py b/src/sqlfluff/core/parser/segments_base.py index 7b120a0be..64fe110c6 100644 --- a/src/sqlfluff/core/parser/segments_base.py +++ b/src/sqlfluff/core/parser/segments_base.py @@ -373,22 +373,20 @@ class BaseSegment: """ return "" - def _preface(self, ident, tabsize, pos_idx, raw_idx): + def _preface(self, ident, tabsize): """Returns the preamble to any logging.""" - preface = " " * (ident * tabsize) - if self.is_meta: - preface += "[META] " - preface += self.__class__.__name__ + ":" - preface += " " * max(pos_idx - len(preface), 0) - if self.pos_marker: - preface += str(self.pos_marker) - else: - preface += "-" - sfx = self._suffix() - if sfx: - return preface + (" " * max(raw_idx - len(preface), 0)) + sfx - else: - return preface + padded_type = "{padding}{modifier}{type}".format( + padding=" " * (ident * tabsize), + modifier="[META] " if self.is_meta else "", + type=self.type + ":", + ) + preface = "{pos:17}|{padded_type:60} {suffix}".format( + pos=str(self.pos_marker) if self.pos_marker else "-", + padded_type=padded_type, + suffix=self._suffix() or "", + ) + # Trim unnecessary whitespace before returning + return preface.rstrip() @property def _comments(self): @@ -400,12 +398,10 @@ class BaseSegment: """Returns only the non-comment elements of this segment.""" return [seg for seg in self.segments if seg.type != "comment"] - def stringify(self, ident=0, tabsize=4, pos_idx=60, raw_idx=80, code_only=False): + def stringify(self, ident=0, tabsize=4, code_only=False): """Use indentation to render this segment and it's children as a string.""" buff = StringIO() - preface = self._preface( - ident=ident, tabsize=tabsize, pos_idx=pos_idx, 
raw_idx=raw_idx - ) + preface = self._preface(ident=ident, tabsize=tabsize) buff.write(preface + "\n") if not code_only and self.comment_seperate and len(self._comments) > 0: if self._comments: @@ -415,8 +411,6 @@ class BaseSegment: seg.stringify( ident=ident + 2, tabsize=tabsize, - pos_idx=pos_idx, - raw_idx=raw_idx, code_only=code_only, ) ) @@ -427,8 +421,6 @@ class BaseSegment: seg.stringify( ident=ident + 2, tabsize=tabsize, - pos_idx=pos_idx, - raw_idx=raw_idx, code_only=code_only, ) ) @@ -440,8 +432,6 @@ class BaseSegment: seg.stringify( ident=ident + 1, tabsize=tabsize, - pos_idx=pos_idx, - raw_idx=raw_idx, code_only=code_only, ) ) @@ -966,11 +956,9 @@ class RawSegment(BaseSegment): self.__class__.__name__, self.pos_marker, self.raw ) - def stringify(self, ident=0, tabsize=4, pos_idx=60, raw_idx=80, code_only=False): + def stringify(self, ident=0, tabsize=4, code_only=False): """Use indentation to render this segment and it's children as a string.""" - preface = self._preface( - ident=ident, tabsize=tabsize, pos_idx=pos_idx, raw_idx=raw_idx - ) + preface = self._preface(ident=ident, tabsize=tabsize) return preface + "\n" def _suffix(self):
CLI output has more newlines than it should. I was testing a query to debug #389 and realised that the output of sqlfluff has changed unexpectedly. # 0.3.5 ``` $ sqlfluff lint test10.sql == [test10.sql] FAIL L: 4 | P: 3 | L003 | Indentation not hanging or a multiple of 4 spaces L: 5 | P: 3 | L003 | Indentation not hanging or a multiple of 4 spaces L: 5 | P: 77 | L008 | Commas should be followed by a single whitespace unless | followed by a comment. ``` # 0.3.6 ``` $ sqlfluff lint test10.sql == [test10.sql] FAIL L: 4 | P: 3 | L003 | Indentation not hanging or a multiple of 4 spaces L: 5 | P: 3 | L003 | Indentation not hanging or a multiple of 4 spaces L: 5 | P: 77 | L008 | Commas should be followed by a single whitespace unless | followed by a comment. ``` Note the additional blank lines, which have found their way in. This issue is to chase down where they're coming from and a) remove them and b) put in place a test case which catches this kind of thing in future.
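To make this regression visible in CI, the accompanying test patch compares the whole CLI output string rather than substrings. Below is a minimal sketch of that idea; it assumes the `lint` command is importable from `sqlfluff.cli.commands` (as in the patch above), and the expected string is copied from the test patch, so exact padding may differ in a real run.

```python
from click.testing import CliRunner

# Assumption: the click `lint` command lives in sqlfluff.cli.commands,
# as shown in the patch for this instance.
from sqlfluff.cli.commands import lint

# Expected output copied from the accompanying test patch; any stray blank
# line makes the exact comparison below fail.
EXPECTED = (
    "== [test/fixtures/linter/indentation_error_simple.sql] FAIL\n"
    "L: 2 | P: 4 | L003 | Indentation not hanging or a multiple of 4 spaces\n"
    "L: 5 | P: 10 | L010 | Inconsistent capitalisation of keywords.\n"
)


def test_lint_output_has_no_extra_newlines():
    """Lint a known-bad fixture and compare the full output verbatim."""
    result = CliRunner().invoke(
        lint, ["test/fixtures/linter/indentation_error_simple.sql"]
    )
    # Exit code 65 signals that violations were found (per the test patch).
    assert result.exit_code == 65
    assert result.output == EXPECTED
```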
sqlfluff/sqlfluff
diff --git a/test/cli/commands_test.py b/test/cli/commands_test.py index 79347dd6c..23b64d4f4 100644 --- a/test/cli/commands_test.py +++ b/test/cli/commands_test.py @@ -35,11 +35,17 @@ def invoke_assert_code(ret_code=0, args=None, kwargs=None, cli_input=None): return result +expected_output = """== [test/fixtures/linter/indentation_error_simple.sql] FAIL +L: 2 | P: 4 | L003 | Indentation not hanging or a multiple of 4 spaces +L: 5 | P: 10 | L010 | Inconsistent capitalisation of keywords. +""" + + def test__cli__command_directed(): """Basic checking of lint functionality.""" result = invoke_assert_code( ret_code=65, - args=[lint, ["-n", "test/fixtures/linter/indentation_error_simple.sql"]], + args=[lint, ["test/fixtures/linter/indentation_error_simple.sql"]], ) # We should get a readout of what the error was check_a = "L: 2 | P: 4 | L003" @@ -47,6 +53,8 @@ def test__cli__command_directed(): check_b = "Indentation" assert check_a in result.output assert check_b in result.output + # Finally check the WHOLE output to make sure that unexpected newlines are not added + assert result.output == expected_output def test__cli__command_dialect(): diff --git a/test/cli/formatters_test.py b/test/cli/formatters_test.py index ab437c065..eaebaf79b 100644 --- a/test/cli/formatters_test.py +++ b/test/cli/formatters_test.py @@ -21,12 +21,6 @@ def test__cli__formatters__filename_nocol(): assert escape_ansi(res) == "== [blahblah] PASS" -def test__cli__formatters__filename_col(): - """Explicity test color codes.""" - res = format_filename("blah", success=False) - assert res == u"== [\u001b[30;1mblah\u001b[0m] \u001b[31mFAIL\u001b[0m" - - def test__cli__formatters__violation(): """Test formatting violations. diff --git a/test/core/parser/segments_base_test.py b/test/core/parser/segments_base_test.py index 00643b6c8..af76857b3 100644 --- a/test/core/parser/segments_base_test.py +++ b/test/core/parser/segments_base_test.py @@ -50,8 +50,8 @@ def test__parser__base_segments_raw(raw_seg): # Check Formatting and Stringification assert str(raw_seg) == repr(raw_seg) == "<RawSegment: ([3](1, 1, 4)) 'foobar'>" assert ( - raw_seg.stringify(ident=1, tabsize=2, pos_idx=20, raw_idx=35) - == " RawSegment: [3](1, 1, 4) 'foobar'\n" + raw_seg.stringify(ident=1, tabsize=2) + == "[3](1, 1, 4) | raw: 'foobar'\n" ) # Check tuple assert raw_seg.to_tuple() == ("raw", ()) @@ -76,10 +76,10 @@ def test__parser__base_segments_base(raw_seg_list): ) # Check Formatting and Stringification assert str(base_seg) == repr(base_seg) == "<DummySegment: ([3](1, 1, 4))>" - assert base_seg.stringify(ident=1, tabsize=2, pos_idx=20, raw_idx=35) == ( - " DummySegment: [3](1, 1, 4)\n" - " RawSegment: [3](1, 1, 4) 'foobar'\n" - " RawSegment: [9](1, 1, 10) '.barfoo'\n" + assert base_seg.stringify(ident=1, tabsize=2) == ( + "[3](1, 1, 4) | dummy:\n" + "[3](1, 1, 4) | raw: 'foobar'\n" + "[9](1, 1, 10) | raw: '.barfoo'\n" )
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_issue_reference", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 2 }, "num_modified_files": 4 }
0.3
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.9", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
appdirs==1.4.4 bench-it==1.0.1 click==8.1.8 colorama==0.4.6 configparser==7.2.0 diff-cover==2.6.1 exceptiongroup==1.2.2 importlib_metadata==8.6.1 inflect==7.5.0 iniconfig==2.1.0 Jinja2==3.1.6 jinja2-pluralize==0.3.0 MarkupSafe==3.0.2 more-itertools==10.6.0 oyaml==1.0 packaging==24.2 pathspec==0.12.1 pluggy==1.5.0 Pygments==2.19.1 pytest==8.3.5 PyYAML==6.0.2 six==1.17.0 -e git+https://github.com/sqlfluff/sqlfluff.git@be885072abac897a72bd98af567bfe01a04caa65#egg=sqlfluff tomli==2.2.1 typeguard==4.4.2 typing_extensions==4.13.0 zipp==3.21.0
name: sqlfluff channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - appdirs==1.4.4 - bench-it==1.0.1 - click==8.1.8 - colorama==0.4.6 - configparser==7.2.0 - diff-cover==2.6.1 - exceptiongroup==1.2.2 - importlib-metadata==8.6.1 - inflect==7.5.0 - iniconfig==2.1.0 - jinja2==3.1.6 - jinja2-pluralize==0.3.0 - markupsafe==3.0.2 - more-itertools==10.6.0 - oyaml==1.0 - packaging==24.2 - pathspec==0.12.1 - pluggy==1.5.0 - pygments==2.19.1 - pytest==8.3.5 - pyyaml==6.0.2 - six==1.17.0 - tomli==2.2.1 - typeguard==4.4.2 - typing-extensions==4.13.0 - zipp==3.21.0 prefix: /opt/conda/envs/sqlfluff
[ "test/cli/commands_test.py::test__cli__command_directed", "test/core/parser/segments_base_test.py::test__parser__base_segments_raw", "test/core/parser/segments_base_test.py::test__parser__base_segments_base" ]
[]
[ "test/cli/commands_test.py::test__cli__command_dialect", "test/cli/commands_test.py::test__cli__command_lint_stdin[command0]", "test/cli/commands_test.py::test__cli__command_lint_stdin[command1]", "test/cli/commands_test.py::test__cli__command_lint_stdin[command2]", "test/cli/commands_test.py::test__cli__command_lint_stdin[command3]", "test/cli/commands_test.py::test__cli__command_lint_parse[command0]", "test/cli/commands_test.py::test__cli__command_lint_parse[command1]", "test/cli/commands_test.py::test__cli__command_lint_parse[command2]", "test/cli/commands_test.py::test__cli__command_lint_parse[command3]", "test/cli/commands_test.py::test__cli__command_lint_parse[command4]", "test/cli/commands_test.py::test__cli__command_lint_parse[command5]", "test/cli/commands_test.py::test__cli__command_lint_parse[command6]", "test/cli/commands_test.py::test__cli__command_lint_parse[command7]", "test/cli/commands_test.py::test__cli__command_lint_parse[command8]", "test/cli/commands_test.py::test__cli__command_lint_parse[command9]", "test/cli/commands_test.py::test__cli__command_lint_parse[command10]", "test/cli/commands_test.py::test__cli__command_lint_parse[command11]", "test/cli/commands_test.py::test__cli__command_lint_parse[command12]", "test/cli/commands_test.py::test__cli__command_lint_parse[command13]", "test/cli/commands_test.py::test__cli__command_lint_parse[command14]", "test/cli/commands_test.py::test__cli__command_lint_parse[command15]", "test/cli/commands_test.py::test__cli__command_lint_parse[command16]", "test/cli/commands_test.py::test__cli__command_lint_parse[command17]", "test/cli/commands_test.py::test__cli__command_lint_parse[command18]", "test/cli/commands_test.py::test__cli__command_lint_parse[command19]", "test/cli/commands_test.py::test__cli__command_lint_parse[command20]", "test/cli/commands_test.py::test__cli__command_lint_parse[command21]", "test/cli/commands_test.py::test__cli__command_lint_parse[command22]", "test/cli/commands_test.py::test__cli__command_versioning", "test/cli/commands_test.py::test__cli__command_version", "test/cli/commands_test.py::test__cli__command_rules", "test/cli/commands_test.py::test__cli__command_dialects", "test/cli/commands_test.py::test__cli__command__fix[L001-test/fixtures/linter/indentation_errors.sql]", "test/cli/commands_test.py::test__cli__command__fix[L008-test/fixtures/linter/whitespace_errors.sql]", "test/cli/commands_test.py::test__cli__command__fix[L008-test/fixtures/linter/indentation_errors.sql]", "test/cli/commands_test.py::test__cli__command__fix[L003-test/fixtures/linter/indentation_error_hard.sql]", "test/cli/commands_test.py::test__cli__command_fix_stdin[select", "test/cli/commands_test.py::test__cli__command_fix_stdin[", "test/cli/commands_test.py::test__cli__command_fix_stdin[SELECT", "test/cli/commands_test.py::test__cli__command__fix_no_force[L001-test/fixtures/linter/indentation_errors.sql-y-0]", "test/cli/commands_test.py::test__cli__command__fix_no_force[L001-test/fixtures/linter/indentation_errors.sql-n-65]", "test/cli/commands_test.py::test__cli__command_parse_serialize_from_stdin[yaml]", "test/cli/commands_test.py::test__cli__command_parse_serialize_from_stdin[json]", "test/cli/commands_test.py::test__cli__command_lint_serialize_from_stdin[select", "test/cli/commands_test.py::test__cli__command_lint_serialize_from_stdin[SElect", "test/cli/commands_test.py::test__cli__command_fail_nice_not_found[command0]", "test/cli/commands_test.py::test__cli__command_fail_nice_not_found[command1]", 
"test/cli/commands_test.py::test__cli__command_lint_serialize_multiple_files[yaml]", "test/cli/commands_test.py::test__cli__command_lint_serialize_multiple_files[json]", "test/cli/commands_test.py::test___main___help", "test/cli/formatters_test.py::test__cli__formatters__filename_nocol", "test/cli/formatters_test.py::test__cli__formatters__violation", "test/core/parser/segments_base_test.py::test__parser__base_segments_raw_init", "test/core/parser/segments_base_test.py::test__parser__base_segments_raw_compare", "test/core/parser/segments_base_test.py::test__parser__base_segments_base_compare" ]
[]
MIT License
8,787
2,710
[ "src/sqlfluff/cli/commands.py", "src/sqlfluff/cli/formatters.py", "src/sqlfluff/cli/helpers.py", "src/sqlfluff/core/parser/segments_base.py" ]
frmdstryr__magicattr-3
4531a6bfc64ef9b9b0c2eb01c4a3cf9a5bfd00e9
2020-10-26 00:04:42
4531a6bfc64ef9b9b0c2eb01c4a3cf9a5bfd00e9
diff --git a/magicattr.py b/magicattr.py index dc8343e..282e186 100644 --- a/magicattr.py +++ b/magicattr.py @@ -17,7 +17,7 @@ _AST_TYPES = (ast.Name, ast.Attribute, ast.Subscript, ast.Call) _STRING_TYPE = basestring if sys.version_info.major == 2 else str -def get(obj, attr): +def get(obj, attr, **kwargs): """ A getattr that supports nested lookups on objects, dicts, lists, and any combination in between. @@ -27,13 +27,24 @@ def get(obj, attr): An object to lookup the attribute on attr: String A attribute string to lookup - + kwargs: + default: Any + A default value used as a fallback if attr doesn't exist + Returns ------- result: Object - The object retrieved + The object retrieved or the default fallback value, if it was passed """ - return reduce(_lookup, _parse(attr), obj) + for chunk in _parse(attr): + try: + obj = _lookup(obj, chunk) + except Exception as ex: + if "default" in kwargs: + return kwargs["default"] + else: + raise ex + return obj def set(obj, attr, val):
Support default values in get() getattr() supports a default value if the attr doesn't exist. magicattr should mimic that behaviour.
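The request is concrete enough to illustrate with a short, hedged example: built-in getattr() already accepts a fallback value, and with the patch above applied, magicattr.get() accepts a `default` keyword with the same meaning. The `Person` class below is an illustrative stand-in for the objects used in the project's tests.

```python
import magicattr


class Person:
    """Illustrative object; any attribute holder works the same way."""

    def __init__(self, name, age):
        self.name = name
        self.age = age


bob = Person("Bob", 31)

# Built-in getattr() falls back to the third argument when missing.
assert getattr(bob, "weight", 75) == 75

# Existing attributes behave as before.
assert magicattr.get(bob, "age") == 31

# With the patch applied, a missing attribute returns the fallback
# instead of raising AttributeError.
assert magicattr.get(bob, "weight", default=75) == 75
```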
frmdstryr/magicattr
diff --git a/tests.py b/tests.py index 8424d62..17698dd 100644 --- a/tests.py +++ b/tests.py @@ -60,6 +60,11 @@ def test_person_example(): # Nothing new assert magicattr.get(bob, 'age') == 31 + # Default value (optional) + with pytest.raises(AttributeError) as e: + magicattr.get(bob, 'weight') + assert magicattr.get(bob, 'weight', default=75) == 75 + # Lists assert magicattr.get(jill, 'friends[0].name') == 'Bob' assert magicattr.get(jack, 'friends[-1].age') == 29
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 0 }, "num_modified_files": 1 }
unknown
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.6", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work certifi==2021.5.30 importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work -e git+https://github.com/frmdstryr/magicattr.git@4531a6bfc64ef9b9b0c2eb01c4a3cf9a5bfd00e9#egg=magicattr more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work py @ file:///opt/conda/conda-bld/py_1644396412707/work pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work pytest==6.2.4 toml @ file:///tmp/build/80754af9/toml_1616166611790/work typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
name: magicattr channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=21.4.0=pyhd3eb1b0_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - importlib-metadata=4.8.1=py36h06a4308_0 - importlib_metadata=4.8.1=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - more-itertools=8.12.0=pyhd3eb1b0_0 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=21.3=pyhd3eb1b0_0 - pip=21.2.2=py36h06a4308_0 - pluggy=0.13.1=py36h06a4308_0 - py=1.11.0=pyhd3eb1b0_0 - pyparsing=3.0.4=pyhd3eb1b0_0 - pytest=6.2.4=py36h06a4308_2 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - toml=0.10.2=pyhd3eb1b0_0 - typing_extensions=4.1.1=pyh06a4308_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zipp=3.6.0=pyhd3eb1b0_0 - zlib=1.2.13=h5eee18b_1 prefix: /opt/conda/envs/magicattr
[ "tests.py::test_person_example" ]
[]
[ "tests.py::test_magicattr_get[l-value0]", "tests.py::test_magicattr_get[t.t.t.t.z-z]", "tests.py::test_magicattr_get[a[0]-0]", "tests.py::test_magicattr_get[a[1][0]-1]", "tests.py::test_magicattr_get[a[1][2]-value4]", "tests.py::test_magicattr_get[b[\"x\"]-value5]", "tests.py::test_magicattr_get[b[\"x\"][\"y\"]-y]", "tests.py::test_magicattr_get[b[\"z\"]-value7]", "tests.py::test_magicattr_get[b[\"z\"][1]-2]", "tests.py::test_magicattr_get[b[\"w\"].z-z]", "tests.py::test_magicattr_get[b[\"w\"].t.l-value10]", "tests.py::test_magicattr_get[a[-1].z-z]", "tests.py::test_magicattr_get[l[-1]-2]", "tests.py::test_magicattr_get[a[2].t.a[-1].z-z]", "tests.py::test_magicattr_get[a[2].t.b[\"z\"][0]-1]", "tests.py::test_magicattr_get[a[-1].t.z-z]", "tests.py::test_empty" ]
[]
MIT License
8,793
325
[ "magicattr.py" ]
pimutils__khal-969
072927f435c965a99cc305a9a45fe2cf10e47f5a
2020-10-26 22:06:33
dbe70070c64b24dba7d2b725138beb8a54bfa183
diff --git a/khal/controllers.py b/khal/controllers.py index 39699f2..fdae8df 100644 --- a/khal/controllers.py +++ b/khal/controllers.py @@ -622,7 +622,7 @@ def import_event(vevent, collection, locale, batch, format=None, env=None): def print_ics(conf, name, ics, format): if format is None: - format = conf['view']['agenda_event_format'] + format = conf['view']['event_format'] cal = cal_from_ics(ics) events = [item for item in cal.walk() if item.name == 'VEVENT'] events_grouped = defaultdict(list)
Sensible default for printics printics currently uses agenda_event_format as its default format: https://github.com/pimutils/khal/blob/a401ef28076954779cc66885a05dd1fa7da46fe3/khal/controllers.py#L625 As a result, the date of the event is missing from the output, which largely defeats the purpose of using printics to quickly check the contents of an ICS file. Unless I am missing another use case in which a date context is already given, I suggest using event_format as the default. Or, in my opinion even better, define a dedicated printics_format config value that can be tweaked for this purpose without affecting other parts of khal.
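As a rough illustration of the requested change, here is a hypothetical helper mirroring the one-line fix in the patch above; `pick_default_format` and the template strings are placeholders for illustration only, not khal's real configuration values.

```python
def pick_default_format(conf, format=None):
    """Return the template that print_ics should fall back to."""
    if format is None:
        # Before the fix the fallback was conf['view']['agenda_event_format'],
        # which omits the date; the fix switches to the date-aware
        # event_format instead.
        format = conf['view']['event_format']
    return format


# Placeholder templates, purely for illustration.
conf = {'view': {
    'event_format': '<date and title template>',
    'agenda_event_format': '<title-only template>',
}}
assert pick_default_format(conf) == '<date and title template>'
```

With the date-aware fallback, `khal printics` prints lines such as `09.04.-09.04. An Event` (as asserted in the test patch) instead of just the title.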
pimutils/khal
diff --git a/tests/cli_test.py b/tests/cli_test.py index 75f341e..8102ee9 100644 --- a/tests/cli_test.py +++ b/tests/cli_test.py @@ -705,7 +705,7 @@ def test_printics_read_from_stdin(runner): runner = runner(command='printics') result = runner.invoke(main_khal, ['printics'], input=_get_text('cal_d')) assert not result.exception - assert '1 events found in stdin input\n An Event\n' in result.output + assert '1 events found in stdin input\n09.04.-09.04. An Event\n' in result.output def test_configure_command_config_exists(runner):
{ "commit_name": "head_commit", "failed_lite_validators": [], "has_test_patch": true, "is_lite": true, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 2 }, "num_modified_files": 1 }
0.10
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "freezegun", "vdirsyncer" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.7", "reqs_path": [ "requirements/base.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
aiohttp==3.8.6 aiosignal==1.3.1 aiostream==0.4.5 async-timeout==4.0.3 asynctest==0.13.0 atomicwrites==1.4.1 attrs==24.2.0 backports.zoneinfo==0.2.1 certifi @ file:///croot/certifi_1671487769961/work/certifi charset-normalizer==3.4.1 click==8.1.8 click-log==0.4.0 configobj==5.0.9 exceptiongroup==1.2.2 freezegun==1.5.1 frozenlist==1.3.3 icalendar==5.0.13 idna==3.10 importlib-metadata==6.7.0 iniconfig==2.0.0 -e git+https://github.com/pimutils/khal.git@072927f435c965a99cc305a9a45fe2cf10e47f5a#egg=khal multidict==6.0.5 packaging==24.0 pluggy==1.2.0 pytest==7.4.4 python-dateutil==2.9.0.post0 pytz==2025.2 pyxdg==0.28 requests==2.31.0 six==1.17.0 tomli==2.0.1 typing_extensions==4.7.1 tzlocal==5.1 urllib3==2.0.7 urwid==2.6.16 vdirsyncer==0.19.3 wcwidth==0.2.13 yarl==1.9.4 zipp==3.15.0
name: khal channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2022.12.7=py37h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=22.3.1=py37h06a4308_0 - python=3.7.16=h7a1cb2a_0 - readline=8.2=h5eee18b_0 - setuptools=65.6.3=py37h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.38.4=py37h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - aiohttp==3.8.6 - aiosignal==1.3.1 - aiostream==0.4.5 - async-timeout==4.0.3 - asynctest==0.13.0 - atomicwrites==1.4.1 - attrs==24.2.0 - backports-zoneinfo==0.2.1 - charset-normalizer==3.4.1 - click==8.1.8 - click-log==0.4.0 - configobj==5.0.9 - exceptiongroup==1.2.2 - freezegun==1.5.1 - frozenlist==1.3.3 - icalendar==5.0.13 - idna==3.10 - importlib-metadata==6.7.0 - iniconfig==2.0.0 - multidict==6.0.5 - packaging==24.0 - pluggy==1.2.0 - pytest==7.4.4 - python-dateutil==2.9.0.post0 - pytz==2025.2 - pyxdg==0.28 - requests==2.31.0 - six==1.17.0 - tomli==2.0.1 - typing-extensions==4.7.1 - tzlocal==5.1 - urllib3==2.0.7 - urwid==2.6.16 - vdirsyncer==0.19.3 - wcwidth==0.2.13 - yarl==1.9.4 - zipp==3.15.0 prefix: /opt/conda/envs/khal
[ "tests/cli_test.py::test_printics_read_from_stdin" ]
[ "tests/cli_test.py::test_calendar", "tests/cli_test.py::test_long_calendar", "tests/cli_test.py::test_import_from_stdin", "tests/cli_test.py::test_interactive_command", "tests/cli_test.py::test_print_ics_command", "tests/cli_test.py::test_configure_command_cannot_write_config_file", "tests/cli_test.py::test_configure_command_cannot_create_vdir" ]
[ "tests/cli_test.py::test_direct_modification", "tests/cli_test.py::test_simple", "tests/cli_test.py::test_simple_color", "tests/cli_test.py::test_days", "tests/cli_test.py::test_notstarted", "tests/cli_test.py::test_default_command_empty", "tests/cli_test.py::test_invalid_calendar", "tests/cli_test.py::test_attach_calendar", "tests/cli_test.py::test_no_vevent[]", "tests/cli_test.py::test_no_vevent[BEGIN:VCALENDAR\\nBEGIN:VTODO\\nEND:VTODO\\nEND:VCALENDAR\\n]", "tests/cli_test.py::test_printformats", "tests/cli_test.py::test_at", "tests/cli_test.py::test_at_day_format", "tests/cli_test.py::test_list", "tests/cli_test.py::test_search", "tests/cli_test.py::test_no_default_new", "tests/cli_test.py::test_import", "tests/cli_test.py::test_import_proper", "tests/cli_test.py::test_import_proper_invalid_timezone", "tests/cli_test.py::test_import_invalid_choice_and_prefix", "tests/cli_test.py::test_color_option", "tests/cli_test.py::test_configure_command", "tests/cli_test.py::test_configure_command_config_exists", "tests/cli_test.py::test_configure_command_create_vdir", "tests/cli_test.py::test_configure_no_vdir", "tests/cli_test.py::test_edit", "tests/cli_test.py::test_new", "tests/cli_test.py::test_new_interactive", "tests/cli_test.py::test_debug", "tests/cli_test.py::test_new_interactive_extensive" ]
[]
MIT License
8,801
163
[ "khal/controllers.py" ]
SixTrack__pysixtrack-60
4ab545da7489efeceb3f36580ba59f4fc046a12e
2020-10-28 11:45:49
4ab545da7489efeceb3f36580ba59f4fc046a12e
diff --git a/pysixtrack/line.py b/pysixtrack/line.py index 7301b16..bd37201 100644 --- a/pysixtrack/line.py +++ b/pysixtrack/line.py @@ -328,6 +328,7 @@ class Line(Element): exact_drift=False, drift_threshold=1e-6, install_apertures=False, + apply_madx_errors=False, ): line = cls(elements=[], element_names=[]) @@ -342,6 +343,9 @@ class Line(Element): ): line.append_element(el, el_name) + if apply_madx_errors: + line._apply_madx_errors(sequence) + return line # error handling (alignment, multipole orders, ...): @@ -366,7 +370,7 @@ class Line(Element): idx_after_el = idx_el + 1 return idx_el, idx_after_el - def add_offset_error_to(self, element_name, dx=0, dy=0): + def _add_offset_error_to(self, element_name, dx=0, dy=0): idx_el, idx_after_el = self.find_element_ids(element_name) xyshift = elements.XYShift(dx=dx, dy=dy) inv_xyshift = elements.XYShift(dx=-dx, dy=-dy) @@ -375,7 +379,7 @@ class Line(Element): idx_after_el + 1, inv_xyshift, element_name + "_offset_out" ) - def add_aperture_offset_error_to(self, element_name, arex=0, arey=0): + def _add_aperture_offset_error_to(self, element_name, arex=0, arey=0): idx_el, idx_after_el = self.find_element_ids(element_name) idx_el_aper = idx_after_el - 1 if not self.element_names[idx_el_aper] == element_name + "_aperture": @@ -389,7 +393,7 @@ class Line(Element): idx_after_el + 1, inv_xyshift, element_name + "_aperture_offset_out" ) - def add_tilt_error_to(self, element_name, angle): + def _add_tilt_error_to(self, element_name, angle): '''Alignment error of transverse rotation around s-axis. The element corresponding to the given `element_name` gets wrapped by SRotation elements with rotation angle @@ -418,7 +422,7 @@ class Line(Element): self.insert_element(idx_el, srot, element_name + "_tilt_in") self.insert_element(idx_after_el + 1, inv_srot, element_name + "_tilt_out") - def add_multipole_error_to(self, element_name, knl=[], ksl=[]): + def _add_multipole_error_to(self, element_name, knl=[], ksl=[]): # will raise error if element not present: assert element_name in self.element_names element = self.elements[self.element_names.index(element_name)] @@ -435,13 +439,13 @@ class Line(Element): for i, component in enumerate(ksl): element.ksl[i] += component - def apply_madx_errors(self, error_table): - """Applies MAD-X error_table (with multipole errors, - dx and dy offset errors and dpsi tilt errors) - to existing elements in this Line instance. + def _apply_madx_errors(self, madx_sequence): + """Applies errors from MAD-X sequence to existing + elements in this Line instance. - Return error_table names which were not found in the - elements of this Line instance (and thus not treated). + Return names of MAD-X elements with existing align_errors + or field_errors which were not found in the elements of + this Line instance (and thus not treated). Example via cpymad: madx = cpymad.madx.Madx() @@ -449,83 +453,58 @@ class Line(Element): # (...set up lattice and errors in cpymad...) 
seq = madx.sequence.some_lattice - # store already applied errors: - madx.command.esave(file='lattice_errors.err') - madx.command.readtable( - file='lattice_errors.err', table="errors") - errors = madx.table.errors - - pysixtrack_line = Line.from_madx_sequence(seq) - pysixtrack_line.apply_madx_errors(errors) + pysixtrack_line = pysixtrack.Line.from_madx_sequence( + seq, + apply_madx_errors=True + ) """ - max_multipole_err = 0 - # check for errors in table which cannot be treated yet: - for error_type in error_table.keys(): - if error_type == "name": - continue - if any(error_table[error_type]): - if error_type in ["dx", "dy", "dpsi", "arex", "arey"]: - # available alignment error - continue - elif error_type[:1] == "k" and error_type[-1:] == "l": - # available multipole error - order = int("".join(c for c in error_type if c.isdigit())) - max_multipole_err = max(max_multipole_err, order) - else: - print( - f'Warning: MAD-X error type "{error_type}"' - " not implemented yet." - ) - elements_not_found = [] - for i_line, element_name in enumerate(error_table["name"]): + for element, element_name in zip( + madx_sequence.expanded_elements, + madx_sequence.expanded_element_names() + ): if element_name not in self.element_names: - elements_not_found.append(element_name) - continue + if element.align_errors or element.field_errors: + elements_not_found.append(element_name) + continue - # add offset - try: - dx = error_table["dx"][i_line] - except KeyError: - dx = 0 - try: - dy = error_table["dy"][i_line] - except KeyError: - dy = 0 - if dx or dy: - self.add_offset_error_to(element_name, dx, dy) - - # add tilt - try: - dpsi = error_table["dpsi"][i_line] - except KeyError: - dpsi = 0 - if dpsi: - self.add_tilt_error_to(element_name, angle=dpsi / deg2rad) - - # add aperture-only offset - try: - arex = error_table["arex"][i_line] - except KeyError: - arex = 0 - try: - arey = error_table["arey"][i_line] - except KeyError: - arey = 0 - if arex or arey: - self.add_aperture_offset_error_to(element_name, arex, arey) - - # add multipole error - knl = [ - error_table[f"k{o}l"][i_line] - for o in range(max_multipole_err + 1) - ] - ksl = [ - error_table[f"k{o}sl"][i_line] - for o in range(max_multipole_err + 1) - ] - if any(knl) or any(ksl): - self.add_multipole_error_to(element_name, knl, ksl) + if element.align_errors: + # add offset + dx = element.align_errors.dx + dy = element.align_errors.dy + if dx or dy: + self._add_offset_error_to(element_name, dx, dy) + + # add tilt + dpsi = element.align_errors.dpsi + if dpsi: + self._add_tilt_error_to(element_name, angle=dpsi / deg2rad) + + # add aperture-only offset + arex = element.align_errors.arex + arey = element.align_errors.arey + if arex or arey: + self._add_aperture_offset_error_to(element_name, arex, arey) + + # check for errors which cannot be treated yet: + for error_type in dir(element.align_errors): + if not error_type[0] == '_' and \ + error_type not in ['dx', 'dy', 'dpsi', 'arex', + 'arey', 'count', 'index']: + print( + f'Warning: MAD-X error type "{error_type}"' + " not implemented yet." + ) + + if element.field_errors: + # add multipole error + if any(element.field_errors.dkn) or \ + any(element.field_errors.dks): + knl = element.field_errors.dkn + ksl = element.field_errors.dks + knl = knl[:np.amax(np.where(knl)) + 1] # delete trailing zeros + ksl = ksl[:np.amax(np.where(ksl)) + 1] # to keep order low + self._add_multipole_error_to(element_name, knl, ksl) return elements_not_found
merge_consecutive_drifts() can prevent aperture misalignment Example assumes #38 to be fixed ``` import cpymad.madx import pysixtrack # code from help(pysixtrack.Line.apply_madx_errors) with minimal madx sequence madx = cpymad.madx.Madx() madx.input(''' !----EXAMPLE---------------------------------------------------------------- MQ1: Quadrupole, K1:=KQ1, L=1.0; MQ2: Quadrupole, K1:=KQ2, L=1.0, apertype=circle, aperture=0.2; KQ1 = 0.02; KQ2 = -0.02; testseq: SEQUENCE, l = 20.0; MQ1, at = 5; MQ2, at=18; ENDSEQUENCE; !---the usual stuff BEAM, PARTICLE=PROTON, ENERGY=7000.0, EXN=2.2e-6, EYN=2.2e-6; USE, SEQUENCE=testseq; Select, flag=makethin, pattern="MQ2", slice=2; makethin, sequence=testseq, style=collim; use, sequence=testseq; !---misalign collimator select, flag = error, clear; select, flag = error, pattern = "MQ2"; ealign, dx=-0.04; !---/EXAMPLE---------------------------------------------------------------- ''') seq = madx.sequence.testseq madx.command.esave(file='lattice_errors.err') madx.command.readtable(file='lattice_errors.err', table="errors") errors = madx.table.errors pysixtrack_line = pysixtrack.Line.from_madx_sequence(seq,install_apertures=True) pysixtrack_line.merge_consecutive_drifts(inplace=True); pysixtrack_line.apply_madx_errors(errors) print('\n') print('Line content:') for name,element in zip(pysixtrack_line.element_names,pysixtrack_line.elements): print("{0:30} : {1}".format(name,element)) ``` The example creates a sliced quad that has a marker with an aperture in the middle. If merge_consecutive_drifts() is called before apply_madx_errors() the centre aperture is not misaligned because the corresponding zero-length drift is gone. Solution: either search for aperture markers if an element is not found, or make sure that zero length drifts are only removed after applying errors.
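A minimal sketch of the second workaround described above (apply the errors before merging drifts), reusing `seq` and `errors` from the example; illustrative only, using the pre-patch `apply_madx_errors` API:

```python
import pysixtrack

# Build the line with aperture markers installed.
line = pysixtrack.Line.from_madx_sequence(seq, install_apertures=True)

# Apply the MAD-X errors first, while the zero-length drifts (and hence the
# aperture marker in the middle of the sliced quadrupole) are still present.
line.apply_madx_errors(errors)

# Only now is it safe to clean up consecutive drifts.
line.merge_consecutive_drifts(inplace=True)
```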
SixTrack/pysixtrack
diff --git a/tests/test_line.py b/tests/test_line.py index d46cb92..41272e0 100644 --- a/tests/test_line.py +++ b/tests/test_line.py @@ -34,24 +34,24 @@ def test_line(): n_elements += 1 assert len(line) == n_elements - line.add_offset_error_to(multipole_name, dx=0, dy=0) + line._add_offset_error_to(multipole_name, dx=0, dy=0) n_elements += 2 assert len(line) == n_elements - line.add_offset_error_to(multipole_name, dx=0.2, dy=-0.003) + line._add_offset_error_to(multipole_name, dx=0.2, dy=-0.003) n_elements += 2 assert len(line) == n_elements - line.add_tilt_error_to(multipole_name, angle=0) + line._add_tilt_error_to(multipole_name, angle=0) n_elements += 2 assert len(line) == n_elements - line.add_tilt_error_to(multipole_name, angle=0.1) + line._add_tilt_error_to(multipole_name, angle=0.1) n_elements += 2 assert len(line) == n_elements - line.add_multipole_error_to(multipole_name, knl=[0, 0.1], ksl=[-0.03, 0.01]) - # line.add_multipole_error_to(drift_exact,knl=[0,0.1],ksl=[-0.03,0.01]) + line._add_multipole_error_to(multipole_name, knl=[0, 0.1], ksl=[-0.03, 0.01]) + # line._add_multipole_error_to(drift_exact,knl=[0,0.1],ksl=[-0.03,0.01]) line_dict = line.to_dict() line = pysixtrack.Line.from_dict(line_dict) diff --git a/tests/test_madx_import.py b/tests/test_madx_import.py index 3b04029..69085bd 100644 --- a/tests/test_madx_import.py +++ b/tests/test_madx_import.py @@ -153,18 +153,16 @@ def test_error_import(): select, flag = error, clear; select, flag = error, pattern = "MQ3"; ealign, dx = 0.00, dy = 0.00, arex = 0.00, arey = 0.00, dpsi = 0.00; - efcomp, DKN = {0.0, 0.0, 0.001, 0.002}, DKS = {0.0, 0.0, 0.003, 0.004}; + efcomp, DKN = {0.0, 0.0, 0.001, 0.002}, DKS = {0.0, 0.0, 0.003, 0.004, 0.005}; select, flag = error, full; ''') seq = madx.sequence.testseq - # store already applied errors: - madx.command.esave(file='lattice_errors.err') - madx.command.readtable(file='lattice_errors.err', table="errors") - os.remove('lattice_errors.err') - errors = madx.table.errors - - pysixtrack_line = pysixtrack.Line.from_madx_sequence(seq, install_apertures=True) - pysixtrack_line.apply_madx_errors(errors) + + pysixtrack_line = pysixtrack.Line.from_madx_sequence( + seq, + install_apertures=True, + apply_madx_errors=True, + ) madx.input('stop;') expected_element_num = ( @@ -224,6 +222,7 @@ def test_error_import(): assert abs(MQ3.knl[3] - 0.002) < 1e-14 assert abs(MQ3.ksl[2] - 0.003) < 1e-14 assert abs(MQ3.ksl[3] - 0.004) < 1e-14 + assert abs(MQ3.ksl[4] - 0.005) < 1e-14 def test_neutral_errors(): @@ -256,7 +255,7 @@ def test_neutral_errors(): USE, SEQUENCE=testseq; - Select, flag=makethin, pattern="MQ1", slice=2; + Select, flag=makethin, pattern="T1", slice=2; makethin, sequence=testseq; use, sequence=testseq; @@ -274,14 +273,12 @@ def test_neutral_errors(): select, flag = error, full; ''') seq = madx.sequence.testseq - # store already applied errors: - madx.command.esave(file='lattice_errors.err') - madx.command.readtable(file='lattice_errors.err', table="errors") - os.remove('lattice_errors.err') - errors = madx.table.errors - - pysixtrack_line = pysixtrack.Line.from_madx_sequence(seq, install_apertures=True) - pysixtrack_line.apply_madx_errors(errors) + + pysixtrack_line = pysixtrack.Line.from_madx_sequence( + seq, + install_apertures=True, + apply_madx_errors=True, + ) madx.input('stop;') initial_x = 0.025 @@ -296,3 +293,152 @@ def test_neutral_errors(): assert abs(particle.x-initial_x) < 1e-14 assert abs(particle.y-initial_y) < 1e-14 + + +def test_error_functionality(): + # check if errors are 
actually working as intended + cpymad_spec = util.find_spec("cpymad") + if cpymad_spec is None: + print("cpymad is not available - abort test") + sys.exit(0) + + from cpymad.madx import Madx + import numpy as np + + madx = Madx() + + madx.input(''' + T1: Collimator, L=0.0, apertype=CIRCLE, aperture={0.5}; + T2: Marker; + T3: Collimator, L=0.0, apertype=CIRCLE, aperture={0.5}; + + testseq: SEQUENCE, l = 20.0; + T1, at = 5; + T2, at = 10; + T3, at = 15; + ENDSEQUENCE; + + !---the usual stuff + BEAM, PARTICLE=PROTON, ENERGY=7000.0, EXN=2.2e-6, EYN=2.2e-6; + USE, SEQUENCE=testseq; + + !---assign misalignments and field errors + select, flag = error, clear; + select, flag = error, pattern = "T1"; + ealign, dx = 0.01, dy = 0.02, arex = 0.03, arey = 0.04; + select, flag = error, clear; + select, flag = error, pattern = "T3"; + ealign, dx = 0.07, dy = 0.08, dpsi = 0.7, arex = 0.08, arey = 0.09; + select, flag = error, full; + ''') + seq = madx.sequence.testseq + + pysixtrack_line = pysixtrack.Line.from_madx_sequence( + seq, + install_apertures=True, + apply_madx_errors=True, + ) + madx.input('stop;') + + x_init = 0.1*np.random.rand(10) + y_init = 0.1*np.random.rand(10) + particles = pysixtrack.Particles( + x=x_init.copy(), + y=y_init.copy() + ) + + T1_checked = False + T1_aper_checked = False + T2_checked = False + T3_checked = False + T3_aper_checked = False + for element, element_name in zip(pysixtrack_line.elements, + pysixtrack_line.element_names): + ret = element.track(particles) + + if element_name == 't1': + T1_checked = True + assert np.all(abs(particles.x - (x_init - 0.01)) < 1e-14) + assert np.all(abs(particles.y - (y_init - 0.02)) < 1e-14) + if element_name == 't1_aperture': + T1_aper_checked = True + assert np.all(abs(particles.x - (x_init - 0.01 - 0.03)) < 1e-14) + assert np.all(abs(particles.y - (y_init - 0.02 - 0.04)) < 1e-14) + if element_name == 't2': + T2_checked = True + assert np.all(abs(particles.x - x_init) < 1e-14) + assert np.all(abs(particles.y - y_init) < 1e-14) + cospsi = np.cos(0.7) + sinpsi = np.sin(0.7) + if element_name == 't3': + T3_checked = True + assert np.all(abs( + particles.x + - (x_init - 0.07)*cospsi + - (y_init - 0.08)*sinpsi + ) < 1e-14) + assert np.all(abs( + particles.y + + (x_init - 0.07)*sinpsi + - (y_init - 0.08)*cospsi + ) < 1e-14) + if element_name == 't3_aperture': + T3_aper_checked = True + assert np.all(abs( + particles.x + - (x_init - 0.07)*cospsi + - (y_init - 0.08)*sinpsi + - (-0.08) + ) < 1e-14) + assert np.all(abs( + particles.y + + (x_init - 0.07)*sinpsi + - (y_init - 0.08)*cospsi + - (-0.09) + ) < 1e-14) + + if ret is not None: + break + + assert not ret + assert np.all([T1_checked, T1_aper_checked, + T2_checked, T3_checked, T3_aper_checked]) + + +def test_zero_errors(): + # check that zero-errors are loaded without erro + cpymad_spec = util.find_spec("cpymad") + if cpymad_spec is None: + print("cpymad is not available - abort test") + sys.exit(0) + + from cpymad.madx import Madx + + madx = Madx() + madx.input(''' + qd: multipole, knl={0,-0.3}; + qf: multipole, knl={0, 0.3}; + testseq: sequence, l = 1; + qd, at = 0.3; + qf, at = 0.6; + endsequence; + ''') + madx.select(flag='error', pattern='qf') + madx.command.efcomp( + dkn=[0, 0, 0, 0, 0.0, 0.0, 0.0], + dks=[0.0, 0.0, 0, 0] + ) + madx.command.ealign( + dx=0.0, + dy=0.0, + ds=0.0, + DPHI=0.0, + DTHETA=0.0, + DPSI=0.0, + MREX=0.0, + MREY=0.0, + MSCALX=0.0, + MSCALY=0.0, + AREX=0.0, + AREY=0.0 + )
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_issue_reference", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 3 }, "num_modified_files": 1 }
0.5
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.9", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
cpymad==1.17.0 dataclasses==0.6 exceptiongroup==1.2.2 iniconfig==2.1.0 minrpc==0.1.0 numpy==2.0.2 packaging==24.2 pluggy==1.5.0 -e git+https://github.com/SixTrack/pysixtrack.git@4ab545da7489efeceb3f36580ba59f4fc046a12e#egg=pysixtrack pytest==8.3.5 scipy==1.13.1 tomli==2.2.1
name: pysixtrack channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - cpymad==1.17.0 - dataclasses==0.6 - exceptiongroup==1.2.2 - iniconfig==2.1.0 - minrpc==0.1.0 - numpy==2.0.2 - packaging==24.2 - pluggy==1.5.0 - pytest==8.3.5 - scipy==1.13.1 - tomli==2.2.1 prefix: /opt/conda/envs/pysixtrack
[ "tests/test_line.py::test_line", "tests/test_madx_import.py::test_error_import", "tests/test_madx_import.py::test_neutral_errors", "tests/test_madx_import.py::test_error_functionality" ]
[]
[ "tests/test_madx_import.py::test_madx_import", "tests/test_madx_import.py::test_zero_errors" ]
[]
Apache License 2.0
8,809
2,097
[ "pysixtrack/line.py" ]
WPI-MMR__gym_solo-15
88d9d645d1f27bd2754adfbcdd6c794daf818d08
2020-10-28 17:12:26
88d9d645d1f27bd2754adfbcdd6c794daf818d08
diff --git a/gym_solo/core/rewards.py b/gym_solo/core/rewards.py index d364f98..35284e6 100644 --- a/gym_solo/core/rewards.py +++ b/gym_solo/core/rewards.py @@ -55,9 +55,15 @@ class RewardFactory: def get_reward(self) -> float: """Evaluate the current state and get the combined reward. + Exceptions: + ValueError: If get_reward() is called with no registered rewards. + Returns: float: The reward from the current state. Note that this reward is a combination of multiple atomic sub-rewards, as explained by the strategies earlier. """ + if not self._rewards: + raise ValueError('Need to register at least one reward instance') + return sum(wr.weight * wr.reward.compute() for wr in self._rewards) \ No newline at end of file diff --git a/gym_solo/envs/solo8v2vanilla.py b/gym_solo/envs/solo8v2vanilla.py index bc0d4f6..4521e96 100644 --- a/gym_solo/envs/solo8v2vanilla.py +++ b/gym_solo/envs/solo8v2vanilla.py @@ -13,6 +13,8 @@ from gym import error, spaces from gym_solo.core.configs import Solo8BaseConfig from gym_solo.core import obs +from gym_solo.core import rewards + from gym_solo import solo_types @@ -33,6 +35,7 @@ class Solo8VanillaEnv(gym.Env): self._config = config self.obs_factory = obs.ObservationFactory() + self.reward_factory = rewards.RewardFactory() self._client = p.connect(p.GUI if use_gui else p.DIRECT) p.setAdditionalSearchPath(pbd.getDataPath()) @@ -72,9 +75,10 @@ class Solo8VanillaEnv(gym.Env): if self._realtime: time.sleep(self._config.dt) - # TODO: Fix rewards obs_values, obs_labels = self.obs_factory.get_obs() - return obs_values, 0.0, False, {'labels': obs_labels} + reward = self.reward_factory.get_reward() + + return obs_values, reward, False, {'labels': obs_labels} def reset(self) -> solo_types.obs: """Reset the state of the environment and returns an initial observation. @@ -85,9 +89,14 @@ class Solo8VanillaEnv(gym.Env): p.removeBody(self.robot) self.robot, _ = self._load_robot() - # Let gravity do it's thing and reset the environment + # Let gravity do it's thing and reset the environment deterministically for i in range(1000): - self.step(self._zero_gains) + p.setJointMotorControlArray(self.robot, + np.arange(self.action_space.shape[0]), + p.TORQUE_CONTROL, forces=self._zero_gains, + positionGains=self._zero_gains, + velocityGains=self._zero_gains) + p.stepSimulation() obs_values, _ = self.obs_factory.get_obs() return obs_values
Integrate Reward Factory with Solo Env It seems as if the Solo Env is still sending out a hard-coded reward; this needs to be switched to the dynamic model ASAP: https://github.com/WPI-MMR/gym_solo/blob/88d9d645d1f27bd2754adfbcdd6c794daf818d08/gym_solo/envs/solo8v2vanilla.py#L77 At the linked line, the 0.0 needs to be computed dynamically for the reward.
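A minimal usage sketch of the dynamic reward path introduced by the patch above; the constant reward class is hypothetical and mirrors the `SimpleReward` used in the test patch:

```python
from gym_solo.core import rewards
from gym_solo.envs import solo8v2vanilla as solo_env

class ConstantReward(rewards.Reward):
    """Hypothetical reward that always returns 1, as in the tests."""
    def compute(self):
        return 1

env = solo_env.Solo8VanillaEnv(config=solo_env.Solo8VanillaConfig())
env.reward_factory.register_reward(1, ConstantReward())

# step() now returns the weighted sum of registered rewards instead of 0.0
obs, reward, done, info = env.step(env.action_space.sample())
assert reward == 1
```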
WPI-MMR/gym_solo
diff --git a/gym_solo/core/test_rewards_factory.py b/gym_solo/core/test_rewards_factory.py index fd70bea..c29bc5a 100644 --- a/gym_solo/core/test_rewards_factory.py +++ b/gym_solo/core/test_rewards_factory.py @@ -17,6 +17,9 @@ class TestRewardsFactory(unittest.TestCase): rf = rewards.RewardFactory() self.assertListEqual(rf._rewards, []) + with self.assertRaises(ValueError): + rf.get_reward() + @parameterized.expand([ ('single', {1: 2.5}, 2.5), ('two_happy', {1: 1, 2: 2}, 5), diff --git a/gym_solo/envs/test_solo8v2vanilla.py b/gym_solo/envs/test_solo8v2vanilla.py index 5219151..59a4044 100644 --- a/gym_solo/envs/test_solo8v2vanilla.py +++ b/gym_solo/envs/test_solo8v2vanilla.py @@ -1,6 +1,9 @@ import unittest from gym_solo.envs import solo8v2vanilla as solo_env +from gym_solo.core.test_obs_factory import CompliantObs +from gym_solo.core import rewards + from gym import error, spaces from parameterized import parameterized from unittest import mock @@ -11,9 +14,15 @@ import os import pybullet as p +class SimpleReward(rewards.Reward): + def compute(self): + return 1 + + class TestSolo8v2VanillaEnv(unittest.TestCase): def setUp(self): self.env = solo_env.Solo8VanillaEnv(config=solo_env.Solo8VanillaConfig()) + self.env.reward_factory.register_reward(1, SimpleReward()) def tearDown(self): self.env._close() @@ -29,6 +38,9 @@ class TestSolo8v2VanillaEnv(unittest.TestCase): def test_realtime(self, mock_time): env = solo_env.Solo8VanillaEnv(config=solo_env.Solo8VanillaConfig(), realtime=True) + env.reward_factory.register_reward(1, SimpleReward()) + + env.step(env.action_space.sample()) self.assertTrue(mock_time.called) def test_seed(self): @@ -123,6 +135,19 @@ class TestSolo8v2VanillaEnv(unittest.TestCase): np.testing.assert_array_almost_equal(base_pos, new_pos) np.testing.assert_array_almost_equal(base_or, new_or) + def test_step_no_rewards(self): + env = solo_env.Solo8VanillaEnv(config=solo_env.Solo8VanillaConfig()) + with self.assertRaises(ValueError): + env.step(np.zeros(self.env.action_space.shape[0])) + + def test_step_simple_reward(self): + obs, reward, done, info = self.env.step(self.env.action_space.sample()) + self.assertEqual(reward, 1) + + def test_observation_space(self): + o = CompliantObs(None) + self.env.obs_factory.register_observation(o) + self.assertEqual(o.observation_space, self.env.observation_space) if __name__ == '__main__': unittest.main() \ No newline at end of file
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 2, "test_score": 3 }, "num_modified_files": 2 }
unknown
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.8", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
cloudpickle==3.1.1 exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work gym==0.26.2 gym-notices==0.0.8 -e git+https://github.com/WPI-MMR/gym_solo.git@88d9d645d1f27bd2754adfbcdd6c794daf818d08#egg=gym_solo importlib_metadata==8.5.0 iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work numpy==1.24.4 packaging @ file:///croot/packaging_1720101850331/work parameterized==0.9.0 pluggy @ file:///tmp/build/80754af9/pluggy_1648042571233/work pybullet==3.2.7 pytest @ file:///croot/pytest_1717793244625/work tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work zipp==3.20.2
name: gym_solo channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - exceptiongroup=1.2.0=py38h06a4308_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - packaging=24.1=py38h06a4308_0 - pip=24.2=py38h06a4308_0 - pluggy=1.0.0=py38h06a4308_1 - pytest=7.4.4=py38h06a4308_0 - python=3.8.20=he870216_0 - readline=8.2=h5eee18b_0 - setuptools=75.1.0=py38h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tomli=2.0.1=py38h06a4308_0 - wheel=0.44.0=py38h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - cloudpickle==3.1.1 - gym==0.26.2 - gym-notices==0.0.8 - importlib-metadata==8.5.0 - numpy==1.24.4 - parameterized==0.9.0 - pybullet==3.2.7 - zipp==3.20.2 prefix: /opt/conda/envs/gym_solo
[ "gym_solo/core/test_rewards_factory.py::TestRewardsFactory::test_empty", "gym_solo/envs/test_solo8v2vanilla.py::TestSolo8v2VanillaEnv::test_GUI_0_default", "gym_solo/envs/test_solo8v2vanilla.py::TestSolo8v2VanillaEnv::test_GUI_1_nogui", "gym_solo/envs/test_solo8v2vanilla.py::TestSolo8v2VanillaEnv::test_GUI_2_gui", "gym_solo/envs/test_solo8v2vanilla.py::TestSolo8v2VanillaEnv::test_action_space", "gym_solo/envs/test_solo8v2vanilla.py::TestSolo8v2VanillaEnv::test_actions", "gym_solo/envs/test_solo8v2vanilla.py::TestSolo8v2VanillaEnv::test_observation_space", "gym_solo/envs/test_solo8v2vanilla.py::TestSolo8v2VanillaEnv::test_realtime", "gym_solo/envs/test_solo8v2vanilla.py::TestSolo8v2VanillaEnv::test_reset", "gym_solo/envs/test_solo8v2vanilla.py::TestSolo8v2VanillaEnv::test_seed", "gym_solo/envs/test_solo8v2vanilla.py::TestSolo8v2VanillaEnv::test_step_no_rewards", "gym_solo/envs/test_solo8v2vanilla.py::TestSolo8v2VanillaEnv::test_step_simple_reward" ]
[]
[ "gym_solo/core/test_rewards_factory.py::TestRewardsFactory::test_register_and_compute_0_single", "gym_solo/core/test_rewards_factory.py::TestRewardsFactory::test_register_and_compute_1_two_happy", "gym_solo/core/test_rewards_factory.py::TestRewardsFactory::test_register_and_compute_2_0_weight", "gym_solo/core/test_rewards_factory.py::TestRewardsFactory::test_register_and_compute_3_negative_weight", "gym_solo/core/test_rewards_factory.py::TestRewardsFactory::test_register_and_compute_4_three" ]
[]
MIT License
8,812
757
[ "gym_solo/core/rewards.py", "gym_solo/envs/solo8v2vanilla.py" ]
testing-cabal__systemfixtures-9
9c0908083a2f8914621ef5068c024ee41f84981a
2020-10-29 12:39:29
9c0908083a2f8914621ef5068c024ee41f84981a
nessita: I will need to propose a slightly different patch because, while the Python documentation for `DirEntry` seems to imply it's available since Python 3.5 (https://docs.python.org/3/library/os.html#os.DirEntry), as explained in this bug https://bugs.python.org/issue28530, `DirEntry` is not exposed in the `os` module until 3.6. The suggested workaround would be something along these lines: ```python import os try: from os import DirEntry except ImportError: import tempfile with tempfile.NamedTemporaryFile() as ftemp: scan = os.scandir(os.path.dirname(ftemp.name)) DirEntry = type(next(scan)) del scan, ftemp, tempfile ``` jelmer: Python 3.5 is end of life - can you simplify this PR by dropping support for it?
diff --git a/systemfixtures/filesystem.py b/systemfixtures/filesystem.py index 40d9da0..f26a3ee 100644 --- a/systemfixtures/filesystem.py +++ b/systemfixtures/filesystem.py @@ -12,6 +12,7 @@ if six.PY2: BUILTIN_OPEN = "__builtin__.open" if six.PY3: BUILTIN_OPEN = "builtins.open" + from os import DirEntry GENERIC_APIS = ( @@ -139,6 +140,8 @@ class FakeFilesystem(Fixture): def _is_fake_path_or_fd(self, path, *args, **kwargs): if isinstance(path, int): path = self._path_from_fd(path) + elif isinstance(path, DirEntry): + path = path.name return self._is_fake_path(path) def _is_fake_symlink(self, src, dst, *args, **kwargs):
shutil.copytree to an overlayed dir fails under Python 3.8 When copying a tree to an overlayed dir, I get the following exception caused by `_is_fake_path` not handling DirEntry params: ```python shutil.copytree(CHARM_DIR, self.charm_dir) File "/usr/lib/python3.8/shutil.py", line 554, in copytree return _copytree(entries=entries, src=src, dst=dst, symlinks=symlinks, File "/usr/lib/python3.8/shutil.py", line 496, in _copytree copy_function(srcobj, dstname) File "/usr/lib/python3.8/shutil.py", line 432, in copy2 copyfile(src, dst, follow_symlinks=follow_symlinks) File "/usr/lib/python3.8/shutil.py", line 261, in copyfile with open(src, 'rb') as fsrc, open(dst, 'wb') as fdst: File "/home/nessita/canonical/franky/env/lib/python3.8/site-packages/systemfixtures/_overlay.py", line 23, in _new_value if self.condition(*args, **kwargs): File "/home/nessita/canonical/franky/env/lib/python3.8/site-packages/systemfixtures/filesystem.py", line 146, in _is_fake_path_or_fd return self._is_fake_path(path) File "/home/nessita/canonical/franky/env/lib/python3.8/site-packages/systemfixtures/filesystem.py", line 133, in _is_fake_path if path.startswith(prefix): AttributeError: 'posix.DirEntry' object has no attribute 'startswith' ``` A possible fix would be something like this: ```python 129 if isinstance(path, os.DirEntry): 130 path = path.name ```
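A minimal reproduction sketch in the spirit of the new `test_copytree` test; it assumes `FakeFilesystem` is set up directly via the standard fixtures `setUp`/`cleanUp` methods rather than inside a test case:

```python
import shutil
from systemfixtures.filesystem import FakeFilesystem

fs = FakeFilesystem()
fs.setUp()
try:
    fs.add("/foo")
    # Before the fix, shutil.copytree on Python 3.8 hands os.DirEntry objects
    # to the overlayed open(), and _is_fake_path() fails with
    # AttributeError: 'posix.DirEntry' object has no attribute 'startswith'
    shutil.copytree("./doc", "/foo")
finally:
    fs.cleanUp()
```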
testing-cabal/systemfixtures
diff --git a/systemfixtures/tests/test_filesystem.py b/systemfixtures/tests/test_filesystem.py index 5041bb0..ec3d26a 100644 --- a/systemfixtures/tests/test_filesystem.py +++ b/systemfixtures/tests/test_filesystem.py @@ -97,6 +97,12 @@ class FakeFilesystemTest(TestCase): shutil.rmtree("/foo/bar") self.assertEqual([], os.listdir("/foo")) + def test_copytree(self): + self.fs.add("/foo") + shutil.copytree("./doc", "/foo") + self.assertEqual( + sorted(os.listdir("./doc")), sorted(os.listdir("/foo"))) + if six.PY3: def test_listdir_with_fd(self):
{ "commit_name": "head_commit", "failed_lite_validators": [], "has_test_patch": true, "is_lite": true, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 0 }, "num_modified_files": 1 }
0.6
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[test]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "pytest-xdist", "pytest-mock", "pytest-asyncio" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
alabaster==0.7.16 babel==2.17.0 certifi==2025.1.31 charset-normalizer==3.4.1 coverage==7.8.0 docutils==0.21.2 exceptiongroup==1.2.2 execnet==2.1.1 fakesleep==0.1 fixtures==4.2.4.post1 idna==3.10 imagesize==1.4.1 importlib_metadata==8.6.1 iniconfig==2.1.0 Jinja2==3.1.6 MarkupSafe==3.0.2 packaging==24.2 pbr==6.1.1 pluggy==1.5.0 Pygments==2.19.1 pytest==8.3.5 pytest-asyncio==0.26.0 pytest-cov==6.0.0 pytest-mock==3.14.0 pytest-xdist==3.6.1 requests==2.32.3 requests-mock==1.12.1 six==1.17.0 snowballstemmer==2.2.0 Sphinx==7.4.7 sphinxcontrib-applehelp==2.0.0 sphinxcontrib-devhelp==2.0.0 sphinxcontrib-htmlhelp==2.1.0 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==2.0.0 sphinxcontrib-serializinghtml==2.0.0 -e git+https://github.com/testing-cabal/systemfixtures.git@9c0908083a2f8914621ef5068c024ee41f84981a#egg=systemfixtures testtools==2.7.2 tomli==2.2.1 typing_extensions==4.13.0 urllib3==2.3.0 zipp==3.21.0
name: systemfixtures channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - alabaster==0.7.16 - babel==2.17.0 - certifi==2025.1.31 - charset-normalizer==3.4.1 - coverage==7.8.0 - docutils==0.21.2 - exceptiongroup==1.2.2 - execnet==2.1.1 - fakesleep==0.1 - fixtures==4.2.4.post1 - idna==3.10 - imagesize==1.4.1 - importlib-metadata==8.6.1 - iniconfig==2.1.0 - jinja2==3.1.6 - markupsafe==3.0.2 - packaging==24.2 - pbr==6.1.1 - pluggy==1.5.0 - pygments==2.19.1 - pytest==8.3.5 - pytest-asyncio==0.26.0 - pytest-cov==6.0.0 - pytest-mock==3.14.0 - pytest-xdist==3.6.1 - requests==2.32.3 - requests-mock==1.12.1 - six==1.17.0 - snowballstemmer==2.2.0 - sphinx==7.4.7 - sphinxcontrib-applehelp==2.0.0 - sphinxcontrib-devhelp==2.0.0 - sphinxcontrib-htmlhelp==2.1.0 - sphinxcontrib-jsmath==1.0.1 - sphinxcontrib-qthelp==2.0.0 - sphinxcontrib-serializinghtml==2.0.0 - systemfixtures==0.6.8.dev5 - testtools==2.7.2 - tomli==2.2.1 - typing-extensions==4.13.0 - urllib3==2.3.0 - zipp==3.21.0 prefix: /opt/conda/envs/systemfixtures
[ "systemfixtures/tests/test_filesystem.py::FakeFilesystemTest::test_copytree" ]
[]
[ "systemfixtures/tests/test_filesystem.py::FakeFilesystemTest::test_add", "systemfixtures/tests/test_filesystem.py::FakeFilesystemTest::test_add_non_absolute", "systemfixtures/tests/test_filesystem.py::FakeFilesystemTest::test_add_sub_paths", "systemfixtures/tests/test_filesystem.py::FakeFilesystemTest::test_chmod", "systemfixtures/tests/test_filesystem.py::FakeFilesystemTest::test_chown", "systemfixtures/tests/test_filesystem.py::FakeFilesystemTest::test_fchown", "systemfixtures/tests/test_filesystem.py::FakeFilesystemTest::test_glob", "systemfixtures/tests/test_filesystem.py::FakeFilesystemTest::test_listdir_with_fd", "systemfixtures/tests/test_filesystem.py::FakeFilesystemTest::test_readlink_to_fake_path", "systemfixtures/tests/test_filesystem.py::FakeFilesystemTest::test_readlink_to_real_path", "systemfixtures/tests/test_filesystem.py::FakeFilesystemTest::test_rename", "systemfixtures/tests/test_filesystem.py::FakeFilesystemTest::test_rmtree", "systemfixtures/tests/test_filesystem.py::FakeFilesystemTest::test_sqlite3", "systemfixtures/tests/test_filesystem.py::FakeFilesystemTest::test_symlink", "systemfixtures/tests/test_filesystem.py::FakeFilesystemTest::test_unlink", "systemfixtures/tests/test_filesystem.py::FakeFilesystemTest::test_walk" ]
[]
MIT License
8,823
214
[ "systemfixtures/filesystem.py" ]
nteract__papermill-544
d5299b0f2705d7a9175c377aa2a2f812c83239e3
2020-10-30 21:03:45
d5299b0f2705d7a9175c377aa2a2f812c83239e3
codecov[bot]: # [Codecov](https://codecov.io/gh/nteract/papermill/pull/544?src=pr&el=h1) Report > Merging [#544](https://codecov.io/gh/nteract/papermill/pull/544?src=pr&el=desc) into [main](https://codecov.io/gh/nteract/papermill/commit/d5299b0f2705d7a9175c377aa2a2f812c83239e3?el=desc) will **decrease** coverage by `0.22%`. > The diff coverage is `60.00%`. ```diff @@ Coverage Diff @@ ## main #544 +/- ## ========================================== - Coverage 92.30% 92.07% -0.23% ========================================== Files 16 16 Lines 1403 1413 +10 ========================================== + Hits 1295 1301 +6 - Misses 108 112 +4 ```
diff --git a/papermill/execute.py b/papermill/execute.py index 5a2a46f..c16aa88 100644 --- a/papermill/execute.py +++ b/papermill/execute.py @@ -2,6 +2,7 @@ import copy import nbformat +from pathlib import Path from .log import logger from .exceptions import PapermillExecutionError @@ -32,9 +33,9 @@ def execute_notebook( Parameters ---------- - input_path : str + input_path : str or Path Path to input notebook - output_path : str + output_path : str or Path Path to save executed notebook parameters : dict, optional Arbitrary keyword arguments to pass to the notebook parameters @@ -56,7 +57,7 @@ def execute_notebook( Duration in seconds to wait for kernel start-up report_mode : bool, optional Flag for whether or not to hide input. - cwd : str, optional + cwd : str or Path, optional Working directory to use when executing the notebook **kwargs Arbitrary keyword arguments to pass to the notebook engine @@ -66,6 +67,13 @@ def execute_notebook( nb : NotebookNode Executed notebook object """ + if isinstance(input_path, Path): + input_path = str(input_path) + if isinstance(output_path, Path): + output_path = str(output_path) + if isinstance(cwd, Path): + cwd = str(cwd) + path_parameters = add_builtin_parameters(parameters) input_path = parameterize_path(input_path, path_parameters) output_path = parameterize_path(output_path, path_parameters) diff --git a/papermill/inspection.py b/papermill/inspection.py index 79d6542..f8c17ba 100644 --- a/papermill/inspection.py +++ b/papermill/inspection.py @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- """Deduce parameters of a notebook from the parameters cell.""" import click +from pathlib import Path from .iorw import get_pretty_path, load_notebook_node, local_file_io_cwd from .log import logger @@ -103,7 +104,7 @@ def inspect_notebook(notebook_path, parameters=None): Parameters ---------- - notebook_path : str + notebook_path : str or Path Path to notebook parameters : dict, optional Arbitrary keyword arguments to pass to the notebook parameters @@ -113,6 +114,9 @@ def inspect_notebook(notebook_path, parameters=None): Dict[str, Parameter] Mapping of (parameter name, {name, inferred_type_name, default, help}) """ + if isinstance(notebook_path, Path): + notebook_path = str(notebook_path) + nb = _open_notebook(notebook_path, parameters) params = _infer_parameters(nb)
Failure when a path is pathlib.Path The Papermill Python API fails if either of the path arguments supplied to `execute_notebook` is a `pathlib.Path`. This is because [`parameterize_path` assumes that the path is always a `str`](https://github.com/nteract/papermill/blob/d5299b0f2705d7a9175c377aa2a2f812c83239e3/papermill/parameterize.py#L50). In order to support parameterized paths, Papermill probably has to continue to work with `str`s internally, but it would be nice if Papermill did not fail when given a `Path`. A reasonable fix would be to convert each `Path` object to a `str` before continuing.
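A minimal usage sketch of what the patch above enables; the notebook filenames and the parameter are hypothetical:

```python
from pathlib import Path
import papermill as pm

# Path arguments are now coerced to str before parameterize_path() sees them.
pm.execute_notebook(
    Path("templates/report.ipynb"),
    Path("out/report-run.ipynb"),
    parameters={"msg": "hello"},
    cwd=Path("out"),
)
```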
nteract/papermill
diff --git a/papermill/tests/test_execute.py b/papermill/tests/test_execute.py index dbee1fb..5a60a58 100644 --- a/papermill/tests/test_execute.py +++ b/papermill/tests/test_execute.py @@ -5,6 +5,7 @@ import tempfile import unittest from functools import partial +from pathlib import Path try: from unittest.mock import patch @@ -278,6 +279,16 @@ class TestCWD(unittest.TestCase): os.path.isfile(os.path.join(self.base_test_dir, self.nb_test_executed_fname)) ) + def test_pathlib_paths(self): + # Copy of test_execution_respects_cwd_assignment but with `Path`s + with chdir(self.base_test_dir): + execute_notebook( + Path(self.check_notebook_name), + Path(self.nb_test_executed_fname), + cwd=Path(self.test_dir), + ) + self.assertTrue(Path(self.base_test_dir).joinpath(self.nb_test_executed_fname).exists()) + class TestSysExit(unittest.TestCase): def setUp(self): diff --git a/papermill/tests/test_inspect.py b/papermill/tests/test_inspect.py index 8ca6860..823501f 100644 --- a/papermill/tests/test_inspect.py +++ b/papermill/tests/test_inspect.py @@ -52,7 +52,12 @@ def click_context(): ], ) def test_inspect_notebook(name, expected): - assert inspect_notebook(str(name)) == expected + assert inspect_notebook(name) == expected + + +def test_str_path(): + expected = {"msg": {"name": "msg", "inferred_type_name": "None", "default": "None", "help": ""}} + assert inspect_notebook(str(_get_fullpath("simple_execute.ipynb"))) == expected @pytest.mark.parametrize(
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 2 }, "num_modified_files": 2 }
2.1
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest pytest-cov pytest-xdist pytest-mock pytest-asyncio", "pytest" ], "pre_install": null, "python": "3.9", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
aiohappyeyeballs==2.6.1 aiohttp==3.11.14 aiosignal==1.3.2 alabaster==0.7.16 ansiwrap==0.8.4 anyio==4.9.0 argon2-cffi==23.1.0 argon2-cffi-bindings==21.2.0 arrow==1.3.0 asttokens==3.0.0 async-lru==2.0.5 async-timeout==5.0.1 attrs==25.3.0 azure-core==1.32.0 azure-datalake-store==0.0.53 azure-storage-blob==12.25.1 babel==2.17.0 backports.tarfile==1.2.0 beautifulsoup4==4.13.3 black==25.1.0 bleach==6.2.0 boto==2.49.0 boto3==1.37.23 botocore==1.37.23 build==1.2.2.post1 bump2version==1.0.1 bumpversion==0.6.0 cachetools==5.5.2 certifi==2025.1.31 cffi==1.17.1 cfgv==3.4.0 chardet==5.2.0 charset-normalizer==3.4.1 check-manifest==0.50 click==8.1.8 codecov==2.1.13 colorama==0.4.6 comm==0.2.2 commonmark==0.9.1 coverage==7.8.0 cryptography==44.0.2 debugpy==1.8.13 decorator==5.2.1 defusedxml==0.7.1 distlib==0.3.9 distro==1.9.0 docutils==0.21.2 entrypoints==0.4 exceptiongroup==1.2.2 execnet==2.1.1 executing==2.2.0 fastjsonschema==2.21.1 filelock==3.18.0 flake8==7.2.0 fqdn==1.5.1 frozenlist==1.5.0 fsspec==2025.3.1 gcsfs==2025.3.1 google-api-core==2.24.2 google-auth==2.38.0 google-auth-oauthlib==1.2.1 google-cloud-core==2.4.3 google-cloud-storage==3.1.0 google-compute-engine==2.8.13 google-crc32c==1.7.1 google-resumable-media==2.7.2 googleapis-common-protos==1.69.2 h11==0.14.0 httpcore==1.0.7 httpx==0.28.1 id==1.5.0 identify==2.6.9 idna==3.10 imagesize==1.4.1 importlib_metadata==8.6.1 iniconfig==2.1.0 ipykernel==6.29.5 ipython==8.18.1 ipywidgets==8.1.5 isodate==0.7.2 isoduration==20.11.0 jaraco.classes==3.4.0 jaraco.context==6.0.1 jaraco.functools==4.1.0 jedi==0.19.2 jeepney==0.9.0 Jinja2==3.1.6 jmespath==1.0.1 json5==0.10.0 jsonpointer==3.0.0 jsonschema==4.23.0 jsonschema-specifications==2024.10.1 jupyter-events==0.12.0 jupyter-lsp==2.2.5 jupyter_client==8.6.3 jupyter_core==5.7.2 jupyter_server==2.15.0 jupyter_server_terminals==0.5.3 jupyterlab==4.3.6 jupyterlab_pygments==0.3.0 jupyterlab_server==2.27.3 jupyterlab_widgets==3.0.13 keyring==25.6.0 markdown-it-py==3.0.0 MarkupSafe==3.0.2 matplotlib-inline==0.1.7 mccabe==0.7.0 mdurl==0.1.2 mistune==3.1.3 mock==5.2.0 more-itertools==10.6.0 moto==5.1.2 msal==1.32.0 multidict==6.2.0 mypy-extensions==1.0.0 nbclient==0.10.2 nbconvert==7.16.6 nbformat==5.10.4 nest-asyncio==1.6.0 nh3==0.2.21 nodeenv==1.9.1 notebook==7.3.3 notebook_shim==0.2.4 oauthlib==3.2.2 overrides==7.7.0 packaging==24.2 pandocfilters==1.5.1 -e git+https://github.com/nteract/papermill.git@d5299b0f2705d7a9175c377aa2a2f812c83239e3#egg=papermill parso==0.8.4 pathspec==0.12.1 pexpect==4.9.0 platformdirs==4.3.7 pluggy==1.5.0 pre_commit==4.2.0 prometheus_client==0.21.1 prompt_toolkit==3.0.50 propcache==0.3.1 proto-plus==1.26.1 protobuf==6.30.2 psutil==7.0.0 ptyprocess==0.7.0 pure_eval==0.2.3 pyarrow==19.0.1 pyasn1==0.6.1 pyasn1_modules==0.4.2 pycodestyle==2.13.0 pycparser==2.22 pyflakes==3.3.1 Pygments==2.19.1 PyJWT==2.10.1 pyproject-api==1.9.0 pyproject_hooks==1.2.0 pytest==8.3.5 pytest-asyncio==0.26.0 pytest-cov==6.0.0 pytest-env==1.1.5 pytest-mock==3.14.0 pytest-xdist==3.6.1 python-dateutil==2.9.0.post0 python-json-logger==3.3.0 PyYAML==6.0.2 pyzmq==26.3.0 readme_renderer==44.0 recommonmark==0.7.1 referencing==0.36.2 requests==2.32.3 requests-oauthlib==2.0.0 requests-toolbelt==1.0.0 responses==0.25.7 rfc3339-validator==0.1.4 rfc3986==2.0.0 rfc3986-validator==0.1.1 rich==14.0.0 rpds-py==0.24.0 rsa==4.9 s3transfer==0.11.4 SecretStorage==3.3.3 Send2Trash==1.8.3 six==1.17.0 sniffio==1.3.1 snowballstemmer==2.2.0 soupsieve==2.6 Sphinx==7.4.7 sphinxcontrib-applehelp==2.0.0 sphinxcontrib-devhelp==2.0.0 
sphinxcontrib-htmlhelp==2.1.0 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==2.0.0 sphinxcontrib-serializinghtml==2.0.0 stack-data==0.6.3 tenacity==9.0.0 terminado==0.18.1 textwrap3==0.9.2 tinycss2==1.4.0 tomli==2.2.1 tornado==6.4.2 tox==4.25.0 tqdm==4.67.1 traitlets==5.14.3 twine==6.1.0 types-python-dateutil==2.9.0.20241206 typing_extensions==4.13.0 uri-template==1.3.0 urllib3==1.26.20 virtualenv==20.29.3 wcwidth==0.2.13 webcolors==24.11.1 webencodings==0.5.1 websocket-client==1.8.0 Werkzeug==3.1.3 widgetsnbextension==4.0.13 xmltodict==0.14.2 yarl==1.18.3 zipp==3.21.0
name: papermill channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - aiohappyeyeballs==2.6.1 - aiohttp==3.11.14 - aiosignal==1.3.2 - alabaster==0.7.16 - ansiwrap==0.8.4 - anyio==4.9.0 - argon2-cffi==23.1.0 - argon2-cffi-bindings==21.2.0 - arrow==1.3.0 - asttokens==3.0.0 - async-lru==2.0.5 - async-timeout==5.0.1 - attrs==25.3.0 - azure-core==1.32.0 - azure-datalake-store==0.0.53 - azure-storage-blob==12.25.1 - babel==2.17.0 - backports-tarfile==1.2.0 - beautifulsoup4==4.13.3 - black==25.1.0 - bleach==6.2.0 - boto==2.49.0 - boto3==1.37.23 - botocore==1.37.23 - build==1.2.2.post1 - bump2version==1.0.1 - bumpversion==0.6.0 - cachetools==5.5.2 - certifi==2025.1.31 - cffi==1.17.1 - cfgv==3.4.0 - chardet==5.2.0 - charset-normalizer==3.4.1 - check-manifest==0.50 - click==8.1.8 - codecov==2.1.13 - colorama==0.4.6 - comm==0.2.2 - commonmark==0.9.1 - coverage==7.8.0 - cryptography==44.0.2 - debugpy==1.8.13 - decorator==5.2.1 - defusedxml==0.7.1 - distlib==0.3.9 - distro==1.9.0 - docutils==0.21.2 - entrypoints==0.4 - exceptiongroup==1.2.2 - execnet==2.1.1 - executing==2.2.0 - fastjsonschema==2.21.1 - filelock==3.18.0 - flake8==7.2.0 - fqdn==1.5.1 - frozenlist==1.5.0 - fsspec==2025.3.1 - gcsfs==2025.3.1 - google-api-core==2.24.2 - google-auth==2.38.0 - google-auth-oauthlib==1.2.1 - google-cloud-core==2.4.3 - google-cloud-storage==3.1.0 - google-compute-engine==2.8.13 - google-crc32c==1.7.1 - google-resumable-media==2.7.2 - googleapis-common-protos==1.69.2 - h11==0.14.0 - httpcore==1.0.7 - httpx==0.28.1 - id==1.5.0 - identify==2.6.9 - idna==3.10 - imagesize==1.4.1 - importlib-metadata==8.6.1 - iniconfig==2.1.0 - ipykernel==6.29.5 - ipython==8.18.1 - ipywidgets==8.1.5 - isodate==0.7.2 - isoduration==20.11.0 - jaraco-classes==3.4.0 - jaraco-context==6.0.1 - jaraco-functools==4.1.0 - jedi==0.19.2 - jeepney==0.9.0 - jinja2==3.1.6 - jmespath==1.0.1 - json5==0.10.0 - jsonpointer==3.0.0 - jsonschema==4.23.0 - jsonschema-specifications==2024.10.1 - jupyter-client==8.6.3 - jupyter-core==5.7.2 - jupyter-events==0.12.0 - jupyter-lsp==2.2.5 - jupyter-server==2.15.0 - jupyter-server-terminals==0.5.3 - jupyterlab==4.3.6 - jupyterlab-pygments==0.3.0 - jupyterlab-server==2.27.3 - jupyterlab-widgets==3.0.13 - keyring==25.6.0 - markdown-it-py==3.0.0 - markupsafe==3.0.2 - matplotlib-inline==0.1.7 - mccabe==0.7.0 - mdurl==0.1.2 - mistune==3.1.3 - mock==5.2.0 - more-itertools==10.6.0 - moto==5.1.2 - msal==1.32.0 - multidict==6.2.0 - mypy-extensions==1.0.0 - nbclient==0.10.2 - nbconvert==7.16.6 - nbformat==5.10.4 - nest-asyncio==1.6.0 - nh3==0.2.21 - nodeenv==1.9.1 - notebook==7.3.3 - notebook-shim==0.2.4 - oauthlib==3.2.2 - overrides==7.7.0 - packaging==24.2 - pandocfilters==1.5.1 - papermill==2.1.3 - parso==0.8.4 - pathspec==0.12.1 - pexpect==4.9.0 - platformdirs==4.3.7 - pluggy==1.5.0 - pre-commit==4.2.0 - prometheus-client==0.21.1 - prompt-toolkit==3.0.50 - propcache==0.3.1 - 
proto-plus==1.26.1 - protobuf==6.30.2 - psutil==7.0.0 - ptyprocess==0.7.0 - pure-eval==0.2.3 - pyarrow==19.0.1 - pyasn1==0.6.1 - pyasn1-modules==0.4.2 - pycodestyle==2.13.0 - pycparser==2.22 - pyflakes==3.3.1 - pygments==2.19.1 - pyjwt==2.10.1 - pyproject-api==1.9.0 - pyproject-hooks==1.2.0 - pytest==8.3.5 - pytest-asyncio==0.26.0 - pytest-cov==6.0.0 - pytest-env==1.1.5 - pytest-mock==3.14.0 - pytest-xdist==3.6.1 - python-dateutil==2.9.0.post0 - python-json-logger==3.3.0 - pyyaml==6.0.2 - pyzmq==26.3.0 - readme-renderer==44.0 - recommonmark==0.7.1 - referencing==0.36.2 - requests==2.32.3 - requests-oauthlib==2.0.0 - requests-toolbelt==1.0.0 - responses==0.25.7 - rfc3339-validator==0.1.4 - rfc3986==2.0.0 - rfc3986-validator==0.1.1 - rich==14.0.0 - rpds-py==0.24.0 - rsa==4.9 - s3transfer==0.11.4 - secretstorage==3.3.3 - send2trash==1.8.3 - six==1.17.0 - sniffio==1.3.1 - snowballstemmer==2.2.0 - soupsieve==2.6 - sphinx==7.4.7 - sphinxcontrib-applehelp==2.0.0 - sphinxcontrib-devhelp==2.0.0 - sphinxcontrib-htmlhelp==2.1.0 - sphinxcontrib-jsmath==1.0.1 - sphinxcontrib-qthelp==2.0.0 - sphinxcontrib-serializinghtml==2.0.0 - stack-data==0.6.3 - tenacity==9.0.0 - terminado==0.18.1 - textwrap3==0.9.2 - tinycss2==1.4.0 - tomli==2.2.1 - tornado==6.4.2 - tox==4.25.0 - tqdm==4.67.1 - traitlets==5.14.3 - twine==6.1.0 - types-python-dateutil==2.9.0.20241206 - typing-extensions==4.13.0 - uri-template==1.3.0 - urllib3==1.26.20 - virtualenv==20.29.3 - wcwidth==0.2.13 - webcolors==24.11.1 - webencodings==0.5.1 - websocket-client==1.8.0 - werkzeug==3.1.3 - widgetsnbextension==4.0.13 - xmltodict==0.14.2 - yarl==1.18.3 - zipp==3.21.0 prefix: /opt/conda/envs/papermill
[ "papermill/tests/test_execute.py::TestCWD::test_pathlib_paths", "papermill/tests/test_inspect.py::test_inspect_notebook[name0-expected0]", "papermill/tests/test_inspect.py::test_inspect_notebook[name1-expected1]", "papermill/tests/test_inspect.py::test_inspect_notebook[name2-expected2]", "papermill/tests/test_inspect.py::test_inspect_notebook[name3-expected3]" ]
[ "papermill/tests/test_execute.py::TestBrokenNotebook2::test" ]
[ "papermill/tests/test_execute.py::TestNotebookHelpers::test_backslash_params", "papermill/tests/test_execute.py::TestNotebookHelpers::test_backslash_quote_params", "papermill/tests/test_execute.py::TestNotebookHelpers::test_cell_insertion", "papermill/tests/test_execute.py::TestNotebookHelpers::test_default_start_timeout", "papermill/tests/test_execute.py::TestNotebookHelpers::test_double_backslash_quote_params", "papermill/tests/test_execute.py::TestNotebookHelpers::test_no_tags", "papermill/tests/test_execute.py::TestNotebookHelpers::test_prepare_only", "papermill/tests/test_execute.py::TestNotebookHelpers::test_quoted_params", "papermill/tests/test_execute.py::TestNotebookHelpers::test_start_timeout", "papermill/tests/test_execute.py::TestBrokenNotebook1::test", "papermill/tests/test_execute.py::TestReportMode::test_report_mode", "papermill/tests/test_execute.py::TestCWD::test_execution_respects_cwd_assignment", "papermill/tests/test_execute.py::TestCWD::test_local_save_ignores_cwd_assignment", "papermill/tests/test_execute.py::TestSysExit::test_sys_exit", "papermill/tests/test_execute.py::TestSysExit::test_sys_exit0", "papermill/tests/test_execute.py::TestSysExit::test_sys_exit1", "papermill/tests/test_execute.py::TestSysExit::test_system_exit", "papermill/tests/test_inspect.py::test_str_path", "papermill/tests/test_inspect.py::test_display_notebook_help[name0-expected0]", "papermill/tests/test_inspect.py::test_display_notebook_help[name1-expected1]", "papermill/tests/test_inspect.py::test_display_notebook_help[name2-expected2]", "papermill/tests/test_inspect.py::test_display_notebook_help[name3-expected3]" ]
[]
BSD 3-Clause "New" or "Revised" License
8,833
686
[ "papermill/execute.py", "papermill/inspection.py" ]
nosarthur__gita-108
cd179b31458ccad98fdb62c13ba394a4fc7a2cf3
2020-10-31 05:10:13
cd179b31458ccad98fdb62c13ba394a4fc7a2cf3
diff --git a/gita/__main__.py b/gita/__main__.py index e00f20d..13a048e 100644 --- a/gita/__main__.py +++ b/gita/__main__.py @@ -57,7 +57,7 @@ def f_ll(args: argparse.Namespace): if args.group: # only display repos in this group group_repos = utils.get_groups()[args.group] repos = {k: repos[k] for k in group_repos if k in repos} - for line in utils.describe(repos): + for line in utils.describe(repos, no_colors=args.no_colors): print(line) @@ -245,6 +245,8 @@ def main(argv=None): nargs='?', choices=utils.get_groups(), help="show repos in the chosen group") + p_ll.add_argument('-n', '--no-colors', action='store_true', + help='Disable coloring on the branch names.') p_ll.set_defaults(func=f_ll) p_context = subparsers.add_parser('context', diff --git a/gita/info.py b/gita/info.py index 18d20fd..2d1d33f 100644 --- a/gita/info.py +++ b/gita/info.py @@ -42,6 +42,7 @@ def get_info_items() -> Tuple[Dict[str, Callable[[str], str]], List[str]]: 'path': get_path, } display_items = ['branch', 'commit_msg'] + # FIXME: remove these code # custom settings root = common.get_config_dir() src_fname = os.path.join(root, 'extra_repo_info.py') @@ -113,13 +114,15 @@ def get_commit_msg(path: str) -> str: return result.stdout.strip() -def get_repo_status(path: str) -> str: +def get_repo_status(path: str, no_colors=False) -> str: head = get_head(path) - dirty, staged, untracked, color = _get_repo_status(path) - return f'{color}{head+" "+dirty+staged+untracked:<10}{Color.end}' + dirty, staged, untracked, color = _get_repo_status(path, no_colors) + if color: + return f'{color}{head+" "+dirty+staged+untracked:<10}{Color.end}' + return f'{head+" "+dirty+staged+untracked:<10}' -def _get_repo_status(path: str) -> Tuple[str]: +def _get_repo_status(path: str, no_colors: bool) -> Tuple[str]: """ Return the status of one repo """ @@ -128,6 +131,9 @@ def _get_repo_status(path: str) -> Tuple[str]: staged = '+' if run_quiet_diff(['--cached']) else '' untracked = '_' if has_untracked() else '' + if no_colors: + return dirty, staged, untracked, '' + diff_returncode = run_quiet_diff(['@{u}', '@{0}']) has_no_remote = diff_returncode == 128 has_no_diff = diff_returncode == 0 diff --git a/gita/utils.py b/gita/utils.py index 900aefa..cc0a354 100644 --- a/gita/utils.py +++ b/gita/utils.py @@ -2,7 +2,7 @@ import os import yaml import asyncio import platform -from functools import lru_cache +from functools import lru_cache, partial from pathlib import Path from typing import List, Dict, Coroutine, Union @@ -197,13 +197,19 @@ def exec_async_tasks(tasks: List[Coroutine]) -> List[Union[None, str]]: return errors -def describe(repos: Dict[str, str]) -> str: +def describe(repos: Dict[str, str], no_colors: bool=False) -> str: """ Return the status of all repos """ if repos: name_width = max(len(n) for n in repos) + 1 funcs = info.get_info_funcs() + + get_repo_status = info.get_repo_status + if get_repo_status in funcs and no_colors: + idx = funcs.index(get_repo_status) + funcs[idx] = partial(get_repo_status, no_colors=True) + for name in sorted(repos): path = repos[name] display_items = ' '.join(f(path) for f in funcs) diff --git a/setup.py b/setup.py index 0cdd33f..cf4ab90 100644 --- a/setup.py +++ b/setup.py @@ -7,7 +7,7 @@ with open('README.md', encoding='utf-8') as f: setup( name='gita', packages=['gita'], - version='0.11.6', + version='0.11.7', license='MIT', description='Manage multiple git repos with sanity', long_description=long_description,
A no-colour option for when running through either a script or cron. I am using gita to back up some repositories, so I need to run `gita ll` (just to see what will change) and then `gita pull`. Because this runs through cron, I get some ugly control characters instead of colours. Would it be possible to add a `--no-colours` option? Thank you
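With the `-n/--no-colors` flag added in the patch above, the cron job from the report can simply call `gita ll -n`. A script can also bypass the CLI and ask for plain output directly; a small sketch using the patched `describe` helper (the repo-discovery call is assumed from gita's utils module):

```python
from gita import utils

# Print the status of every registered repo without ANSI colour codes,
# e.g. for a cron job that logs to a file.
repos = utils.get_repos()
for line in utils.describe(repos, no_colors=True):
    print(line)
```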
nosarthur/gita
diff --git a/tests/test_main.py b/tests/test_main.py index 1b23bee..1d6c9f6 100644 --- a/tests/test_main.py +++ b/tests/test_main.py @@ -5,7 +5,7 @@ import argparse import shlex from gita import __main__ -from gita import utils +from gita import utils, info from conftest import ( PATH_FNAME, PATH_FNAME_EMPTY, PATH_FNAME_CLASH, GROUP_FNAME, async_mock, TEST_DIR, @@ -35,6 +35,14 @@ class TestLsLl: out, err = capfd.readouterr() assert err == '' assert 'gita' in out + assert info.Color.end in out + + # no color on branch name + __main__.main(['ll', '-n']) + out, err = capfd.readouterr() + assert err == '' + assert 'gita' in out + assert info.Color.end not in out __main__.main(['ls', 'gita']) out, err = capfd.readouterr() diff --git a/tests/test_utils.py b/tests/test_utils.py index dfabcee..607a33b 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -9,12 +9,9 @@ from conftest import ( @pytest.mark.parametrize('test_input, diff_return, expected', [ - ({ - 'abc': '/root/repo/' - }, True, 'abc \x1b[31mrepo *+_ \x1b[0m msg'), - ({ - 'repo': '/root/repo2/' - }, False, 'repo \x1b[32mrepo _ \x1b[0m msg'), + ([{'abc': '/root/repo/'}, False], True, 'abc \x1b[31mrepo *+_ \x1b[0m msg'), + ([{'abc': '/root/repo/'}, True], True, 'abc repo *+_ msg'), + ([{'repo': '/root/repo2/'}, False], False, 'repo \x1b[32mrepo _ \x1b[0m msg'), ]) def test_describe(test_input, diff_return, expected, monkeypatch): monkeypatch.setattr(info, 'get_head', lambda x: 'repo') @@ -23,8 +20,8 @@ def test_describe(test_input, diff_return, expected, monkeypatch): monkeypatch.setattr(info, 'has_untracked', lambda: True) monkeypatch.setattr('os.chdir', lambda x: None) print('expected: ', repr(expected)) - print('got: ', repr(next(utils.describe(test_input)))) - assert expected == next(utils.describe(test_input)) + print('got: ', repr(next(utils.describe(*test_input)))) + assert expected == next(utils.describe(*test_input)) @pytest.mark.parametrize('path_fname, expected', [
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 0 }, "num_modified_files": 4 }
0.10
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.9", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
backports.tarfile==1.2.0 certifi==2025.1.31 cffi==1.17.1 charset-normalizer==3.4.1 coverage==7.8.0 cryptography==44.0.2 docutils==0.21.2 exceptiongroup==1.2.2 execnet==2.1.1 -e git+https://github.com/nosarthur/gita.git@cd179b31458ccad98fdb62c13ba394a4fc7a2cf3#egg=gita id==1.5.0 idna==3.10 importlib_metadata==8.6.1 iniconfig==2.1.0 jaraco.classes==3.4.0 jaraco.context==6.0.1 jaraco.functools==4.1.0 jeepney==0.9.0 keyring==25.6.0 markdown-it-py==3.0.0 mdurl==0.1.2 more-itertools==10.6.0 nh3==0.2.21 packaging==24.2 pluggy==1.5.0 pycparser==2.22 Pygments==2.19.1 pytest==8.3.5 pytest-cov==6.0.0 pytest-xdist==3.6.1 PyYAML==6.0.2 readme_renderer==44.0 requests==2.32.3 requests-toolbelt==1.0.0 rfc3986==2.0.0 rich==14.0.0 SecretStorage==3.3.3 tomli==2.2.1 twine==6.1.0 typing_extensions==4.13.0 urllib3==2.3.0 zipp==3.21.0
name: gita channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - backports-tarfile==1.2.0 - certifi==2025.1.31 - cffi==1.17.1 - charset-normalizer==3.4.1 - coverage==7.8.0 - cryptography==44.0.2 - docutils==0.21.2 - exceptiongroup==1.2.2 - execnet==2.1.1 - id==1.5.0 - idna==3.10 - importlib-metadata==8.6.1 - iniconfig==2.1.0 - jaraco-classes==3.4.0 - jaraco-context==6.0.1 - jaraco-functools==4.1.0 - jeepney==0.9.0 - keyring==25.6.0 - markdown-it-py==3.0.0 - mdurl==0.1.2 - more-itertools==10.6.0 - nh3==0.2.21 - packaging==24.2 - pluggy==1.5.0 - pycparser==2.22 - pygments==2.19.1 - pytest==8.3.5 - pytest-cov==6.0.0 - pytest-xdist==3.6.1 - pyyaml==6.0.2 - readme-renderer==44.0 - requests==2.32.3 - requests-toolbelt==1.0.0 - rfc3986==2.0.0 - rich==14.0.0 - secretstorage==3.3.3 - tomli==2.2.1 - twine==6.1.0 - typing-extensions==4.13.0 - urllib3==2.3.0 - zipp==3.21.0 prefix: /opt/conda/envs/gita
[ "tests/test_main.py::TestLsLl::testLl", "tests/test_utils.py::test_describe[test_input0-True-abc", "tests/test_utils.py::test_describe[test_input1-True-abc", "tests/test_utils.py::test_describe[test_input2-False-repo" ]
[]
[ "tests/test_main.py::TestLsLl::testLs", "tests/test_main.py::TestLsLl::testWithPathFiles[/gita/tests/mock_path_file-repo1", "tests/test_main.py::TestLsLl::testWithPathFiles[/gita/tests/empty_path_file-]", "tests/test_main.py::TestLsLl::testWithPathFiles[/gita/tests/clash_path_file-repo1", "tests/test_main.py::test_rm", "tests/test_main.py::test_not_add", "tests/test_main.py::test_fetch", "tests/test_main.py::test_async_fetch", "tests/test_main.py::test_superman[diff", "tests/test_main.py::test_superman[commit", "tests/test_main.py::TestContext::testDisplayNoContext", "tests/test_main.py::TestContext::testDisplayContext", "tests/test_main.py::TestContext::testReset", "tests/test_main.py::TestContext::testSetFirstTime", "tests/test_main.py::TestContext::testSetSecondTime", "tests/test_main.py::TestGroupCmd::testLs", "tests/test_main.py::TestGroupCmd::testLl", "tests/test_main.py::TestGroupCmd::testRename", "tests/test_main.py::TestGroupCmd::testRenameError", "tests/test_main.py::TestGroupCmd::testRm[xx-expected0]", "tests/test_main.py::TestGroupCmd::testRm[xx", "tests/test_main.py::TestGroupCmd::testAdd", "tests/test_main.py::TestGroupCmd::testAddToExisting", "tests/test_main.py::test_rename", "tests/test_main.py::test_info", "tests/test_utils.py::test_get_repos[/gita/tests/mock_path_file-expected0]", "tests/test_utils.py::test_get_repos[/gita/tests/empty_path_file-expected1]", "tests/test_utils.py::test_get_repos[/gita/tests/clash_path_file-expected2]", "tests/test_utils.py::test_get_context", "tests/test_utils.py::test_get_groups[/gita/tests/mock_group_file-expected0]", "tests/test_utils.py::test_custom_push_cmd", "tests/test_utils.py::test_add_repos[path_input0-/home/some/repo,repo\\n]", "tests/test_utils.py::test_add_repos[path_input1-expected1]", "tests/test_utils.py::test_add_repos[path_input2-/home/some/repo1,repo1\\n]", "tests/test_utils.py::test_rename_repo", "tests/test_utils.py::test_async_output" ]
[]
MIT License
8,835
1,152
[ "gita/__main__.py", "gita/info.py", "gita/utils.py", "setup.py" ]
pypa__build-177
a1ef3b82ab8db4d889bf4e52c451a3308bb1eb6f
2020-11-01 02:33:30
6a72a234ac558234ad65791a17b263b215e31645
diff --git a/src/build/__init__.py b/src/build/__init__.py index 6a0a32f..fcdb925 100644 --- a/src/build/__init__.py +++ b/src/build/__init__.py @@ -133,26 +133,31 @@ class ProjectBuilder(object): try: with open(spec_file) as f: - self._spec = toml.load(f) + spec = toml.load(f) except FileNotFoundError: - self._spec = {} + spec = {} except PermissionError as e: raise BuildException("{}: '{}' ".format(e.strerror, e.filename)) except toml.decoder.TomlDecodeError as e: - raise BuildException('Failed to parse pyproject.toml: {} '.format(e)) - - _find_typo(self._spec, 'build-system') - self._build_system = self._spec.get('build-system', _DEFAULT_BACKEND) - - if 'build-backend' not in self._build_system: - _find_typo(self._build_system, 'build-backend') - _find_typo(self._build_system, 'requires') - self._build_system['build-backend'] = _DEFAULT_BACKEND['build-backend'] - self._build_system['requires'] = self._build_system.get('requires', []) + _DEFAULT_BACKEND['requires'] - - if 'requires' not in self._build_system: - raise BuildException("Missing 'build-system.requires' in pyproject.toml") - + raise BuildException('Failed to parse {}: {} '.format(spec_file, e)) + + build_system = spec.get('build-system') + # if pyproject.toml is missing (per PEP 517) or [build-system] is missing (pep PEP 518), + # use default values. + if build_system is None: + _find_typo(spec, 'build-system') + build_system = _DEFAULT_BACKEND + # if [build-system] is present, it must have a ``requires`` field (per PEP 518). + elif 'requires' not in build_system: + _find_typo(build_system, 'requires') + raise BuildException("Missing 'build-system.requires' in {}".format(spec_file)) + # if ``build-backend`` is missing, inject the legacy setuptools backend + # but leave ``requires`` alone to emulate pip. + elif 'build-backend' not in build_system: + _find_typo(build_system, 'build-backend') + build_system['build-backend'] = _DEFAULT_BACKEND['build-backend'] + + self._build_system = build_system self._backend = self._build_system['build-backend'] self._hook = pep517.wrappers.Pep517HookCaller(
Double requirement error if build-backend missing I have the following pyproject.toml: ```toml [build-system] requires = ["wheel", "setuptools>=42", "setuptools_scm[toml]>=3.4"] ``` Build fails with: ``` Looking in links: /tmp/tmpu9cyr1oc Processing /tmp/tmpu9cyr1oc/setuptools-47.1.0-py3-none-any.whl Processing /tmp/tmpu9cyr1oc/pip-20.1.1-py2.py3-none-any.whl Installing collected packages: setuptools, pip Successfully installed pip setuptools ERROR: Double requirement given: setuptools>=42 (from -r /tmp/build-reqs-akrau6fv.txt (line 2)) (already in setuptools>=40.8.0 (from -r /tmp/build-reqs-akrau6fv.txt (line 1)), name='setuptools') WARNING: You are using pip version 20.1.1; however, version 20.2.3 is available. You should consider upgrading via the '/opt/hostedtoolcache/Python/3.8.5/x64/bin/python -m pip install --upgrade pip' command. Traceback (most recent call last): File "/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/runpy.py", line 194, in _run_module_as_main return _run_code(code, main_globals, None, File "/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/runpy.py", line 87, in _run_code exec(code, run_globals) File "/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/site-packages/build/__main__.py", line 176, in <module> main(sys.argv[1:], 'python -m build') File "/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/site-packages/build/__main__.py", line 168, in main build(args.srcdir, args.outdir, distributions, config_settings, not args.no_isolation, args.skip_dependencies) File "/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/site-packages/build/__main__.py", line 80, in build _build_in_isolated_env(builder, outdir, distributions) File "/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/site-packages/build/__main__.py", line 45, in _build_in_isolated_env env.install(builder.build_dependencies) File "/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/site-packages/build/env.py", line 176, in install subprocess.check_call(cmd) File "/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/subprocess.py", line 364, in check_call raise CalledProcessError(retcode, cmd) subprocess.CalledProcessError: Command '['/opt/hostedtoolcache/Python/3.8.5/x64/bin/python', '-m', 'pip', 'install', '--prefix', '/tmp/build-env-6o04yjl_', '-r', '/tmp/build-reqs-akrau6fv.txt']' returned non-zero exit status 1. ``` However, there is a missing line - and adding it fixed the problem: ```toml build-backend = "setuptools.build_meta" ``` a) Is it possible to improve the error message and b) should this error be happening anyway? It seems to be trying to recursively produce all the requirements manually and put then into a requirements.txt-like file, which then collide?
pypa/build
diff --git a/tests/conftest.py b/tests/conftest.py index c06d26a..fe76ba6 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -118,6 +118,11 @@ def test_no_requires_path(packages_path): return os.path.join(packages_path, 'test-no-requires') [email protected] +def test_optional_hooks_path(packages_path): + return os.path.join(packages_path, 'test-optional-hooks') + + @pytest.fixture def test_typo(packages_path): return os.path.join(packages_path, 'test-typo') diff --git a/tests/packages/test-optional-hooks/hookless_backend.py b/tests/packages/test-optional-hooks/hookless_backend.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/packages/test-optional-hooks/pyproject.toml b/tests/packages/test-optional-hooks/pyproject.toml new file mode 100644 index 0000000..2796891 --- /dev/null +++ b/tests/packages/test-optional-hooks/pyproject.toml @@ -0,0 +1,4 @@ +[build-system] +requires = [] +build-backend = 'hookless_backend' +backend-path = ['.'] diff --git a/tests/test_projectbuilder.py b/tests/test_projectbuilder.py index 1d38efa..7cbd19f 100644 --- a/tests/test_projectbuilder.py +++ b/tests/test_projectbuilder.py @@ -148,6 +148,13 @@ def test_get_dependencies_missing_backend(packages_path, distribution): builder.get_dependencies(distribution) [email protected]('distribution', ['wheel', 'sdist']) +def test_get_dependencies_missing_optional_hooks(test_optional_hooks_path, distribution): + builder = build.ProjectBuilder(test_optional_hooks_path) + + assert builder.get_dependencies(distribution) == set() + + @pytest.mark.parametrize('distribution', ['wheel', 'sdist']) def test_build_missing_backend(packages_path, distribution, tmpdir): bad_backend_path = os.path.join(packages_path, 'test-bad-backend') @@ -238,7 +245,7 @@ def test_missing_backend(mocker, test_no_backend_path): builder = build.ProjectBuilder(test_no_backend_path) - assert builder._build_system == DEFAULT_BACKEND + assert builder._build_system == {'requires': [], 'build-backend': DEFAULT_BACKEND['build-backend']} def test_missing_requires(mocker, test_no_requires_path):
{ "commit_name": "merge_commit", "failed_lite_validators": [], "has_test_patch": true, "is_lite": true, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 1 }, "num_modified_files": 1 }
0.1
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "pytest-mock", "pytest-xdist" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
-e git+https://github.com/pypa/build.git@a1ef3b82ab8db4d889bf4e52c451a3308bb1eb6f#egg=build coverage==7.8.0 exceptiongroup==1.2.2 execnet==2.1.1 iniconfig==2.1.0 packaging==24.2 pep517==0.13.1 pluggy==1.5.0 pytest==8.3.5 pytest-cov==6.0.0 pytest-mock==3.14.0 pytest-xdist==3.6.1 toml==0.10.2 tomli==2.2.1
name: build channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - build==0.1.0 - coverage==7.8.0 - exceptiongroup==1.2.2 - execnet==2.1.1 - iniconfig==2.1.0 - packaging==24.2 - pep517==0.13.1 - pluggy==1.5.0 - pytest==8.3.5 - pytest-cov==6.0.0 - pytest-mock==3.14.0 - pytest-xdist==3.6.1 - toml==0.10.2 - tomli==2.2.1 prefix: /opt/conda/envs/build
[ "tests/test_projectbuilder.py::test_missing_backend" ]
[ "tests/test_projectbuilder.py::test_init" ]
[ "tests/test_projectbuilder.py::test_check_version[something-True-False]", "tests/test_projectbuilder.py::test_check_version[something_else-False-False]", "tests/test_projectbuilder.py::test_check_version[something[extra]-False-False]", "tests/test_projectbuilder.py::test_check_version[something[some_extra]-True-True]", "tests/test_projectbuilder.py::test_check_version[something_else;", "tests/test_projectbuilder.py::test_check_version[something", "tests/test_projectbuilder.py::test_check_version[something[some_extra]", "tests/test_projectbuilder.py::test_python_executable[something]", "tests/test_projectbuilder.py::test_python_executable[something_else]", "tests/test_projectbuilder.py::test_get_dependencies_missing_backend[wheel]", "tests/test_projectbuilder.py::test_get_dependencies_missing_backend[sdist]", "tests/test_projectbuilder.py::test_get_dependencies_missing_optional_hooks[wheel]", "tests/test_projectbuilder.py::test_get_dependencies_missing_optional_hooks[sdist]", "tests/test_projectbuilder.py::test_build_missing_backend[wheel]", "tests/test_projectbuilder.py::test_build_missing_backend[sdist]", "tests/test_projectbuilder.py::test_check_dependencies", "tests/test_projectbuilder.py::test_working_directory", "tests/test_projectbuilder.py::test_build", "tests/test_projectbuilder.py::test_default_backend", "tests/test_projectbuilder.py::test_missing_requires", "tests/test_projectbuilder.py::test_build_system_typo", "tests/test_projectbuilder.py::test_missing_outdir", "tests/test_projectbuilder.py::test_relative_outdir", "tests/test_projectbuilder.py::test_not_dir_outdir" ]
[]
MIT License
8,842
625
[ "src/build/__init__.py" ]
huffmanjohnf__temp-humidity-hexbin-9
2d30946d8044582d1bbe0127e53bb16c6ac14fa4
2020-11-02 21:53:08
2d30946d8044582d1bbe0127e53bb16c6ac14fa4
diff --git a/temp_humidity_hexbin/visual.py b/temp_humidity_hexbin/visual.py index a85a92f..79ffb86 100644 --- a/temp_humidity_hexbin/visual.py +++ b/temp_humidity_hexbin/visual.py @@ -1,8 +1,46 @@ +from typing import Tuple + import pandas as pd import seaborn as sns -def hexbin_plt(df: pd.DataFrame, sp_cool: float = 72.0, sp_heat: float = 68.0, sp_humi: float = 55.0): +def get_range(S: pd.Series): + return (S.quantile(0.01), S.quantile(0.99)) + + +def find_edge(considerations, how: str = "max", buffer: int = 1): + assert how in ["min", "max"] + if how == "min": + return min(considerations) - buffer + else: + return max(considerations) + buffer + + +def gridsize(x_01: float, x_99: float, y_01: float, y_99: float, scale: int = 2): + xgrid = x_99 - x_01 + ygrid = y_99 - y_01 + correction_factor = min([xgrid, ygrid]) + xgrid = max([int(xgrid * (correction_factor / xgrid) * scale), 1]) + ygrid = max([int(ygrid * (correction_factor / ygrid) * scale), 1]) + return (ygrid, xgrid) + + +def hexbin_plt( + df: pd.DataFrame, + sp_cool: float = 72.0, + sp_heat: float = 68.0, + sp_humi: float = 55.0, + xlim: Tuple[int] = None, + ylim: Tuple[int] = None, +): + x_range = get_range(df["Temperature"]) + y_range = get_range(df["Humidity"]) + + if not xlim: + xlim = (find_edge([sp_heat, x_range[0]], "min"), find_edge([sp_cool, x_range[1]], "max")) + if not ylim: + ylim = (y_range[0], find_edge([sp_humi, y_range[1]], "max")) + hexbin = sns.jointplot( x="Temperature", y="Humidity", @@ -12,8 +50,9 @@ def hexbin_plt(df: pd.DataFrame, sp_cool: float = 72.0, sp_heat: float = 68.0, s ratio=15, space=0, edgecolor="w", - xlim=(64, 76), - ylim=(45, 65), + xlim=xlim, + ylim=ylim, + joint_kws=dict(gridsize=gridsize(*x_range, *y_range)), color="k", )
Automate Defaults **What's your idea?** Automate suggested defaults that are data-driven. Specifically: - [x] gridsize - [x] graph window
huffmanjohnf/temp-humidity-hexbin
diff --git a/tests/test_visual.py b/tests/test_visual.py index a954758..55dafc6 100644 --- a/tests/test_visual.py +++ b/tests/test_visual.py @@ -4,8 +4,18 @@ from temp_humidity_hexbin.visual import hexbin_plt @pytest.mark.parametrize( - "sp_cool, sp_heat, sp_humi", - [(72.0, 68.0, 55.0), (76.0, 64.0, 60.0), (72.5, 68.4, 55.6)], + "sp_cool, sp_heat, sp_humi, xlim, ylim", + [ + (72.0, 68.0, 55.0, (64, 76), (40, 65)), + (76.0, 64.0, 60.0, (63.5, 72.5), (50.5, 60.5)), + (72.5, 68.4, 55.6, None, None), + (72.0, 68.0, 55.0, None, (45, 65)), + (72.0, 68.0, 55.0, None, None), + ], ) -def test_hexbin_plt(joined_df, sp_cool, sp_heat, sp_humi): - _ = hexbin_plt(joined_df, sp_cool, sp_heat, sp_humi) +def test_hexbin_plt(joined_df, sp_cool, sp_heat, sp_humi, xlim, ylim): + _ = hexbin_plt(joined_df, sp_cool, sp_heat, sp_humi, xlim, ylim) + + +def test_all_defaults(joined_df): + _ = hexbin_plt(joined_df)
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 2, "test_score": 2 }, "num_modified_files": 1 }
unknown
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "flake8", "pytest", "pytest-cov" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.7", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
altair==5.0.1 astor==0.8.1 attrs==24.2.0 backports.zoneinfo==0.2.1 base58==2.1.1 blinker==1.6.3 boto3==1.33.13 botocore==1.33.13 cachetools==5.5.2 certifi @ file:///croot/certifi_1671487769961/work/certifi charset-normalizer==3.4.1 click==8.1.8 coverage==7.2.7 cycler==0.11.0 decorator==5.1.1 enum-compat==0.0.3 exceptiongroup==1.2.2 flake8==5.0.4 gitdb==4.0.12 GitPython==3.1.44 idna==3.10 importlib-metadata==4.2.0 importlib-resources==5.12.0 iniconfig==2.0.0 Jinja2==3.1.6 jmespath==1.0.1 jsonschema==4.17.3 kiwisolver==1.4.5 MarkupSafe==2.1.5 matplotlib==3.3.2 mccabe==0.7.0 numpy==1.21.6 packaging==24.0 pandas==1.1.3 Pillow==7.2.0 pkgutil_resolve_name==1.3.10 plotly==4.10.0 pluggy==1.2.0 protobuf==4.24.4 pyarrow==12.0.1 pycodestyle==2.9.1 pydeck==0.8.1b1 pyflakes==2.5.0 pyparsing==3.1.4 pyrsistent==0.19.3 pytest==7.4.4 pytest-cov==4.1.0 python-dateutil==2.9.0.post0 pytz==2025.2 requests==2.31.0 retrying==1.3.4 s3transfer==0.8.2 scipy==1.7.3 seaborn==0.11.0 six==1.17.0 smmap==5.0.2 streamlit==0.69.2 -e git+https://github.com/huffmanjohnf/temp-humidity-hexbin.git@2d30946d8044582d1bbe0127e53bb16c6ac14fa4#egg=temp_humidity_hexbin toml==0.10.2 tomli==2.0.1 toolz==0.12.1 tornado==6.2 typing_extensions==4.7.1 tzlocal==5.1 urllib3==1.26.20 validators==0.20.0 watchdog==3.0.0 zipp==3.15.0
name: temp-humidity-hexbin channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2022.12.7=py37h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=22.3.1=py37h06a4308_0 - python=3.7.16=h7a1cb2a_0 - readline=8.2=h5eee18b_0 - setuptools=65.6.3=py37h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.38.4=py37h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - altair==5.0.1 - astor==0.8.1 - attrs==24.2.0 - backports-zoneinfo==0.2.1 - base58==2.1.1 - blinker==1.6.3 - boto3==1.33.13 - botocore==1.33.13 - cachetools==5.5.2 - charset-normalizer==3.4.1 - click==8.1.8 - coverage==7.2.7 - cycler==0.11.0 - decorator==5.1.1 - enum-compat==0.0.3 - exceptiongroup==1.2.2 - flake8==5.0.4 - gitdb==4.0.12 - gitpython==3.1.44 - idna==3.10 - importlib-metadata==4.2.0 - importlib-resources==5.12.0 - iniconfig==2.0.0 - jinja2==3.1.6 - jmespath==1.0.1 - jsonschema==4.17.3 - kiwisolver==1.4.5 - markupsafe==2.1.5 - matplotlib==3.3.2 - mccabe==0.7.0 - numpy==1.21.6 - packaging==24.0 - pandas==1.1.3 - pillow==7.2.0 - pkgutil-resolve-name==1.3.10 - plotly==4.10.0 - pluggy==1.2.0 - protobuf==4.24.4 - pyarrow==12.0.1 - pycodestyle==2.9.1 - pydeck==0.8.1b1 - pyflakes==2.5.0 - pyparsing==3.1.4 - pyrsistent==0.19.3 - pytest==7.4.4 - pytest-cov==4.1.0 - python-dateutil==2.9.0.post0 - pytz==2025.2 - requests==2.31.0 - retrying==1.3.4 - s3transfer==0.8.2 - scipy==1.7.3 - seaborn==0.11.0 - six==1.17.0 - smmap==5.0.2 - streamlit==0.69.2 - toml==0.10.2 - tomli==2.0.1 - toolz==0.12.1 - tornado==6.2 - typing-extensions==4.7.1 - tzlocal==5.1 - urllib3==1.26.20 - validators==0.20.0 - watchdog==3.0.0 - zipp==3.15.0 prefix: /opt/conda/envs/temp-humidity-hexbin
[ "tests/test_visual.py::test_hexbin_plt[72.0-68.0-55.0-xlim0-ylim0]", "tests/test_visual.py::test_hexbin_plt[76.0-64.0-60.0-xlim1-ylim1]", "tests/test_visual.py::test_hexbin_plt[72.5-68.4-55.6-None-None]", "tests/test_visual.py::test_hexbin_plt[72.0-68.0-55.0-None-ylim3]", "tests/test_visual.py::test_hexbin_plt[72.0-68.0-55.0-None-None]" ]
[]
[ "tests/test_visual.py::test_all_defaults" ]
[]
MIT License
8,855
704
[ "temp_humidity_hexbin/visual.py" ]
podhmo__swagger-marshmallow-codegen-74
6d5dcfa88e8882a293434e3c3fcbf4837fd21c7d
2020-11-03 10:22:46
f4fe57fc4912d7aa0c4c188fb71b78a8a244922d
diff --git a/swagger_marshmallow_codegen/codegen/config.py b/swagger_marshmallow_codegen/codegen/config.py index 429da80..56f8b2f 100644 --- a/swagger_marshmallow_codegen/codegen/config.py +++ b/swagger_marshmallow_codegen/codegen/config.py @@ -2,10 +2,11 @@ from __future__ import annotations import typing_extensions as tx -class ConfigDict(tx.TypedDict): +class ConfigDict(tx.TypedDict, total=False): schema: bool input: bool output: bool emit_schema_even_primitive_type: bool skip_header_comment: bool + header_comment: str diff --git a/swagger_marshmallow_codegen/codegen/v2/codegen.py b/swagger_marshmallow_codegen/codegen/v2/codegen.py index 568f432..ecf29f2 100644 --- a/swagger_marshmallow_codegen/codegen/v2/codegen.py +++ b/swagger_marshmallow_codegen/codegen/v2/codegen.py @@ -95,7 +95,9 @@ class SchemaWriter: logger.debug(" nested: %s, %s", caller_name, field_class_name) if opts: kwargs = LazyFormat(", {}", kwargs) - value = LazyFormat("{}(lambda: {}(){})", caller_name, field_class_name, kwargs) + value = LazyFormat( + "{}(lambda: {}(){})", caller_name, field_class_name, kwargs + ) else: if caller_name == "fields.Nested": caller_name = "fields.Field" @@ -488,9 +490,14 @@ class Codegen: def resolver(self) -> Resolver: return self.accessor.resolver - def write_header(self, c): - c.im.stmt("# -*- coding:utf-8 -*-") - c.im.stmt("# this is auto-generated by swagger-marshmallow-codegen") + def write_header(self, c, *, comment: t.Optional[str] = None): + if comment is None: + comment = """\ +# this is auto-generated by swagger-marshmallow-codegen +from __future__ import annotations +""" + for line in comment.splitlines(): + c.im.stmt(line) def write_import_(self, c): c.from_(*self.schema_class_path.rsplit(":", 1)) @@ -509,7 +516,7 @@ class Codegen: def codegen(self, d, ctx=None): c = ctx or Context() if not self.accessor.config.get("skip_header_comment", False): - self.write_header(c) + self.write_header(c, comment=self.accessor.config.get("header_comment")) c.m.sep() self.write_import_(c) self.write_body(c, d) diff --git a/swagger_marshmallow_codegen/codegen/v3/codegen.py b/swagger_marshmallow_codegen/codegen/v3/codegen.py index 4b8d214..0e27d70 100644 --- a/swagger_marshmallow_codegen/codegen/v3/codegen.py +++ b/swagger_marshmallow_codegen/codegen/v3/codegen.py @@ -1,1 +1,2 @@ from ..v2.codegen import Codegen +__all__ = ["Codegen"] diff --git a/swagger_marshmallow_codegen/dispatcher.py b/swagger_marshmallow_codegen/dispatcher.py index a5252be..2bf21f1 100644 --- a/swagger_marshmallow_codegen/dispatcher.py +++ b/swagger_marshmallow_codegen/dispatcher.py @@ -22,7 +22,7 @@ TYPE_MAP = { Pair(type="string", format=None): "marshmallow.fields:String", Pair(type="boolean", format=None): "marshmallow.fields:Boolean", Pair(type="string", format="uuid"): "marshmallow.fields:UUID", - Pair(type="string", format="date-time"): "marshmallow.fields:DateTime", + Pair(type="string", format="date-time"): "marshmallow.fields:AwareDateTime", Pair(type="string", format="date"): "marshmallow.fields:Date", Pair(type="string", format="time"): "marshmallow.fields:Time", Pair(type="string", format="email"): "marshmallow.fields:Email", diff --git a/swagger_marshmallow_codegen/resolver.py b/swagger_marshmallow_codegen/resolver.py index 844e9a2..50a350c 100644 --- a/swagger_marshmallow_codegen/resolver.py +++ b/swagger_marshmallow_codegen/resolver.py @@ -1,6 +1,5 @@ # -*- coding:utf-8 -*- import logging -import sys from collections import OrderedDict import dictknife from .langhelpers import titleize, normalize
CI is broken: DateTime field handling has changed (refs https://github.com/marshmallow-code/marshmallow/issues/1234)
podhmo/swagger-marshmallow-codegen
diff --git a/swagger_marshmallow_codegen/tests/legacy_dst/00default.py b/swagger_marshmallow_codegen/tests/legacy_dst/00default.py index fc76808..090bfa1 100644 --- a/swagger_marshmallow_codegen/tests/legacy_dst/00default.py +++ b/swagger_marshmallow_codegen/tests/legacy_dst/00default.py @@ -10,7 +10,8 @@ class X(Schema): string = fields.String(missing=lambda: 'default') integer = fields.Integer(missing=lambda: 10) boolean = fields.Boolean(missing=lambda: True) - datetime = fields.DateTime(missing=lambda: datetime.datetime(2000, 1, 1, 1, 1, 1)) + datetime = fields.AwareDateTime(missing=lambda: datetime.datetime(2000, 1, 1, 1, 1, 1, tzinfo=datetime.timezone.utc)) + date = fields.Date(missing=lambda: datetime.date(2000, 1, 1)) object = fields.Nested(lambda: XObject(), missing=lambda: OrderedDict([('name', 'foo'), ('age', 20)])) array = fields.List(fields.Integer(), missing=lambda: [1, 2, 3]) diff --git a/swagger_marshmallow_codegen/tests/legacy_dst/00empty.py b/swagger_marshmallow_codegen/tests/legacy_dst/00empty.py index c71e243..0890aac 100644 --- a/swagger_marshmallow_codegen/tests/legacy_dst/00empty.py +++ b/swagger_marshmallow_codegen/tests/legacy_dst/00empty.py @@ -1,3 +1,4 @@ +# flake8: noqa from marshmallow import ( Schema, fields, diff --git a/swagger_marshmallow_codegen/tests/legacy_dst/00paths.py b/swagger_marshmallow_codegen/tests/legacy_dst/00paths.py index b8effe3..3b4e48e 100644 --- a/swagger_marshmallow_codegen/tests/legacy_dst/00paths.py +++ b/swagger_marshmallow_codegen/tests/legacy_dst/00paths.py @@ -15,7 +15,7 @@ class Pet(Schema): name = fields.String(required=True, description="Pet's name", validate=[Length(min=1, max=100, equal=None)]) animal_type = fields.String(required=True, description='Kind of animal', validate=[Length(min=1, max=None, equal=None)]) tags = fields.Field(description='Custom tags') - created = fields.DateTime(description='Creation time', dump_only=True) + created = fields.AwareDateTime(description='Creation time', dump_only=True) class PetsInput: diff --git a/swagger_marshmallow_codegen/tests/legacy_src/00default.yaml b/swagger_marshmallow_codegen/tests/legacy_src/00default.yaml index df6f0e9..cad6cef 100644 --- a/swagger_marshmallow_codegen/tests/legacy_src/00default.yaml +++ b/swagger_marshmallow_codegen/tests/legacy_src/00default.yaml @@ -14,6 +14,10 @@ definitions: type: string format: date-time default: 2000-01-01T01:01:01Z + date: + type: string + format: date + default: 2000-01-01 object: type: object properties: diff --git a/swagger_marshmallow_codegen/tests/test_codegen_legacy.py b/swagger_marshmallow_codegen/tests/test_codegen_legacy.py index b3f67b9..d87dbd0 100644 --- a/swagger_marshmallow_codegen/tests/test_codegen_legacy.py +++ b/swagger_marshmallow_codegen/tests/test_codegen_legacy.py @@ -8,51 +8,53 @@ here = pathlib.Path(__file__).parent @pytest.mark.parametrize( - "src_file, dst_file", + "src_file, dst_file, header_comment", [ - ("./legacy_src/00person.yaml", "./legacy_dst/00person.py"), - ("./legacy_src/01person.yaml", "./legacy_dst/01person.py"), - ("./legacy_src/02person.yaml", "./legacy_dst/02person.py"), - ("./legacy_src/03person.yaml", "./legacy_dst/03person.py"), - ("./legacy_src/04person.yaml", "./legacy_dst/04person.py"), - ("./legacy_src/05person.yaml", "./legacy_dst/05person.py"), - ("./legacy_src/00commit.yaml", "./legacy_dst/00commit.py"), - ("./legacy_src/01commit.yaml", "./legacy_dst/01commit.py"), - ("./legacy_src/00emojis.yaml", "./legacy_dst/00emojis.py"), - ("./legacy_src/00stat.yaml", "./legacy_dst/00stat.py"), - 
("./legacy_src/00default.yaml", "./legacy_dst/00default.py"), - ("./legacy_src/00maximum.yaml", "./legacy_dst/00maximum.py"), - ("./legacy_src/00length.yaml", "./legacy_dst/00length.py"), - ("./legacy_src/00regex.yaml", "./legacy_dst/00regex.py"), - ("./legacy_src/00enum.yaml", "./legacy_dst/00enum.py"), - ("./legacy_src/00items.yaml", "./legacy_dst/00items.py"), - ("./legacy_src/00readonly.yaml", "./legacy_dst/00readonly.py"), - ("./legacy_src/00allOf.yaml", "./legacy_dst/00allOf.py"), - ("./legacy_src/00allOf2.yaml", "./legacy_dst/00allOf2.py"), - ("./legacy_src/01allOf2.yaml", "./legacy_dst/01allOf2.py"), - ("./legacy_src/02allOf2.yaml", "./legacy_dst/02allOf2.py"), - ("./legacy_src/00paths.yaml", "./legacy_dst/00paths.py"), - ("./legacy_src/01paths.yaml", "./legacy_dst/01paths.py"), - ("./legacy_src/02paths.yaml", "./legacy_dst/02paths.py"), - ("./legacy_src/03paths.yaml", "./legacy_dst/03paths.py"), - ("./legacy_src/00empty.yaml", "./legacy_dst/00empty.py"), - ("./legacy_src/01empty.yaml", "./legacy_dst/01empty.py"), + ("./legacy_src/00person.yaml", "./legacy_dst/00person.py", ""), + ("./legacy_src/01person.yaml", "./legacy_dst/01person.py", ""), + ("./legacy_src/02person.yaml", "./legacy_dst/02person.py", ""), + ("./legacy_src/03person.yaml", "./legacy_dst/03person.py", ""), + ("./legacy_src/04person.yaml", "./legacy_dst/04person.py", ""), + ("./legacy_src/05person.yaml", "./legacy_dst/05person.py", ""), + ("./legacy_src/00commit.yaml", "./legacy_dst/00commit.py", ""), + ("./legacy_src/01commit.yaml", "./legacy_dst/01commit.py", ""), + ("./legacy_src/00emojis.yaml", "./legacy_dst/00emojis.py", ""), + ("./legacy_src/00stat.yaml", "./legacy_dst/00stat.py", ""), + ("./legacy_src/00default.yaml", "./legacy_dst/00default.py", ""), + ("./legacy_src/00maximum.yaml", "./legacy_dst/00maximum.py", ""), + ("./legacy_src/00length.yaml", "./legacy_dst/00length.py", ""), + ("./legacy_src/00regex.yaml", "./legacy_dst/00regex.py", ""), + ("./legacy_src/00enum.yaml", "./legacy_dst/00enum.py", ""), + ("./legacy_src/00items.yaml", "./legacy_dst/00items.py", ""), + ("./legacy_src/00readonly.yaml", "./legacy_dst/00readonly.py", ""), + ("./legacy_src/00allOf.yaml", "./legacy_dst/00allOf.py", ""), + ("./legacy_src/00allOf2.yaml", "./legacy_dst/00allOf2.py", ""), + ("./legacy_src/01allOf2.yaml", "./legacy_dst/01allOf2.py", ""), + ("./legacy_src/02allOf2.yaml", "./legacy_dst/02allOf2.py", ""), + ("./legacy_src/00paths.yaml", "./legacy_dst/00paths.py", ""), + ("./legacy_src/01paths.yaml", "./legacy_dst/01paths.py", ""), + ("./legacy_src/02paths.yaml", "./legacy_dst/02paths.py", ""), + ("./legacy_src/03paths.yaml", "./legacy_dst/03paths.py", ""), + ("./legacy_src/00empty.yaml", "./legacy_dst/00empty.py", "# flake8: noqa"), + ("./legacy_src/01empty.yaml", "./legacy_dst/01empty.py", ""), ( "./legacy_src/00list_with_options.yaml", "./legacy_dst/00list_with_options.py", + "", ), - ("./legacy_src/00reserved.yaml", "./legacy_dst/00reserved.py"), - ("./legacy_src/00typearray.yaml", "./legacy_dst/00typearray.py"), - ("./legacy_src/00additional.yaml", "./legacy_dst/00additional.py"), - ("./legacy_src/01additional.yaml", "./legacy_dst/01additional.py"), - ("./legacy_src/00nullable.yaml", "./legacy_dst/00nullable.py"), - ("./legacy_src/00primitiveapi.yaml", "./legacy_dst/00primitiveapi.py"), + ("./legacy_src/00reserved.yaml", "./legacy_dst/00reserved.py", ""), + ("./legacy_src/00typearray.yaml", "./legacy_dst/00typearray.py", ""), + ("./legacy_src/00additional.yaml", "./legacy_dst/00additional.py", ""), + 
("./legacy_src/01additional.yaml", "./legacy_dst/01additional.py", ""), + ("./legacy_src/00nullable.yaml", "./legacy_dst/00nullable.py", ""), + ("./legacy_src/00primitiveapi.yaml", "./legacy_dst/00primitiveapi.py", ""), # ("./legacy_src/00patternProperties.yaml", "./legacy_dst/00patternProperties.py"), not supported yet ], ) -def test_v2( - src_file, - dst_file, +def test( + src_file: str, + dst_file: str, + header_comment: str, ): from swagger_marshmallow_codegen.lifting import lifting_definition from swagger_marshmallow_codegen.codegen import Context @@ -62,9 +64,8 @@ def test_v2( get_codegen().codegen( lifting_definition(d), - {"schema": True, "input": True, "output": True}, + {"schema": True, "input": True, "output": True, "header_comment": header_comment}, ctx=ctx, - test=True, ) expected = load_dstfile(dst_file, here=here).rstrip("\n")
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 3, "test_score": 2 }, "num_modified_files": 5 }
0.5
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "flake8" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
black==25.1.0 click==8.1.8 dictknife==0.14.1 exceptiongroup==1.2.2 flake8==7.2.0 iniconfig==2.1.0 magicalimport==0.9.1 marshmallow==3.26.1 mccabe==0.7.0 mypy-extensions==1.0.0 packaging==24.2 pathspec==0.12.1 platformdirs==4.3.7 pluggy==1.5.0 prestring==0.9.0 pycodestyle==2.13.0 pyflakes==3.3.1 pytest==8.3.5 ruamel.yaml==0.18.10 ruamel.yaml.clib==0.2.12 -e git+https://github.com/podhmo/swagger-marshmallow-codegen.git@6d5dcfa88e8882a293434e3c3fcbf4837fd21c7d#egg=swagger_marshmallow_codegen tomli==2.2.1 tomlkit==0.13.2 typing_extensions==4.13.0
name: swagger-marshmallow-codegen channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - black==25.1.0 - click==8.1.8 - dictknife==0.14.1 - exceptiongroup==1.2.2 - flake8==7.2.0 - iniconfig==2.1.0 - magicalimport==0.9.1 - marshmallow==3.26.1 - mccabe==0.7.0 - mypy-extensions==1.0.0 - packaging==24.2 - pathspec==0.12.1 - platformdirs==4.3.7 - pluggy==1.5.0 - prestring==0.9.0 - pycodestyle==2.13.0 - pyflakes==3.3.1 - pytest==8.3.5 - ruamel-yaml==0.18.10 - ruamel-yaml-clib==0.2.12 - tomli==2.2.1 - tomlkit==0.13.2 - typing-extensions==4.13.0 prefix: /opt/conda/envs/swagger-marshmallow-codegen
[ "swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/00person.yaml-./legacy_dst/00person.py-]", "swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/01person.yaml-./legacy_dst/01person.py-]", "swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/02person.yaml-./legacy_dst/02person.py-]", "swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/03person.yaml-./legacy_dst/03person.py-]", "swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/04person.yaml-./legacy_dst/04person.py-]", "swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/05person.yaml-./legacy_dst/05person.py-]", "swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/00commit.yaml-./legacy_dst/00commit.py-]", "swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/01commit.yaml-./legacy_dst/01commit.py-]", "swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/00emojis.yaml-./legacy_dst/00emojis.py-]", "swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/00stat.yaml-./legacy_dst/00stat.py-]", "swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/00maximum.yaml-./legacy_dst/00maximum.py-]", "swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/00length.yaml-./legacy_dst/00length.py-]", "swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/00regex.yaml-./legacy_dst/00regex.py-]", "swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/00enum.yaml-./legacy_dst/00enum.py-]", "swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/00items.yaml-./legacy_dst/00items.py-]", "swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/00readonly.yaml-./legacy_dst/00readonly.py-]", "swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/00allOf.yaml-./legacy_dst/00allOf.py-]", "swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/00allOf2.yaml-./legacy_dst/00allOf2.py-]", "swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/01allOf2.yaml-./legacy_dst/01allOf2.py-]", "swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/02allOf2.yaml-./legacy_dst/02allOf2.py-]", "swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/00paths.yaml-./legacy_dst/00paths.py-]", "swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/01paths.yaml-./legacy_dst/01paths.py-]", "swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/02paths.yaml-./legacy_dst/02paths.py-]", "swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/03paths.yaml-./legacy_dst/03paths.py-]", "swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/00empty.yaml-./legacy_dst/00empty.py-#", "swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/01empty.yaml-./legacy_dst/01empty.py-]", "swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/00list_with_options.yaml-./legacy_dst/00list_with_options.py-]", "swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/00reserved.yaml-./legacy_dst/00reserved.py-]", "swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/00typearray.yaml-./legacy_dst/00typearray.py-]", 
"swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/00additional.yaml-./legacy_dst/00additional.py-]", "swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/01additional.yaml-./legacy_dst/01additional.py-]", "swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/00nullable.yaml-./legacy_dst/00nullable.py-]", "swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/00primitiveapi.yaml-./legacy_dst/00primitiveapi.py-]" ]
[ "swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/00default.yaml-./legacy_dst/00default.py-]" ]
[]
[]
MIT License
8,860
1,076
[ "swagger_marshmallow_codegen/codegen/config.py", "swagger_marshmallow_codegen/codegen/v2/codegen.py", "swagger_marshmallow_codegen/codegen/v3/codegen.py", "swagger_marshmallow_codegen/dispatcher.py", "swagger_marshmallow_codegen/resolver.py" ]
Materials-Consortia__optimade-python-tools-586
d48b7a80e24aad591857c386498774037bbec89c
2020-11-03 14:23:25
f9d3b8a8fdf5e438f2b7c746bcdefb661de9c86c
codecov[bot]: # [Codecov](https://codecov.io/gh/Materials-Consortia/optimade-python-tools/pull/586?src=pr&el=h1) Report > Merging [#586](https://codecov.io/gh/Materials-Consortia/optimade-python-tools/pull/586?src=pr&el=desc) into [master](https://codecov.io/gh/Materials-Consortia/optimade-python-tools/commit/d48b7a80e24aad591857c386498774037bbec89c?el=desc) will **decrease** coverage by `0.00%`. > The diff coverage is `100.00%`. [![Impacted file tree graph](https://codecov.io/gh/Materials-Consortia/optimade-python-tools/pull/586/graphs/tree.svg?width=650&height=150&src=pr&token=UJAtmqkZZO)](https://codecov.io/gh/Materials-Consortia/optimade-python-tools/pull/586?src=pr&el=tree) ```diff @@ Coverage Diff @@ ## master #586 +/- ## ========================================== - Coverage 91.77% 91.77% -0.01% ========================================== Files 62 62 Lines 3222 3220 -2 ========================================== - Hits 2957 2955 -2 Misses 265 265 ``` | Flag | Coverage Δ | | |---|---|---| | project | `91.77% <100.00%> (-0.01%)` | :arrow_down: | | validator | `64.68% <0.00%> (+0.07%)` | :arrow_up: | Flags with carried forward coverage won't be shown. [Click here](https://docs.codecov.io/docs/carryforward-flags#carryforward-flags-in-the-pull-request-comment) to find out more. | [Impacted Files](https://codecov.io/gh/Materials-Consortia/optimade-python-tools/pull/586?src=pr&el=tree) | Coverage Δ | | |---|---|---| | [optimade/models/structures.py](https://codecov.io/gh/Materials-Consortia/optimade-python-tools/pull/586/diff?src=pr&el=tree#diff-b3B0aW1hZGUvbW9kZWxzL3N0cnVjdHVyZXMucHk=) | `95.75% <100.00%> (-0.04%)` | :arrow_down: | ------ [Continue to review full report at Codecov](https://codecov.io/gh/Materials-Consortia/optimade-python-tools/pull/586?src=pr&el=continue). > **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta) > `Δ = absolute <relative> (impact)`, `ø = not affected`, `? = missing data` > Powered by [Codecov](https://codecov.io/gh/Materials-Consortia/optimade-python-tools/pull/586?src=pr&el=footer). Last update [d48b7a8...c596ddc](https://codecov.io/gh/Materials-Consortia/optimade-python-tools/pull/586?src=pr&el=lastupdated). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments). ml-evs: Actually, why don't the tests I just added fail, e.g. https://github.com/Materials-Consortia/optimade-python-tools/blob/d48b7a80e24aad591857c386498774037bbec89c/tests/models/test_structures.py#L83-L85 ml-evs: > Actually, why don't the tests I just added fail, e.g. > > https://github.com/Materials-Consortia/optimade-python-tools/blob/d48b7a80e24aad591857c386498774037bbec89c/tests/models/test_structures.py#L83-L85 Okay mb, I see that test was updated (it just looked different to my local version) CasperWA: > Actually, why don't the tests I just added fail, e.g. > > https://github.com/Materials-Consortia/optimade-python-tools/blob/d48b7a80e24aad591857c386498774037bbec89c/tests/models/test_structures.py#L83-L85 Because I updated this exact test (as mentioned in the OP and found in the `git diff`).
diff --git a/optimade/models/structures.py b/optimade/models/structures.py index e1aebb19..82ad7f1c 100644 --- a/optimade/models/structures.py +++ b/optimade/models/structures.py @@ -774,11 +774,12 @@ The properties of the species are found in the property `species`. expected_elements = sorted(elements) if field.name == "chemical_formula_hill": - # Make sure C is first and H is second. - for elem in ("H", "C"): - if elem in expected_elements: - expected_elements.pop(expected_elements.index(elem)) - expected_elements.insert(0, elem) + # Make sure C is first (and H is second, if present along with C). + if "C" in expected_elements: + expected_elements = sorted( + expected_elements, + key=lambda elem: {"C": "0", "H": "1"}.get(elem, elem), + ) if any(elem not in CHEMICAL_SYMBOLS for elem in elements): raise ValueError(
Hill notation wrong (still) Actual definition: - If carbon is present, place it first, then hydrogen if present. - Following hydrogen, sort elements alphabetically. - If carbon is _not_ present, sort elements alphabetically, even if hydrogen is present. Examples of valid Hill notation formulas: - C H<sub>4</sub> Cl O - Cl H<sub>4</sub> O - C Cl O Incorrect Hill notation: - H<sub>4</sub> Cl O (See, e.g., the [wikipedia entry](https://en.wikipedia.org/wiki/Chemical_formula#Hill_system) for Hill notation.)
Materials-Consortia/optimade-python-tools
diff --git a/tests/models/test_structures.py b/tests/models/test_structures.py index 1a6b8b56..2828c608 100644 --- a/tests/models/test_structures.py +++ b/tests/models/test_structures.py @@ -80,8 +80,8 @@ deformities = ( "Elements in 'chemical_formula_hill' must appear in Hill order: ['Ge', 'Si'] not ['Si', 'Ge']", ), ( - {"chemical_formula_hill": "GeHSi"}, - "Elements in 'chemical_formula_hill' must appear in Hill order: ['H', 'Ge', 'Si'] not ['Ge', 'H', 'Si']", + {"chemical_formula_hill": "HGeSi"}, + "Elements in 'chemical_formula_hill' must appear in Hill order: ['Ge', 'H', 'Si'] not ['H', 'Ge', 'Si']", ), ( {"chemical_formula_hill": "CGeHSi"},
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_hyperlinks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 3 }, "num_modified_files": 1 }
0.12
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "pytest-xdist" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.7", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
asgiref==3.2.10 astroid==2.12.14 attrs==24.2.0 beautifulsoup4==4.12.3 bracex==2.3.post1 certifi @ file:///croot/certifi_1671487769961/work/certifi cfgv==3.3.1 chardet==3.0.4 click==7.1.2 codecov==2.1.13 coverage==7.2.7 dill==0.3.7 distlib==0.3.9 Django==3.1.2 dnspython==2.3.0 elasticsearch==7.17.12 elasticsearch-dsl==7.3.0 email-validator==1.1.1 exceptiongroup==1.2.2 execnet==2.0.2 fastapi==0.61.1 filelock==3.12.2 ghp-import==2.1.0 h11==0.14.0 htmlmin==0.1.12 identify==2.5.24 idna==2.10 importlib-metadata==6.7.0 iniconfig==2.0.0 invoke==1.7.3 isort==5.11.5 Jinja2==2.11.2 jsmin==3.0.1 jsondiff==1.3.1 lark-parser==0.10.1 lazy-object-proxy==1.9.0 Markdown==3.4.4 MarkupSafe==2.1.5 mccabe==0.7.0 mergedeep==1.3.4 mkdocs==1.5.3 mkdocs-awesome-pages-plugin==2.9.2 mkdocs-material==6.2.8 mkdocs-material-extensions==1.2 mkdocs-minify-plugin==0.3.0 mkdocstrings==0.13.6 mongomock==3.21.0 natsort==8.4.0 nodeenv==1.9.1 numpy==1.21.6 -e git+https://github.com/Materials-Consortia/optimade-python-tools.git@d48b7a80e24aad591857c386498774037bbec89c#egg=optimade packaging==24.0 pathspec==0.11.2 platformdirs==2.6.1 pluggy==1.2.0 pre-commit==2.21.0 py==1.11.0 pydantic==1.6.1 Pygments==2.17.2 pylint==2.15.10 pymdown-extensions==8.2 pymongo==3.11.0 pytest==6.2.5 pytest-cov==2.12.1 pytest-xdist==3.5.0 python-dateutil==2.9.0.post0 pytkdocs==0.9.0 pytz==2025.2 PyYAML==6.0.1 pyyaml_env_tag==0.1 requests==2.24.0 sentinels==1.0.0 six==1.17.0 soupsieve==2.4.1 sqlparse==0.4.4 starlette==0.13.6 toml==0.10.2 tomli==2.0.1 tomlkit==0.12.5 typed-ast==1.5.5 typing-extensions==3.10.0.2 urllib3==1.25.11 uvicorn==0.12.2 virtualenv==20.21.1 watchdog==3.0.0 wcmatch==8.4.1 wrapt==1.16.0 zipp==3.15.0
name: optimade-python-tools channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2022.12.7=py37h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=22.3.1=py37h06a4308_0 - python=3.7.16=h7a1cb2a_0 - readline=8.2=h5eee18b_0 - setuptools=65.6.3=py37h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.38.4=py37h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - asgiref==3.2.10 - astroid==2.12.14 - attrs==24.2.0 - beautifulsoup4==4.12.3 - bracex==2.3.post1 - cfgv==3.3.1 - chardet==3.0.4 - click==7.1.2 - codecov==2.1.13 - coverage==7.2.7 - dill==0.3.7 - distlib==0.3.9 - django==3.1.2 - dnspython==2.3.0 - elasticsearch==7.17.12 - elasticsearch-dsl==7.3.0 - email-validator==1.1.1 - exceptiongroup==1.2.2 - execnet==2.0.2 - fastapi==0.61.1 - filelock==3.12.2 - ghp-import==2.1.0 - h11==0.14.0 - htmlmin==0.1.12 - identify==2.5.24 - idna==2.10 - importlib-metadata==6.7.0 - iniconfig==2.0.0 - invoke==1.7.3 - isort==5.11.5 - jinja2==2.11.2 - jsmin==3.0.1 - jsondiff==1.3.1 - lark-parser==0.10.1 - lazy-object-proxy==1.9.0 - markdown==3.4.4 - markupsafe==2.1.5 - mccabe==0.7.0 - mergedeep==1.3.4 - mkdocs==1.5.3 - mkdocs-awesome-pages-plugin==2.9.2 - mkdocs-material==6.2.8 - mkdocs-material-extensions==1.2 - mkdocs-minify-plugin==0.3.0 - mkdocstrings==0.13.6 - mongomock==3.21.0 - natsort==8.4.0 - nodeenv==1.9.1 - numpy==1.21.6 - packaging==24.0 - pathspec==0.11.2 - platformdirs==2.6.1 - pluggy==1.2.0 - pre-commit==2.21.0 - py==1.11.0 - pydantic==1.6.1 - pygments==2.17.2 - pylint==2.15.10 - pymdown-extensions==8.2 - pymongo==3.11.0 - pytest==6.2.5 - pytest-cov==2.12.1 - pytest-xdist==3.5.0 - python-dateutil==2.9.0.post0 - pytkdocs==0.9.0 - pytz==2025.2 - pyyaml==6.0.1 - pyyaml-env-tag==0.1 - requests==2.24.0 - sentinels==1.0.0 - six==1.17.0 - soupsieve==2.4.1 - sqlparse==0.4.4 - starlette==0.13.6 - toml==0.10.2 - tomli==2.0.1 - tomlkit==0.12.5 - typed-ast==1.5.5 - typing-extensions==3.10.0.2 - urllib3==1.25.11 - uvicorn==0.12.2 - virtualenv==20.21.1 - watchdog==3.0.0 - wcmatch==8.4.1 - wrapt==1.16.0 - zipp==3.15.0 prefix: /opt/conda/envs/optimade-python-tools
[ "tests/models/test_structures.py::test_structure_deformities[deformity10]" ]
[]
[ "tests/models/test_structures.py::test_good_structures", "tests/models/test_structures.py::test_more_good_structures", "tests/models/test_structures.py::test_bad_structures", "tests/models/test_structures.py::test_structure_deformities[None]", "tests/models/test_structures.py::test_structure_deformities[deformity1]", "tests/models/test_structures.py::test_structure_deformities[deformity2]", "tests/models/test_structures.py::test_structure_deformities[deformity3]", "tests/models/test_structures.py::test_structure_deformities[deformity4]", "tests/models/test_structures.py::test_structure_deformities[deformity5]", "tests/models/test_structures.py::test_structure_deformities[deformity6]", "tests/models/test_structures.py::test_structure_deformities[deformity7]", "tests/models/test_structures.py::test_structure_deformities[deformity8]", "tests/models/test_structures.py::test_structure_deformities[deformity9]", "tests/models/test_structures.py::test_structure_deformities[deformity11]", "tests/models/test_structures.py::test_structure_deformities[deformity12]", "tests/models/test_structures.py::test_structure_deformities[deformity13]", "tests/models/test_structures.py::test_structure_deformities[deformity14]", "tests/models/test_structures.py::test_structure_deformities[deformity15]", "tests/models/test_structures.py::test_structure_deformities[deformity16]", "tests/models/test_structures.py::test_structure_deformities[deformity17]", "tests/models/test_structures.py::test_structure_deformities[deformity18]", "tests/models/test_structures.py::test_structure_deformities[deformity19]", "tests/models/test_structures.py::test_structure_deformities[deformity20]", "tests/models/test_structures.py::test_structure_deformities[deformity21]", "tests/models/test_structures.py::test_structure_deformities[deformity22]", "tests/models/test_structures.py::test_structure_deformities[deformity23]" ]
[]
MIT License
8,863
253
[ "optimade/models/structures.py" ]
matwey__pybeam-15
2dec4d87719d500070bf1cf3090d5a2cc62f8d9b
2020-11-03 15:00:57
2dec4d87719d500070bf1cf3090d5a2cc62f8d9b
lgtm-com[bot]: This pull request **introduces 1 alert** when merging 15b7ade6394c3939b6d13a17ca2d430928ddb634 into 2dec4d87719d500070bf1cf3090d5a2cc62f8d9b - [view on LGTM.com](https://lgtm.com/projects/g/matwey/pybeam/rev/pr-1687e21e88f7060ea78658080a383af5187b037c) **new alerts:** * 1 for Unused import
diff --git a/pybeam/beam_file.py b/pybeam/beam_file.py index eb7b126..6a70fef 100644 --- a/pybeam/beam_file.py +++ b/pybeam/beam_file.py @@ -26,53 +26,49 @@ class BeamFile(object): def __init__(self, f): if not hasattr(f, 'read'): f = open(f, "rb") - self._tree = beam.parse(f.read()) + self._chunks = beam.parse(f.read()) def selectChunkByName(self, name): - for c in self._tree.chunks: - if c.chunk_name == name: - return c - raise KeyError(name) + return self._chunks.get(name) @property def atoms(self): - try: - return self.selectChunkByName(b"AtU8").payload - except KeyError: - pass - return self.selectChunkByName(b"Atom").payload + atom = self.selectChunkByName(b"AtU8") + atom = atom if atom is not None else self.selectChunkByName(b"Atom") + return atom @property def attributes(self): attr = self.selectChunkByName(b"Attr") # convert from proplist to dict - return dict(attr.payload) + return dict(attr) if attr is not None else None @property def code(self): - code = self.selectChunkByName(b"Code").payload + code = self.selectChunkByName(b"Code") return (code.set, code.opcode_max, code.labels, code.functions, code.code) @property def compileinfo(self): cinf = self.selectChunkByName(b"CInf") - return dict(cinf.payload) + return dict(cinf) if cinf is not None else None @property def exports(self): expt = self.selectChunkByName(b"ExpT") atoms = self.atoms - return [(atoms[e.function-1], e.arity, e.label) for e in expt.payload.entry] + return [(atoms[e.function-1], e.arity, e.label) for e in expt.entry] if expt is not None else None @property def literals(self): - return [e.term for e in self.selectChunkByName(b"LitT").payload.data.entry] + litt = self.selectChunkByName(b"LitT") + return litt.entry if litt is not None else None @property def imports(self): impt = self.selectChunkByName(b"ImpT") atoms = self.atoms - return [(atoms[e.module-1], atoms[e.function-1], e.arity) for e in impt.payload.entry] + return [(atoms[e.module-1], atoms[e.function-1], e.arity) for e in impt.entry] if impt is not None else None @property def modulename(self): diff --git a/pybeam/erlang_types.py b/pybeam/erlang_types.py index 582ce50..299bc0e 100644 --- a/pybeam/erlang_types.py +++ b/pybeam/erlang_types.py @@ -20,86 +20,31 @@ # THE SOFTWARE. 
# -from six import iterbytes +from collections import namedtuple +from six import PY2 -class AtomCacheReference(object): - def __init__(self, index): - self.index = index - def __eq__(self,other): - return self.index == other.index +class AtomCacheReference(int): + pass -class Reference(object): - def __init__(self, node, id_, creation): - self.node = node - self.id = id_ - self.creation = creation - def __eq__(self,other): - return self.node == other.node and self.id == other.id and self.creation == other.creation +Reference = namedtuple("Reference", ["node", "id", "creation"]) -class Port(object): - def __init__(self, node, id_, creation): - self.node = node - self.id = id_ - self.creation = creation - def __eq__(self,other): - return self.node == other.node and self.id == other.id and self.creation == other.creation +Port = namedtuple("Port", ["node", "id", "creation"]) -class Pid(object): - def __init__(self, node, id_, serial, creation): - self.node = node - self.id = id_ - self.serial = serial - self.creation = creation - def __eq__(self,other): - return self.node == other.node and self.id == other.id and self.creation == other.creation and self.serial == other.serial +Pid = namedtuple("Pid", ["node", "id", "serial", "creation"]) -class String(object): - def __init__(self, value): - self.value = value - def __eq__(self, other): - return self.value == other.value - def __iter__(self): - return iterbytes(self.value) - def __len__(self): - return len(self.value) +if PY2: + class String(bytes): + def __getitem__(self, index): + return ord(super(String, self).__getitem__(index)) +else: + class String(bytes): + pass -class Binary(object): - def __init__(self, value): - self.value = value - def __eq__(self, other): - return self.value == other.value +class Binary(bytes): + pass -class Fun(object): - def __init__(self, arity, uniq, index, module, oldindex, olduniq, pid, free): - self.arity = arity - self.uniq = uniq - self.index = index - self.module = module - self.oldindex = oldindex - self.olduniq = olduniq - self.pid = pid - self.free = free - def __eq__(self, other): - return (self.arity == other.arity - and self.uniq == other.uniq - and self.index == other.index - and self.module == other.module - and self.oldindex == other.oldindex - and self.olduniq == other.olduniq - and self.pid == other.pid - and self.free == other.free) +Fun = namedtuple("Fun", ["arity", "uniq", "index", "module", "oldindex", "olduniq", "pid", "free"]) -class MFA(object): - def __init__(self, module, function, arity): - self.module = module - self.function = function - self.arity = arity - def __eq__(self, other): - return self.module == other.module and self.function == other.function and self.arity == other.arity +MFA = namedtuple("MFA", ["module", "function", "arity"]) -class BitBinary(object): - def __init__(self, value, bits): - self.value = value - self.bits = bits - def __eq__(self, other): - return self.value == other.value and self.bits == other.bits +BitBinary = namedtuple("BitBinary", ["value", "bits"]) diff --git a/pybeam/schema/beam/__init__.py b/pybeam/schema/beam/__init__.py index 107b7dc..1652253 100644 --- a/pybeam/schema/beam/__init__.py +++ b/pybeam/schema/beam/__init__.py @@ -20,20 +20,23 @@ # THE SOFTWARE. 
# -from construct import this -from construct import ( - Const, - FixedSized, - GreedyRange, - Int32ub, - Struct,) +from construct import Adapter, Const, FocusedSeq, GreedyRange, Int32ub, Prefixed, Terminated from pybeam.schema.beam.chunks import chunk -beam = Struct( - "for1" / Const(b'FOR1'), - "size" / Int32ub, - "beam" / Const(b'BEAM'), - "chunks" / FixedSized(this.size-4, GreedyRange(chunk))) + +class DictAdapter(Adapter): + def _decode(self, obj, context, path): + return dict(obj) + + def _encode(self, obj, context, path): + return obj.items() + +beam = FocusedSeq("chunks", + Const(b'FOR1'), + "chunks" / Prefixed(Int32ub, FocusedSeq("chunks", + Const(b'BEAM'), + "chunks" / DictAdapter(GreedyRange(chunk)), + Terminated))) __all__ = ["beam"] diff --git a/pybeam/schema/beam/chunks.py b/pybeam/schema/beam/chunks.py index 26696b3..3d0a373 100644 --- a/pybeam/schema/beam/chunks.py +++ b/pybeam/schema/beam/chunks.py @@ -26,14 +26,13 @@ from construct import ( Aligned, Bytes, Compressed, - Computed, - FixedSized, GreedyBytes, Int32ub, Int8ub, PascalString, Prefixed, PrefixedArray, + Sequence, Struct, Switch,) @@ -53,8 +52,7 @@ Code = Struct("headerlen" / Int32ub, "labels" / Int32ub, "functions" / Int32ub, Bytes(lambda ctx: ctx.headerlen-16), - Bytes(lambda ctx: ctx._.size-ctx.headerlen-4), - ) + GreedyBytes) ExpT = Struct("entry" / PrefixedArray(Int32ub, Struct("function" / Int32ub, "arity" / Int32ub, @@ -64,21 +62,17 @@ ImpT = Struct("entry" / PrefixedArray(Int32ub, Struct("module" / Int32ub, "function" / Int32ub, "arity" / Int32ub))) -uncomp_chunk_litt = Struct("entry" / PrefixedArray(Int32ub, Prefixed(Int32ub, Struct("term" / external_term)))) +uncomp_chunk_litt = PrefixedArray(Int32ub, Prefixed(Int32ub, external_term)) LitT = Struct(Int32ub, - "data" / Prefixed(Computed(lambda ctx: ctx._.size-4), - Compressed(uncomp_chunk_litt, "zlib") - ) -) + "entry" / Compressed(uncomp_chunk_litt, "zlib")) LocT = PrefixedArray(Int32ub, Struct("function" / Int32ub, "arity" / Int32ub, "label" / Int32ub)) -chunk = Struct( +chunk = Sequence( "chunk_name" / Bytes(4), - "size" / Int32ub, - "payload" / Aligned(4, FixedSized(this.size, Switch(this.chunk_name, { + Aligned(4, Prefixed(Int32ub, Switch(this.chunk_name, { # "Abst" : chunk_abst, b"Atom" : Atom, b"AtU8" : AtU8, @@ -93,7 +87,6 @@ chunk = Struct( b"LocT" : LocT, # "StrT" : chunk_strt, # "Trac" : chunk_trac, - }, default=GreedyBytes))), - ) + }, default=GreedyBytes)))) __all__ = ["chunk"] diff --git a/pybeam/schema/eetf.py b/pybeam/schema/eetf.py index 2685110..b2e0846 100644 --- a/pybeam/schema/eetf.py +++ b/pybeam/schema/eetf.py @@ -133,7 +133,7 @@ def tag(obj): term_ = LazyBound(lambda: term) atom_cache_ref = ExprAdapter(Int8ub, - encoder=lambda obj, ctx: obj.index, + encoder=lambda obj, ctx: obj, decoder=lambda obj, ctx: AtomCacheReference(obj)) small_integer = Int8ub integer = Int32sb @@ -163,13 +163,13 @@ nil = ExprAdapter(Sequence(), encoder=lambda obj, ctx: (), decoder=lambda obj, ctx: []) string = ExprAdapter(Prefixed(Int16ub, GreedyBytes), - encoder=lambda obj, ctx: obj.value, + encoder=lambda obj, ctx: obj, decoder=lambda obj, ctx: etString(obj)) list_ = ListAdapter(Sequence("len" / Int32ub, Array(this.len, term_), term_)) binary = ExprAdapter(Prefixed(Int32ub, GreedyBytes), - encoder=lambda obj, ctx: obj.value, + encoder=lambda obj, ctx: obj, decoder=lambda obj, ctx: Binary(obj)) small_big = BigInteger(Int8ub) large_big = BigInteger(Int32ub)
KeyError: b'CInf' for a BEAM file The following fails: ``` BeamFile('Elixir.IEx.Introspection.beam').compileinfo['source'] KeyError Traceback (most recent call last) <ipython-input-3-7cf8d4bc6f05> in <module> ----> 1 _2.compileinfo['source'] /usr/lib/python3.8/site-packages/pybeam/beam_file.py in compileinfo(self) 56 @property 57 def compileinfo(self): ---> 58 cinf = self.selectChunkByName(b"CInf") 59 return dict(cinf.payload) 60 /usr/lib/python3.8/site-packages/pybeam/beam_file.py in selectChunkByName(self, name) 33 if c.chunk_name == name: 34 return c ---> 35 raise KeyError(name) 36 37 @property KeyError: b'CInf' ``` [Elixir.IEx.Introspection.beam.txt](https://github.com/matwey/pybeam/files/5477601/Elixir.IEx.Introspection.beam.txt)
matwey/pybeam
diff --git a/test/beam_file.py b/test/beam_file.py index 66a02c9..5b53273 100644 --- a/test/beam_file.py +++ b/test/beam_file.py @@ -27,7 +27,7 @@ import io class BEAMFileTest(unittest.TestCase): def setUp(self): - self.raw = b'FOR1\x00\x00\x02\xb4BEAMAtom\x00\x00\x00U\x00\x00\x00\x08\x08ssh_math\x04ipow\x06crypto\x07mod_pow\x10bytes_to_integer\x0bmodule_info\x06erlang\x0fget_module_info\x00\x00\x00Code\x00\x00\x00\\\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x99\x00\x00\x00\x07\x00\x00\x00\x03\x01\x10\x99\x10\x02\x12"0\x01 \'\x15\x01#(\x15\x13\x01\x0c\x000\x99 \x070\x00\x99 \x08\x10\x10\x00\x010\x99\x00\x02\x12b\x00\x01@@\x12\x03\x99\x00N\x10 \x01P\x99\x00\x02\x12b\x10\x01`@\x03\x13@\x12\x03\x99\x00N 0\x03StrT\x00\x00\x00\x00ImpT\x00\x00\x004\x00\x00\x00\x04\x00\x00\x00\x03\x00\x00\x00\x04\x00\x00\x00\x03\x00\x00\x00\x03\x00\x00\x00\x05\x00\x00\x00\x01\x00\x00\x00\x07\x00\x00\x00\x08\x00\x00\x00\x01\x00\x00\x00\x07\x00\x00\x00\x08\x00\x00\x00\x02ExpT\x00\x00\x00(\x00\x00\x00\x03\x00\x00\x00\x06\x00\x00\x00\x01\x00\x00\x00\x06\x00\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00\x02Attr\x00\x00\x00(\x83l\x00\x00\x00\x01h\x02d\x00\x03vsnl\x00\x00\x00\x01n\x10\x00\x8f\xde\xf9V}\xf3wr\x8a\x93\xc1p\xedDK\x9ajjCInf\x00\x00\x01@\x83l\x00\x00\x00\x04h\x02d\x00\x07optionsl\x00\x00\x00\x04h\x02d\x00\x06outdirk\x00</home/abuild/rpmbuild/BUILD/otp_src_17.1/lib/ssh/src/../ebinh\x02d\x00\x01ik\x007/home/abuild/rpmbuild/BUILD/otp_src_17.1/lib/kernel/srcd\x00\x10warn_unused_varsd\x00\ndebug_infojh\x02d\x00\x07versionk\x00\x055.0.1h\x02d\x00\x04timeh\x06b\x00\x00\x07\xe0a\x02a\x0fa\x0ba\x08a\x12h\x02d\x00\x06sourcek\x00A/home/abuild/rpmbuild/BUILD/otp_src_17.1/lib/ssh/src/ssh_math.erlj' + self.raw = b'FOR1\x00\x00\x02\xd4BEAMAtom\x00\x00\x00U\x00\x00\x00\x08\x08ssh_math\x04ipow\x06crypto\x07mod_pow\x10bytes_to_integer\x0bmodule_info\x06erlang\x0fget_module_info\x00\x00\x00Code\x00\x00\x00\\\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x99\x00\x00\x00\x07\x00\x00\x00\x03\x01\x10\x99\x10\x02\x12"0\x01 \'\x15\x01#(\x15\x13\x01\x0c\x000\x99 \x070\x00\x99 \x08\x10\x10\x00\x010\x99\x00\x02\x12b\x00\x01@@\x12\x03\x99\x00N\x10 \x01P\x99\x00\x02\x12b\x10\x01`@\x03\x13@\x12\x03\x99\x00N 0\x03StrT\x00\x00\x00\x00ImpT\x00\x00\x004\x00\x00\x00\x04\x00\x00\x00\x03\x00\x00\x00\x04\x00\x00\x00\x03\x00\x00\x00\x03\x00\x00\x00\x05\x00\x00\x00\x01\x00\x00\x00\x07\x00\x00\x00\x08\x00\x00\x00\x01\x00\x00\x00\x07\x00\x00\x00\x08\x00\x00\x00\x02ExpT\x00\x00\x00(\x00\x00\x00\x03\x00\x00\x00\x06\x00\x00\x00\x01\x00\x00\x00\x06\x00\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00\x02Attr\x00\x00\x00(\x83l\x00\x00\x00\x01h\x02d\x00\x03vsnl\x00\x00\x00\x01n\x10\x00\x8f\xde\xf9V}\xf3wr\x8a\x93\xc1p\xedDK\x9ajjCInf\x00\x00\x01@\x83l\x00\x00\x00\x04h\x02d\x00\x07optionsl\x00\x00\x00\x04h\x02d\x00\x06outdirk\x00</home/abuild/rpmbuild/BUILD/otp_src_17.1/lib/ssh/src/../ebinh\x02d\x00\x01ik\x007/home/abuild/rpmbuild/BUILD/otp_src_17.1/lib/kernel/srcd\x00\x10warn_unused_varsd\x00\ndebug_infojh\x02d\x00\x07versionk\x00\x055.0.1h\x02d\x00\x04timeh\x06b\x00\x00\x07\xe0a\x02a\x0fa\x0ba\x08a\x12h\x02d\x00\x06sourcek\x00A/home/abuild/rpmbuild/BUILD/otp_src_17.1/lib/ssh/src/ssh_math.erljLitT\x00\x00\x00\x18\x00\x00\x00\x0ax\x9cc```d```j\xce\x02\x00\x01\x87\x00\xf1\x00\x00' self.io = io.BytesIO(self.raw) self.beam = beam_file.BeamFile(self.io) def test_attr(self): @@ -51,3 +51,5 @@ class BEAMFileTest(unittest.TestCase): self.assertListEqual([('crypto', 'mod_pow', 3), 
('crypto', 'bytes_to_integer', 1), ('erlang', 'get_module_info', 1), ('erlang', 'get_module_info', 2)], self.beam.imports) def test_modulename(self): self.assertEqual('ssh_math', self.beam.modulename) + def test_literals(self): + self.assertListEqual([[]], self.beam.literals) diff --git a/test/schema_beam.py b/test/schema_beam.py index 9b9ee5f..c38fb8c 100644 --- a/test/schema_beam.py +++ b/test/schema_beam.py @@ -21,8 +21,9 @@ # from pybeam.schema import beam -from pybeam.schema.beam.chunks import Atom, AtU8, Attr, CInf, chunk +from pybeam.schema.beam.chunks import Atom, AtU8, Attr, CInf, LitT from construct import Container, StreamError +from construct.core import TerminatedError import unittest class BEAMConstructTest(unittest.TestCase): @@ -30,15 +31,15 @@ class BEAMConstructTest(unittest.TestCase): pass def test_beam1(self): c = beam - self.assertEqual(c.parse(b'FOR1\x00\x00\x00\x04BEAM'), Container(for1=b"FOR1", beam=b"BEAM", chunks=[], size=4)) + self.assertEqual(c.parse(b'FOR1\x00\x00\x00\x04BEAM'), {}) def test_beam2(self): c = beam raw = b'FOR1\x00\x00\x02TBEAMAtU8\x00\x00\x002\x00\x00\x00\x07\x01m\x04fact\x06erlang\x01-\x01*\x0bmodule_info\x0fget_module_info\x00\x00Code\x00\x00\x00w\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x99\x00\x00\x00\x08\x00\x00\x00\x03\x01\x10\x99\x10\x02\x12"\x10\x01 \'5\x01\x03\x0e\x10\x10\x99\x10}\x05\x10\x00\x03\x11\x13@\x03\x04@\x13\x03\x99\x10\x04\x10%\x99\x10}\x05\x10\x10\x04\x03\x03\x12\x10\x13\x010+\x15\x03\x01@\x11\x03\x13\x01@\x99\x00\x02\x12b\x00\x01P@\x12\x03\x99\x00N\x10 \x01`\x99\x00\x02\x12b\x10\x01p@\x03\x13@\x12\x03\x99\x00N 0\x03\x00StrT\x00\x00\x00\x00ImpT\x00\x00\x004\x00\x00\x00\x04\x00\x00\x00\x03\x00\x00\x00\x04\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00\x05\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00\x07\x00\x00\x00\x01\x00\x00\x00\x03\x00\x00\x00\x07\x00\x00\x00\x02ExpT\x00\x00\x00(\x00\x00\x00\x03\x00\x00\x00\x06\x00\x00\x00\x01\x00\x00\x00\x07\x00\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02LocT\x00\x00\x00\x04\x00\x00\x00\x00Attr\x00\x00\x00(\x83l\x00\x00\x00\x01h\x02d\x00\x03vsnl\x00\x00\x00\x01n\x10\x007\xfc\x18\xc42\x03\xc0\xfa\xe0\x91w.a\xb8\xebqjjCInf\x00\x00\x00l\x83l\x00\x00\x00\x03h\x02d\x00\x07optionsl\x00\x00\x00\x01d\x00\rno_debug_infojh\x02d\x00\x07versionk\x00\x057.1.5h\x02d\x00\x06sourcek\x00!/home/matwey/rpmbuild/BUILD/m.erljDbgi\x00\x00\x00F\x83h\x03d\x00\rdebug_info_v1d\x00\x11erl_abstract_codeh\x02d\x00\x04nonel\x00\x00\x00\x01d\x00\rno_debug_infoj\x00\x00Line\x00\x00\x00\x15\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x01\x00\x00\x00\x00A\x00\x00\x00' - parsed = c.parse(raw) - self.assertEqual(len(raw), 604) - self.assertEqual(parsed.for1, b'FOR1') - self.assertEqual(parsed.size, 596) - self.assertEqual(parsed.beam, b'BEAM') + chunks = c.parse(raw) + self.assertSetEqual(set(chunks.keys()), set([b'AtU8', b'Code', b'StrT', b'ImpT', b'ExpT', b'LocT', b'Attr', b'CInf', b'Dbgi', b'Line'])) + def test_beam3(self): + c = beam + self.assertRaises(TerminatedError, lambda: c.parse(b'FOR1\x00\x00\x00\x0cBEAMAtU8\x00\x00\x002')) def test_chunk_atom(self): c = Atom self.assertEqual(c.parse(b'\x00\x00\x00\x00'), []) @@ -63,9 +64,12 @@ class BEAMConstructTest(unittest.TestCase): self.assertEqual(c.parse(b'\x83\x64\x00\x08burtovoy'), u"burtovoy") self.assertEqual(c.parse(c.build(u"burtovoy")), u"burtovoy") self.assertEqual(c.parse(b'\x83\x6a'), []) - def test_chunk_litt(self): - c = chunk + def test_chunk_litt1(self): + c = LitT littc = 
b'x\x9cc```d```j\xce\x02\x00\x01\x87\x00\xf1' - litt = b'LitT\x00\x00\x00\x16\x00\x00\x00\x0a' + littc + b'\x00\x00' - self.assertEqual(c.parse(litt).payload.data.entry[0].term, []) - + litt = b'\x00\x00\x00\x0a' + littc + b'\x00\x00' + self.assertEqual(c.parse(litt).entry[0], []) + def test_chunk_litt2(self): + c = LitT + litt = b'\x00\x00\x03?x\x9cuR\xc1n\xd40\x10M\xb2mi\xab]T@THP4\xea\r\x84\x96\x13|\x01\x0b\x8a\xb4\x80T8\xf4f9\xce,\xf1\xaecG\xb6S\xef\nqZ\x89/\xe0\x02\'>\x80\x8f\xe1\x17\xf8\x0e.\xd8\xde\x84\x94\x03\x97\xf1\xf8yf\xde\x9b\x19\'Ir?I\x92\xcb\xad\xf0\xf6\xa0\xca\\Z\xd0\xd4\x1fG%2U7\xca \xcd\x02ZEtO5(\xa3\x97\xdax\x9cYM\xa5\x11\xd4"aT\x88\x82\xb2\x15\x91\xb4FB5\xb7\x1b\x9a.}\xd5\xd3m\xed\xed\xc9\xdd\xdf\xe3[_\xcf\x9e\x7f~\x9c\x8d\'\xbf\xbe\xff\xfc\xe1\xb1;\xf1ebqm\x9f\xd6T\xafJ\xe5d\x90\x11\xe1$z\xb6\xf3nD,\x05o\x1em\xab\x91\xcb>~Z\x06\xc9#7\x99\t\xbe\xe6z\xfaZ\x95\xad@wH\x08\x97\x0bE\xc8\x8e\xfca\x08\xfe\x7f\xc8 \xa2T\xac\xadQZj\xb9\n"\x1e\xc4\x89d\xee\xa4\xcf\xa5L\xab\xe9L^\xf9\xd6Ca\xe8\xde\x0f\xfb\xbe\xdd\xa4\x0e!\xfd5\xc4\xdc\x0e\xe4#\xc9\xc5`<\xba\xbf]%\xe9\xb1w\xee\xf5%\x16\xadd\x81\xd6\xed\xc7\x12!\xf5[T\xf5%_\x00\x97\xc6"-A-\x802\x86\xc6p\xf9\x01\xfeQ\x0b\x1b\xd5\x82S\xad(A\xf0\x15B\xad4B\xe8P\xd7\xbbwZ\xa8\xd6\x02\x85+*Z\x04\xa5;\xc0V\x08\x1aM+l,.\x01\xd7\x8d\x0e\x04J>\x81\xd6`\x0c8\xe7\xe7P\xa1hP\xf7J\xbc\xaeWq/i\x90\x98\xa1\x0c\xc7\xb3\xddl\xc1Q\x03\xe1\xe3p\x81%8n\xab@\xe4\xd5\x9a)\xbc\xab\x94\x0b\xda\x95\x14\x1b0\rz,\x0c>\x0e!u{v\xd3\xe0r\xd8\xf4\xf1\xb0\x9a\xd9<\xbf\xcc/\xc8\xecE\xfe\xfe\xed\x85\x07\xc6\x11>\xf8{\xbf\x19\xef~\xab/\xf3\xf9\x8c\x90\xeb\xc8<\x7f\xb3CN;\x9a\t-\x8c\xff\xb3\xcc\x12\xa6\xca\xc8w\x14cG\x82\x17!e\xdb\xf54\xfc\xdb\xa0\xeb\xfa4\x87\x14\xa3\xd9\x1f2\xc5\xe6\n\x00\x00\x00' + self.assertEqual(len(c.parse(litt).entry), 27)
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 2 }, "num_modified_files": 5 }
0.6
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
alabaster==0.7.13 attrs==22.2.0 Babel==2.11.0 certifi==2021.5.30 charset-normalizer==2.0.12 construct==2.10.70 docutils==0.18.1 idna==3.10 imagesize==1.4.1 importlib-metadata==4.8.3 iniconfig==1.1.1 Jinja2==3.0.3 MarkupSafe==2.0.1 packaging==21.3 pluggy==1.0.0 py==1.11.0 -e git+https://github.com/matwey/pybeam.git@2dec4d87719d500070bf1cf3090d5a2cc62f8d9b#egg=pybeam Pygments==2.14.0 pyparsing==3.1.4 pytest==7.0.1 pytz==2025.2 requests==2.27.1 six==1.17.0 snowballstemmer==2.2.0 Sphinx==5.3.0 sphinxcontrib-applehelp==1.0.2 sphinxcontrib-devhelp==1.0.2 sphinxcontrib-htmlhelp==2.0.0 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==1.0.3 sphinxcontrib-serializinghtml==1.1.5 tomli==1.2.3 typing_extensions==4.1.1 urllib3==1.26.20 zipp==3.6.0
name: pybeam channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - alabaster==0.7.13 - attrs==22.2.0 - babel==2.11.0 - charset-normalizer==2.0.12 - construct==2.10.70 - docutils==0.18.1 - idna==3.10 - imagesize==1.4.1 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - jinja2==3.0.3 - markupsafe==2.0.1 - packaging==21.3 - pluggy==1.0.0 - py==1.11.0 - pygments==2.14.0 - pyparsing==3.1.4 - pytest==7.0.1 - pytz==2025.2 - requests==2.27.1 - six==1.17.0 - snowballstemmer==2.2.0 - sphinx==5.3.0 - sphinxcontrib-applehelp==1.0.2 - sphinxcontrib-devhelp==1.0.2 - sphinxcontrib-htmlhelp==2.0.0 - sphinxcontrib-jsmath==1.0.1 - sphinxcontrib-qthelp==1.0.3 - sphinxcontrib-serializinghtml==1.1.5 - tomli==1.2.3 - typing-extensions==4.1.1 - urllib3==1.26.20 - zipp==3.6.0 prefix: /opt/conda/envs/pybeam
[ "test/schema_beam.py::BEAMConstructTest::test_beam1", "test/schema_beam.py::BEAMConstructTest::test_beam2", "test/schema_beam.py::BEAMConstructTest::test_beam3", "test/schema_beam.py::BEAMConstructTest::test_chunk_litt1", "test/schema_beam.py::BEAMConstructTest::test_chunk_litt2" ]
[]
[ "test/beam_file.py::BEAMFileTest::test_atoms", "test/beam_file.py::BEAMFileTest::test_attr", "test/beam_file.py::BEAMFileTest::test_compileinfo", "test/beam_file.py::BEAMFileTest::test_exports", "test/beam_file.py::BEAMFileTest::test_imports", "test/beam_file.py::BEAMFileTest::test_literals", "test/beam_file.py::BEAMFileTest::test_modulename", "test/schema_beam.py::BEAMConstructTest::test_chunk_atom", "test/schema_beam.py::BEAMConstructTest::test_chunk_attr", "test/schema_beam.py::BEAMConstructTest::test_chunk_atu8", "test/schema_beam.py::BEAMConstructTest::test_chunk_cinf" ]
[]
MIT License
8,864
3,008
[ "pybeam/beam_file.py", "pybeam/erlang_types.py", "pybeam/schema/beam/__init__.py", "pybeam/schema/beam/chunks.py", "pybeam/schema/eetf.py" ]
stephenhillier__starlette_exporter-13
7addf8c00f702c3ebe8cb720a8f0c7730ce76d58
2020-11-03 16:21:23
7addf8c00f702c3ebe8cb720a8f0c7730ce76d58
diff --git a/setup.py b/setup.py index 441e13f..1d286cc 100644 --- a/setup.py +++ b/setup.py @@ -2,7 +2,7 @@ from setuptools import setup setup( name='starlette_exporter', - version='0.5.0', + version='0.5.1', author='Stephen Hillier', author_email='[email protected]', packages=['starlette_exporter'], diff --git a/starlette_exporter/middleware.py b/starlette_exporter/middleware.py index aeeadec..4b04cd6 100644 --- a/starlette_exporter/middleware.py +++ b/starlette_exporter/middleware.py @@ -18,12 +18,15 @@ class PrometheusMiddleware: def __init__( self, app: ASGIApp, group_paths: bool = False, app_name: str = "starlette", - prefix="starlette" + prefix="starlette", buckets=None ): self.app = app self.group_paths = group_paths self.app_name = app_name self.prefix = prefix + self.kwargs = {} + if buckets is not None: + self.kwargs['buckets'] = buckets # Starlette initialises middleware multiple times, so store metrics on the class @property @@ -45,6 +48,7 @@ class PrometheusMiddleware: metric_name, "HTTP request duration, in seconds", ("method", "path", "status_code", "app_name"), + **self.kwargs, ) return PrometheusMiddleware._metrics[metric_name]
Add option to use custom buckets It would be nice if we could specify the default buckets for the histogram like this: ```python buckets = (.5, 1, 3, 5, 10, 15, 20, 30, 45, 60, 80, 100, 125, 150, 175, 200, 250, 300, 400) app.add_middleware(PrometheusMiddleware, group_paths=True, buckets=buckets) app.add_route("/metrics", handle_metrics) ``` They provide something similar in the flask_exporter: https://github.com/rycus86/prometheus_flask_exporter/blob/master/prometheus_flask_exporter/__init__.py Is this something I could put a pull request in for?
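As an illustration only (not part of the original issue), a minimal end-to-end sketch of the usage the issue asks for, based on the snippet above and the `buckets` keyword added in this record's patch; the `PrometheusMiddleware` / `handle_metrics` import locations are assumed from the package name and are not spelled out in the record itself.

```python
from starlette.applications import Starlette
from starlette_exporter import PrometheusMiddleware, handle_metrics  # assumed import path

app = Starlette()

# Custom histogram buckets (seconds); with this record's patch they are
# forwarded to prometheus_client's Histogram via the new `buckets` kwarg.
buckets = (10, 20, 30, 40, 50)

app.add_middleware(PrometheusMiddleware, group_paths=True, buckets=buckets)
app.add_route("/metrics", handle_metrics)
```

Without the patch, `PrometheusMiddleware.__init__` accepts no `buckets` parameter, so the `add_middleware` call above would fail with a `TypeError` for the unexpected keyword argument.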
stephenhillier/starlette_exporter
diff --git a/tests/test_middleware.py b/tests/test_middleware.py index 4ec92d6..776b8ab 100644 --- a/tests/test_middleware.py +++ b/tests/test_middleware.py @@ -109,6 +109,33 @@ class TestMiddleware: in metrics ) + def test_histogram_custom_buckets(self, testapp): + """ test that custom histogram buckets appear after making requests """ + + buckets = (10, 20, 30, 40, 50) + client = TestClient(testapp(buckets=buckets)) + client.get('/200') + client.get('/500') + try: + client.get('/unhandled') + except: + pass + + metrics = client.get('/metrics').content.decode() + + assert ( + """starlette_request_duration_seconds_bucket{app_name="starlette",le="50.0",method="GET",path="/200",status_code="200"}""" + in metrics + ) + assert ( + """starlette_request_duration_seconds_bucket{app_name="starlette",le="50.0",method="GET",path="/500",status_code="500"}""" + in metrics + ) + assert ( + """starlette_request_duration_seconds_bucket{app_name="starlette",le="50.0",method="GET",path="/unhandled",status_code="500"}""" + in metrics + ) + def test_app_name(self, testapp): """ test that app_name label is populated correctly """ client = TestClient(testapp(app_name="testing"))
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 1 }, "num_modified_files": 2 }
0.5
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
certifi==2025.1.31 chardet==3.0.4 exceptiongroup==1.2.2 idna==2.8 iniconfig==2.1.0 packaging==24.2 pluggy==1.5.0 prometheus_client==0.7.1 pytest==8.3.5 requests==2.22.0 starlette==0.12.9 -e git+https://github.com/stephenhillier/starlette_exporter.git@7addf8c00f702c3ebe8cb720a8f0c7730ce76d58#egg=starlette_exporter tomli==2.2.1 urllib3==1.25.11
name: starlette_exporter channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - certifi==2025.1.31 - chardet==3.0.4 - exceptiongroup==1.2.2 - idna==2.8 - iniconfig==2.1.0 - packaging==24.2 - pluggy==1.5.0 - prometheus-client==0.7.1 - pytest==8.3.5 - requests==2.22.0 - starlette==0.12.9 - tomli==2.2.1 - urllib3==1.25.11 prefix: /opt/conda/envs/starlette_exporter
[ "tests/test_middleware.py::TestMiddleware::test_histogram_custom_buckets" ]
[]
[ "tests/test_middleware.py::TestMiddleware::test_200", "tests/test_middleware.py::TestMiddleware::test_500", "tests/test_middleware.py::TestMiddleware::test_unhandled", "tests/test_middleware.py::TestMiddleware::test_histogram", "tests/test_middleware.py::TestMiddleware::test_app_name", "tests/test_middleware.py::TestMiddleware::test_prefix", "tests/test_middleware.py::TestMiddleware::test_multi_init", "tests/test_middleware.py::TestMiddleware::test_multi_prefix", "tests/test_middleware.py::TestMiddlewareGroupedPaths::test_200", "tests/test_middleware.py::TestMiddlewareGroupedPaths::test_500", "tests/test_middleware.py::TestMiddlewareGroupedPaths::test_unhandled", "tests/test_middleware.py::TestMiddlewareGroupedPaths::test_404", "tests/test_middleware.py::TestMiddlewareGroupedPaths::test_histogram" ]
[]
Apache License 2.0
8,865
379
[ "setup.py", "starlette_exporter/middleware.py" ]
dwavesystems__dwave-system-364
2f82124f518f7a57ae6b7f74c84a8aa62715ca4d
2020-11-03 18:59:42
32e9065cddeb123106b43d947877043c1a2ccc78
diff --git a/dwave/system/warnings.py b/dwave/system/warnings.py index e646ce3..3dc5d51 100644 --- a/dwave/system/warnings.py +++ b/dwave/system/warnings.py @@ -19,6 +19,7 @@ import logging import dimod import numpy as np import six +import collections.abc as abc from dwave.embedding import broken_chains @@ -186,8 +187,12 @@ class WarningHandler(object): # strength return - interactions = [uv for uv, bias in bqm.quadratic.items() - if abs(bias) >= chain_strength] + if isinstance(chain_strength, abc.Mapping): + interactions = [(u, v) for (u, v), bias in bqm.quadratic.items() + if abs(bias) >= min(chain_strength[u], chain_strength[v])] + else: + interactions = [uv for uv, bias in bqm.quadratic.items() + if abs(bias) >= chain_strength] if interactions: self.issue("Some quadratic biases are stronger than the given "
dwave.system.warnings not compatible with iterable as chain strength parameter Newer versions of the code allow the user to specify an individual value of chain strength for each chain by passing a dictionary to the sample function. However, when `sample_qubo` is called with `chain_strength = dict_of_chain_strengths`, `warnings.py` throws an error because it expects a scalar value as the chain strength parameter. Setting `warnings = "ignore"` bypasses the warnings function call and the system produces the desired behavior. Obviously, ignoring warnings is not desirable, so the warnings code should be updated to support the new flexibility in chain strength input.
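As an illustration only (not part of the original issue), a minimal sketch of the call pattern that triggers the problem, mirroring this record's test patch below; the `dwave.system` / `dwave.system.testing` import locations and the `warnings='SAVE'` flag are assumptions drawn from common dwave-system usage and the test, not from the issue text.

```python
import dimod
from dwave.system import EmbeddingComposite        # assumed import path
from dwave.system.testing import MockDWaveSampler  # assumed import path

# Small spin-valued BQM, same shape as the record's test case.
linear = {'a': -1, 'b': -2, 'c': -3}
quadratic = {('a', 'b'): 1, ('a', 'c'): -1, ('b', 'c'): 2}
bqm = dimod.BinaryQuadraticModel(linear, quadratic, 0, dimod.SPIN)

# Per-variable chain strengths passed as a mapping; before the record's
# patch, the ChainStrengthWarning check assumed a single scalar here.
chain_strength = {'a': 10, 'b': 20, 'c': 1.5}

sampler = EmbeddingComposite(MockDWaveSampler())
ss = sampler.sample(bqm, chain_strength=chain_strength, warnings='SAVE')
print(ss.info.get('warnings', []))
```

With the patch applied, the warning handler compares each quadratic bias against `min(chain_strength[u], chain_strength[v])` instead of a single scalar, so the call completes and only the interaction whose bias meets the weaker of its two chain strengths (here `('b', 'c')`) is reported, matching the record's test.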
dwavesystems/dwave-system
diff --git a/tests/test_embedding_composite.py b/tests/test_embedding_composite.py index 7fec85d..fe7139c 100644 --- a/tests/test_embedding_composite.py +++ b/tests/test_embedding_composite.py @@ -332,6 +332,26 @@ class TestEmbeddingComposite(unittest.TestCase): count += 1 self.assertEqual(count, 0) + def test_warnings_chain_strength_dict(self): + sampler = EmbeddingComposite(MockDWaveSampler()) + + linear = {'a': -1, 'b': -2, 'c': -3} + quadratic = {('a', 'b'): 1, ('a', 'c'): -1, ('b', 'c'): 2} + bqm = dimod.BQM(linear, quadratic, 0, dimod.SPIN) + + chain_strength = {'a': 10, 'b': 20, 'c': 1.5} + ss = sampler.sample(bqm, chain_strength=chain_strength, warnings='SAVE') + + self.assertIn('warnings', ss.info) + self.assertEqual(len(ss.info['warnings']), 1) + + warning = ss.info['warnings'][0] + self.assertEqual(warning['type'], ChainStrengthWarning) + + interactions = warning['data']['source_interactions'] + self.assertEqual(len(interactions), 1) + self.assertCountEqual(interactions[0], ('b','c')) + def test_warnings_as_class_variable(self): G = dnx.chimera_graph(12)
{ "commit_name": "head_commit", "failed_lite_validators": [], "has_test_patch": true, "is_lite": true, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 2 }, "num_modified_files": 1 }
1.2
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[drivers]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov" ], "pre_install": null, "python": "3.7", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
certifi @ file:///croot/certifi_1671487769961/work/certifi charset-normalizer==3.4.1 click==8.1.8 coverage==7.2.7 decorator==4.4.2 dimod==0.9.10 dwave-cloud-client==0.8.1 dwave-drivers==0.4.4 dwave-networkx==0.8.4 -e git+https://github.com/dwavesystems/dwave-system.git@2f82124f518f7a57ae6b7f74c84a8aa62715ca4d#egg=dwave_system dwave-tabu==0.2.2 exceptiongroup==1.2.2 fasteners==0.19 homebase==1.0.1 idna==3.10 importlib-metadata==6.7.0 iniconfig==2.0.0 minorminer==0.2.0 mock==2.0.0 networkx==2.6.3 numpy==1.18.0 packaging==24.0 pbr==6.1.1 plucky==0.4.3 pluggy==1.2.0 PySocks==1.7.1 pytest==7.4.4 pytest-cov==4.1.0 python-dateutil==2.9.0.post0 requests==2.31.0 scipy==1.7.3 six==1.11.0 tomli==2.0.1 typing_extensions==4.7.1 urllib3==2.0.7 zipp==3.15.0
name: dwave-system channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2022.12.7=py37h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=22.3.1=py37h06a4308_0 - python=3.7.16=h7a1cb2a_0 - readline=8.2=h5eee18b_0 - setuptools=65.6.3=py37h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.38.4=py37h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - charset-normalizer==3.4.1 - click==8.1.8 - coverage==7.2.7 - decorator==4.4.2 - dimod==0.9.10 - dwave-cloud-client==0.8.1 - dwave-drivers==0.4.4 - dwave-networkx==0.8.4 - dwave-tabu==0.2.2 - exceptiongroup==1.2.2 - fasteners==0.19 - homebase==1.0.1 - idna==3.10 - importlib-metadata==6.7.0 - iniconfig==2.0.0 - minorminer==0.2.0 - mock==2.0.0 - networkx==2.6.3 - numpy==1.18.0 - packaging==24.0 - pbr==6.1.1 - plucky==0.4.3 - pluggy==1.2.0 - pysocks==1.7.1 - pytest==7.4.4 - pytest-cov==4.1.0 - python-dateutil==2.9.0.post0 - requests==2.31.0 - scipy==1.7.3 - six==1.11.0 - tomli==2.0.1 - typing-extensions==4.7.1 - urllib3==2.0.7 - zipp==3.15.0 prefix: /opt/conda/envs/dwave-system
[ "tests/test_embedding_composite.py::TestEmbeddingComposite::test_warnings_chain_strength_dict" ]
[]
[ "tests/test_embedding_composite.py::TestEmbeddingComposite::test_chain_break_method_customization", "tests/test_embedding_composite.py::TestEmbeddingComposite::test_embedding_parameters_construction", "tests/test_embedding_composite.py::TestEmbeddingComposite::test_embedding_parameters_sample", "tests/test_embedding_composite.py::TestEmbeddingComposite::test_find_embedding_kwarg", "tests/test_embedding_composite.py::TestEmbeddingComposite::test_instantiation_smoketest", "tests/test_embedding_composite.py::TestEmbeddingComposite::test_intermediate_composites", "tests/test_embedding_composite.py::TestEmbeddingComposite::test_max_cut", "tests/test_embedding_composite.py::TestEmbeddingComposite::test_return_embedding", "tests/test_embedding_composite.py::TestEmbeddingComposite::test_return_embedding_as_class_variable", "tests/test_embedding_composite.py::TestEmbeddingComposite::test_sample_binary_AdjArrayBQM_1path_<dwave.system.composites.embedding.EmbeddingComposite", "tests/test_embedding_composite.py::TestEmbeddingComposite::test_sample_binary_AdjArrayBQM_1var_<dwave.system.composites.embedding.EmbeddingComposite", "tests/test_embedding_composite.py::TestEmbeddingComposite::test_sample_binary_AdjArrayBQM_2path_<dwave.system.composites.embedding.EmbeddingComposite", "tests/test_embedding_composite.py::TestEmbeddingComposite::test_sample_binary_AdjArrayBQM_empty_<dwave.system.composites.embedding.EmbeddingComposite", "tests/test_embedding_composite.py::TestEmbeddingComposite::test_sample_binary_AdjDictBQM_1path_<dwave.system.composites.embedding.EmbeddingComposite", "tests/test_embedding_composite.py::TestEmbeddingComposite::test_sample_binary_AdjDictBQM_1var_<dwave.system.composites.embedding.EmbeddingComposite", "tests/test_embedding_composite.py::TestEmbeddingComposite::test_sample_binary_AdjDictBQM_2path_<dwave.system.composites.embedding.EmbeddingComposite", "tests/test_embedding_composite.py::TestEmbeddingComposite::test_sample_binary_AdjDictBQM_empty_<dwave.system.composites.embedding.EmbeddingComposite", "tests/test_embedding_composite.py::TestEmbeddingComposite::test_sample_binary_AdjMapBQM_1path_<dwave.system.composites.embedding.EmbeddingComposite", "tests/test_embedding_composite.py::TestEmbeddingComposite::test_sample_binary_AdjMapBQM_1var_<dwave.system.composites.embedding.EmbeddingComposite", "tests/test_embedding_composite.py::TestEmbeddingComposite::test_sample_binary_AdjMapBQM_2path_<dwave.system.composites.embedding.EmbeddingComposite", "tests/test_embedding_composite.py::TestEmbeddingComposite::test_sample_binary_AdjMapBQM_empty_<dwave.system.composites.embedding.EmbeddingComposite", "tests/test_embedding_composite.py::TestEmbeddingComposite::test_sample_binary_AdjVectorBQM_1path_<dwave.system.composites.embedding.EmbeddingComposite", "tests/test_embedding_composite.py::TestEmbeddingComposite::test_sample_binary_AdjVectorBQM_1var_<dwave.system.composites.embedding.EmbeddingComposite", "tests/test_embedding_composite.py::TestEmbeddingComposite::test_sample_binary_AdjVectorBQM_2path_<dwave.system.composites.embedding.EmbeddingComposite", "tests/test_embedding_composite.py::TestEmbeddingComposite::test_sample_binary_AdjVectorBQM_empty_<dwave.system.composites.embedding.EmbeddingComposite", "tests/test_embedding_composite.py::TestEmbeddingComposite::test_sample_binary_BinaryQuadraticModel_1path_<dwave.system.composites.embedding.EmbeddingComposite", 
"tests/test_embedding_composite.py::TestEmbeddingComposite::test_sample_binary_BinaryQuadraticModel_1var_<dwave.system.composites.embedding.EmbeddingComposite", "tests/test_embedding_composite.py::TestEmbeddingComposite::test_sample_binary_BinaryQuadraticModel_2path_<dwave.system.composites.embedding.EmbeddingComposite", "tests/test_embedding_composite.py::TestEmbeddingComposite::test_sample_binary_BinaryQuadraticModel_empty_<dwave.system.composites.embedding.EmbeddingComposite", "tests/test_embedding_composite.py::TestEmbeddingComposite::test_sample_ising", "tests/test_embedding_composite.py::TestEmbeddingComposite::test_sample_ising_1path_<dwave.system.composites.embedding.EmbeddingComposite", "tests/test_embedding_composite.py::TestEmbeddingComposite::test_sample_ising_1var_<dwave.system.composites.embedding.EmbeddingComposite", "tests/test_embedding_composite.py::TestEmbeddingComposite::test_sample_ising_2path_<dwave.system.composites.embedding.EmbeddingComposite", "tests/test_embedding_composite.py::TestEmbeddingComposite::test_sample_ising_empty_<dwave.system.composites.embedding.EmbeddingComposite", "tests/test_embedding_composite.py::TestEmbeddingComposite::test_sample_ising_unstructured_not_integer_labelled", "tests/test_embedding_composite.py::TestEmbeddingComposite::test_sample_qubo", "tests/test_embedding_composite.py::TestEmbeddingComposite::test_sample_qubo_1path_<dwave.system.composites.embedding.EmbeddingComposite", "tests/test_embedding_composite.py::TestEmbeddingComposite::test_sample_qubo_1var_<dwave.system.composites.embedding.EmbeddingComposite", "tests/test_embedding_composite.py::TestEmbeddingComposite::test_sample_qubo_2path_<dwave.system.composites.embedding.EmbeddingComposite", "tests/test_embedding_composite.py::TestEmbeddingComposite::test_sample_qubo_empty_<dwave.system.composites.embedding.EmbeddingComposite", "tests/test_embedding_composite.py::TestEmbeddingComposite::test_sample_spin_AdjArrayBQM_1path_<dwave.system.composites.embedding.EmbeddingComposite", "tests/test_embedding_composite.py::TestEmbeddingComposite::test_sample_spin_AdjArrayBQM_1var_<dwave.system.composites.embedding.EmbeddingComposite", "tests/test_embedding_composite.py::TestEmbeddingComposite::test_sample_spin_AdjArrayBQM_2path_<dwave.system.composites.embedding.EmbeddingComposite", "tests/test_embedding_composite.py::TestEmbeddingComposite::test_sample_spin_AdjArrayBQM_empty_<dwave.system.composites.embedding.EmbeddingComposite", "tests/test_embedding_composite.py::TestEmbeddingComposite::test_sample_spin_AdjDictBQM_1path_<dwave.system.composites.embedding.EmbeddingComposite", "tests/test_embedding_composite.py::TestEmbeddingComposite::test_sample_spin_AdjDictBQM_1var_<dwave.system.composites.embedding.EmbeddingComposite", "tests/test_embedding_composite.py::TestEmbeddingComposite::test_sample_spin_AdjDictBQM_2path_<dwave.system.composites.embedding.EmbeddingComposite", "tests/test_embedding_composite.py::TestEmbeddingComposite::test_sample_spin_AdjDictBQM_empty_<dwave.system.composites.embedding.EmbeddingComposite", "tests/test_embedding_composite.py::TestEmbeddingComposite::test_sample_spin_AdjMapBQM_1path_<dwave.system.composites.embedding.EmbeddingComposite", "tests/test_embedding_composite.py::TestEmbeddingComposite::test_sample_spin_AdjMapBQM_1var_<dwave.system.composites.embedding.EmbeddingComposite", "tests/test_embedding_composite.py::TestEmbeddingComposite::test_sample_spin_AdjMapBQM_2path_<dwave.system.composites.embedding.EmbeddingComposite", 
"tests/test_embedding_composite.py::TestEmbeddingComposite::test_sample_spin_AdjMapBQM_empty_<dwave.system.composites.embedding.EmbeddingComposite", "tests/test_embedding_composite.py::TestEmbeddingComposite::test_sample_spin_AdjVectorBQM_1path_<dwave.system.composites.embedding.EmbeddingComposite", "tests/test_embedding_composite.py::TestEmbeddingComposite::test_sample_spin_AdjVectorBQM_1var_<dwave.system.composites.embedding.EmbeddingComposite", "tests/test_embedding_composite.py::TestEmbeddingComposite::test_sample_spin_AdjVectorBQM_2path_<dwave.system.composites.embedding.EmbeddingComposite", "tests/test_embedding_composite.py::TestEmbeddingComposite::test_sample_spin_AdjVectorBQM_empty_<dwave.system.composites.embedding.EmbeddingComposite", "tests/test_embedding_composite.py::TestEmbeddingComposite::test_sample_spin_BinaryQuadraticModel_1path_<dwave.system.composites.embedding.EmbeddingComposite", "tests/test_embedding_composite.py::TestEmbeddingComposite::test_sample_spin_BinaryQuadraticModel_1var_<dwave.system.composites.embedding.EmbeddingComposite", "tests/test_embedding_composite.py::TestEmbeddingComposite::test_sample_spin_BinaryQuadraticModel_2path_<dwave.system.composites.embedding.EmbeddingComposite", "tests/test_embedding_composite.py::TestEmbeddingComposite::test_sample_spin_BinaryQuadraticModel_empty_<dwave.system.composites.embedding.EmbeddingComposite", "tests/test_embedding_composite.py::TestEmbeddingComposite::test_scale_aware_scale_composite", "tests/test_embedding_composite.py::TestEmbeddingComposite::test_singleton_variables", "tests/test_embedding_composite.py::TestEmbeddingComposite::test_warning_chain_strength", "tests/test_embedding_composite.py::TestEmbeddingComposite::test_warnings", "tests/test_embedding_composite.py::TestEmbeddingComposite::test_warnings_as_class_variable", "tests/test_embedding_composite.py::TestEmbeddingComposite::test_warnings_chain_strength_len1", "tests/test_embedding_composite.py::TestFixedEmbeddingComposite::test_adjacency", "tests/test_embedding_composite.py::TestFixedEmbeddingComposite::test_chain_break_method_customization", "tests/test_embedding_composite.py::TestFixedEmbeddingComposite::test_instantiation_empty_adjacency", "tests/test_embedding_composite.py::TestFixedEmbeddingComposite::test_instantiation_empty_embedding", "tests/test_embedding_composite.py::TestFixedEmbeddingComposite::test_instantiation_triangle", "tests/test_embedding_composite.py::TestFixedEmbeddingComposite::test_keyer", "tests/test_embedding_composite.py::TestFixedEmbeddingComposite::test_minimize_energy_chain_break_method", "tests/test_embedding_composite.py::TestFixedEmbeddingComposite::test_sample_bqm_triangle", "tests/test_embedding_composite.py::TestFixedEmbeddingComposite::test_without_embedding_and_adjacency", "tests/test_embedding_composite.py::TestLazyFixedEmbeddingComposite::test_chain_break_method_customization", "tests/test_embedding_composite.py::TestLazyFixedEmbeddingComposite::test_ising", "tests/test_embedding_composite.py::TestLazyFixedEmbeddingComposite::test_qubo", "tests/test_embedding_composite.py::TestLazyFixedEmbeddingComposite::test_same_embedding", "tests/test_embedding_composite.py::TestLazyFixedEmbeddingComposite::test_sample_binary_AdjArrayBQM_1path_<lambda>", "tests/test_embedding_composite.py::TestLazyFixedEmbeddingComposite::test_sample_binary_AdjArrayBQM_1var_<lambda>", "tests/test_embedding_composite.py::TestLazyFixedEmbeddingComposite::test_sample_binary_AdjArrayBQM_2path_<lambda>", 
"tests/test_embedding_composite.py::TestLazyFixedEmbeddingComposite::test_sample_binary_AdjArrayBQM_empty_<lambda>", "tests/test_embedding_composite.py::TestLazyFixedEmbeddingComposite::test_sample_binary_AdjDictBQM_1path_<lambda>", "tests/test_embedding_composite.py::TestLazyFixedEmbeddingComposite::test_sample_binary_AdjDictBQM_1var_<lambda>", "tests/test_embedding_composite.py::TestLazyFixedEmbeddingComposite::test_sample_binary_AdjDictBQM_2path_<lambda>", "tests/test_embedding_composite.py::TestLazyFixedEmbeddingComposite::test_sample_binary_AdjDictBQM_empty_<lambda>", "tests/test_embedding_composite.py::TestLazyFixedEmbeddingComposite::test_sample_binary_AdjMapBQM_1path_<lambda>", "tests/test_embedding_composite.py::TestLazyFixedEmbeddingComposite::test_sample_binary_AdjMapBQM_1var_<lambda>", "tests/test_embedding_composite.py::TestLazyFixedEmbeddingComposite::test_sample_binary_AdjMapBQM_2path_<lambda>", "tests/test_embedding_composite.py::TestLazyFixedEmbeddingComposite::test_sample_binary_AdjMapBQM_empty_<lambda>", "tests/test_embedding_composite.py::TestLazyFixedEmbeddingComposite::test_sample_binary_AdjVectorBQM_1path_<lambda>", "tests/test_embedding_composite.py::TestLazyFixedEmbeddingComposite::test_sample_binary_AdjVectorBQM_1var_<lambda>", "tests/test_embedding_composite.py::TestLazyFixedEmbeddingComposite::test_sample_binary_AdjVectorBQM_2path_<lambda>", "tests/test_embedding_composite.py::TestLazyFixedEmbeddingComposite::test_sample_binary_AdjVectorBQM_empty_<lambda>", "tests/test_embedding_composite.py::TestLazyFixedEmbeddingComposite::test_sample_binary_BinaryQuadraticModel_1path_<lambda>", "tests/test_embedding_composite.py::TestLazyFixedEmbeddingComposite::test_sample_binary_BinaryQuadraticModel_1var_<lambda>", "tests/test_embedding_composite.py::TestLazyFixedEmbeddingComposite::test_sample_binary_BinaryQuadraticModel_2path_<lambda>", "tests/test_embedding_composite.py::TestLazyFixedEmbeddingComposite::test_sample_binary_BinaryQuadraticModel_empty_<lambda>", "tests/test_embedding_composite.py::TestLazyFixedEmbeddingComposite::test_sample_instantiation", "tests/test_embedding_composite.py::TestLazyFixedEmbeddingComposite::test_sample_ising_1path_<lambda>", "tests/test_embedding_composite.py::TestLazyFixedEmbeddingComposite::test_sample_ising_1var_<lambda>", "tests/test_embedding_composite.py::TestLazyFixedEmbeddingComposite::test_sample_ising_2path_<lambda>", "tests/test_embedding_composite.py::TestLazyFixedEmbeddingComposite::test_sample_ising_empty_<lambda>", "tests/test_embedding_composite.py::TestLazyFixedEmbeddingComposite::test_sample_qubo_1path_<lambda>", "tests/test_embedding_composite.py::TestLazyFixedEmbeddingComposite::test_sample_qubo_1var_<lambda>", "tests/test_embedding_composite.py::TestLazyFixedEmbeddingComposite::test_sample_qubo_2path_<lambda>", "tests/test_embedding_composite.py::TestLazyFixedEmbeddingComposite::test_sample_qubo_empty_<lambda>", "tests/test_embedding_composite.py::TestLazyFixedEmbeddingComposite::test_sample_spin_AdjArrayBQM_1path_<lambda>", "tests/test_embedding_composite.py::TestLazyFixedEmbeddingComposite::test_sample_spin_AdjArrayBQM_1var_<lambda>", "tests/test_embedding_composite.py::TestLazyFixedEmbeddingComposite::test_sample_spin_AdjArrayBQM_2path_<lambda>", "tests/test_embedding_composite.py::TestLazyFixedEmbeddingComposite::test_sample_spin_AdjArrayBQM_empty_<lambda>", "tests/test_embedding_composite.py::TestLazyFixedEmbeddingComposite::test_sample_spin_AdjDictBQM_1path_<lambda>", 
"tests/test_embedding_composite.py::TestLazyFixedEmbeddingComposite::test_sample_spin_AdjDictBQM_1var_<lambda>", "tests/test_embedding_composite.py::TestLazyFixedEmbeddingComposite::test_sample_spin_AdjDictBQM_2path_<lambda>", "tests/test_embedding_composite.py::TestLazyFixedEmbeddingComposite::test_sample_spin_AdjDictBQM_empty_<lambda>", "tests/test_embedding_composite.py::TestLazyFixedEmbeddingComposite::test_sample_spin_AdjMapBQM_1path_<lambda>", "tests/test_embedding_composite.py::TestLazyFixedEmbeddingComposite::test_sample_spin_AdjMapBQM_1var_<lambda>", "tests/test_embedding_composite.py::TestLazyFixedEmbeddingComposite::test_sample_spin_AdjMapBQM_2path_<lambda>", "tests/test_embedding_composite.py::TestLazyFixedEmbeddingComposite::test_sample_spin_AdjMapBQM_empty_<lambda>", "tests/test_embedding_composite.py::TestLazyFixedEmbeddingComposite::test_sample_spin_AdjVectorBQM_1path_<lambda>", "tests/test_embedding_composite.py::TestLazyFixedEmbeddingComposite::test_sample_spin_AdjVectorBQM_1var_<lambda>", "tests/test_embedding_composite.py::TestLazyFixedEmbeddingComposite::test_sample_spin_AdjVectorBQM_2path_<lambda>", "tests/test_embedding_composite.py::TestLazyFixedEmbeddingComposite::test_sample_spin_AdjVectorBQM_empty_<lambda>", "tests/test_embedding_composite.py::TestLazyFixedEmbeddingComposite::test_sample_spin_BinaryQuadraticModel_1path_<lambda>", "tests/test_embedding_composite.py::TestLazyFixedEmbeddingComposite::test_sample_spin_BinaryQuadraticModel_1var_<lambda>", "tests/test_embedding_composite.py::TestLazyFixedEmbeddingComposite::test_sample_spin_BinaryQuadraticModel_2path_<lambda>", "tests/test_embedding_composite.py::TestLazyFixedEmbeddingComposite::test_sample_spin_BinaryQuadraticModel_empty_<lambda>", "tests/test_embedding_composite.py::TestLazyFixedEmbeddingComposite::test_sparse_qubo", "tests/test_embedding_composite.py::TestLazyEmbeddingComposite::test_deprecation_raise", "tests/test_embedding_composite.py::TestLazyEmbeddingComposite::test_ising_sample", "tests/test_embedding_composite.py::TestAutoEmbeddingComposite::test_broken_find_embedding", "tests/test_embedding_composite.py::TestAutoEmbeddingComposite::test_sample_binary_AdjArrayBQM_1path_<dwave.system.composites.embedding.AutoEmbeddingComposite", "tests/test_embedding_composite.py::TestAutoEmbeddingComposite::test_sample_binary_AdjArrayBQM_1var_<dwave.system.composites.embedding.AutoEmbeddingComposite", "tests/test_embedding_composite.py::TestAutoEmbeddingComposite::test_sample_binary_AdjArrayBQM_2path_<dwave.system.composites.embedding.AutoEmbeddingComposite", "tests/test_embedding_composite.py::TestAutoEmbeddingComposite::test_sample_binary_AdjArrayBQM_empty_<dwave.system.composites.embedding.AutoEmbeddingComposite", "tests/test_embedding_composite.py::TestAutoEmbeddingComposite::test_sample_binary_AdjDictBQM_1path_<dwave.system.composites.embedding.AutoEmbeddingComposite", "tests/test_embedding_composite.py::TestAutoEmbeddingComposite::test_sample_binary_AdjDictBQM_1var_<dwave.system.composites.embedding.AutoEmbeddingComposite", "tests/test_embedding_composite.py::TestAutoEmbeddingComposite::test_sample_binary_AdjDictBQM_2path_<dwave.system.composites.embedding.AutoEmbeddingComposite", "tests/test_embedding_composite.py::TestAutoEmbeddingComposite::test_sample_binary_AdjDictBQM_empty_<dwave.system.composites.embedding.AutoEmbeddingComposite", "tests/test_embedding_composite.py::TestAutoEmbeddingComposite::test_sample_binary_AdjMapBQM_1path_<dwave.system.composites.embedding.AutoEmbeddingComposite", 
"tests/test_embedding_composite.py::TestAutoEmbeddingComposite::test_sample_binary_AdjMapBQM_1var_<dwave.system.composites.embedding.AutoEmbeddingComposite", "tests/test_embedding_composite.py::TestAutoEmbeddingComposite::test_sample_binary_AdjMapBQM_2path_<dwave.system.composites.embedding.AutoEmbeddingComposite", "tests/test_embedding_composite.py::TestAutoEmbeddingComposite::test_sample_binary_AdjMapBQM_empty_<dwave.system.composites.embedding.AutoEmbeddingComposite", "tests/test_embedding_composite.py::TestAutoEmbeddingComposite::test_sample_binary_AdjVectorBQM_1path_<dwave.system.composites.embedding.AutoEmbeddingComposite", "tests/test_embedding_composite.py::TestAutoEmbeddingComposite::test_sample_binary_AdjVectorBQM_1var_<dwave.system.composites.embedding.AutoEmbeddingComposite", "tests/test_embedding_composite.py::TestAutoEmbeddingComposite::test_sample_binary_AdjVectorBQM_2path_<dwave.system.composites.embedding.AutoEmbeddingComposite", "tests/test_embedding_composite.py::TestAutoEmbeddingComposite::test_sample_binary_AdjVectorBQM_empty_<dwave.system.composites.embedding.AutoEmbeddingComposite", "tests/test_embedding_composite.py::TestAutoEmbeddingComposite::test_sample_binary_BinaryQuadraticModel_1path_<dwave.system.composites.embedding.AutoEmbeddingComposite", "tests/test_embedding_composite.py::TestAutoEmbeddingComposite::test_sample_binary_BinaryQuadraticModel_1var_<dwave.system.composites.embedding.AutoEmbeddingComposite", "tests/test_embedding_composite.py::TestAutoEmbeddingComposite::test_sample_binary_BinaryQuadraticModel_2path_<dwave.system.composites.embedding.AutoEmbeddingComposite", "tests/test_embedding_composite.py::TestAutoEmbeddingComposite::test_sample_binary_BinaryQuadraticModel_empty_<dwave.system.composites.embedding.AutoEmbeddingComposite", "tests/test_embedding_composite.py::TestAutoEmbeddingComposite::test_sample_ising_1path_<dwave.system.composites.embedding.AutoEmbeddingComposite", "tests/test_embedding_composite.py::TestAutoEmbeddingComposite::test_sample_ising_1var_<dwave.system.composites.embedding.AutoEmbeddingComposite", "tests/test_embedding_composite.py::TestAutoEmbeddingComposite::test_sample_ising_2path_<dwave.system.composites.embedding.AutoEmbeddingComposite", "tests/test_embedding_composite.py::TestAutoEmbeddingComposite::test_sample_ising_empty_<dwave.system.composites.embedding.AutoEmbeddingComposite", "tests/test_embedding_composite.py::TestAutoEmbeddingComposite::test_sample_qubo_1path_<dwave.system.composites.embedding.AutoEmbeddingComposite", "tests/test_embedding_composite.py::TestAutoEmbeddingComposite::test_sample_qubo_1var_<dwave.system.composites.embedding.AutoEmbeddingComposite", "tests/test_embedding_composite.py::TestAutoEmbeddingComposite::test_sample_qubo_2path_<dwave.system.composites.embedding.AutoEmbeddingComposite", "tests/test_embedding_composite.py::TestAutoEmbeddingComposite::test_sample_qubo_empty_<dwave.system.composites.embedding.AutoEmbeddingComposite", "tests/test_embedding_composite.py::TestAutoEmbeddingComposite::test_sample_spin_AdjArrayBQM_1path_<dwave.system.composites.embedding.AutoEmbeddingComposite", "tests/test_embedding_composite.py::TestAutoEmbeddingComposite::test_sample_spin_AdjArrayBQM_1var_<dwave.system.composites.embedding.AutoEmbeddingComposite", "tests/test_embedding_composite.py::TestAutoEmbeddingComposite::test_sample_spin_AdjArrayBQM_2path_<dwave.system.composites.embedding.AutoEmbeddingComposite", 
"tests/test_embedding_composite.py::TestAutoEmbeddingComposite::test_sample_spin_AdjArrayBQM_empty_<dwave.system.composites.embedding.AutoEmbeddingComposite", "tests/test_embedding_composite.py::TestAutoEmbeddingComposite::test_sample_spin_AdjDictBQM_1path_<dwave.system.composites.embedding.AutoEmbeddingComposite", "tests/test_embedding_composite.py::TestAutoEmbeddingComposite::test_sample_spin_AdjDictBQM_1var_<dwave.system.composites.embedding.AutoEmbeddingComposite", "tests/test_embedding_composite.py::TestAutoEmbeddingComposite::test_sample_spin_AdjDictBQM_2path_<dwave.system.composites.embedding.AutoEmbeddingComposite", "tests/test_embedding_composite.py::TestAutoEmbeddingComposite::test_sample_spin_AdjDictBQM_empty_<dwave.system.composites.embedding.AutoEmbeddingComposite", "tests/test_embedding_composite.py::TestAutoEmbeddingComposite::test_sample_spin_AdjMapBQM_1path_<dwave.system.composites.embedding.AutoEmbeddingComposite", "tests/test_embedding_composite.py::TestAutoEmbeddingComposite::test_sample_spin_AdjMapBQM_1var_<dwave.system.composites.embedding.AutoEmbeddingComposite", "tests/test_embedding_composite.py::TestAutoEmbeddingComposite::test_sample_spin_AdjMapBQM_2path_<dwave.system.composites.embedding.AutoEmbeddingComposite", "tests/test_embedding_composite.py::TestAutoEmbeddingComposite::test_sample_spin_AdjMapBQM_empty_<dwave.system.composites.embedding.AutoEmbeddingComposite", "tests/test_embedding_composite.py::TestAutoEmbeddingComposite::test_sample_spin_AdjVectorBQM_1path_<dwave.system.composites.embedding.AutoEmbeddingComposite", "tests/test_embedding_composite.py::TestAutoEmbeddingComposite::test_sample_spin_AdjVectorBQM_1var_<dwave.system.composites.embedding.AutoEmbeddingComposite", "tests/test_embedding_composite.py::TestAutoEmbeddingComposite::test_sample_spin_AdjVectorBQM_2path_<dwave.system.composites.embedding.AutoEmbeddingComposite", "tests/test_embedding_composite.py::TestAutoEmbeddingComposite::test_sample_spin_AdjVectorBQM_empty_<dwave.system.composites.embedding.AutoEmbeddingComposite", "tests/test_embedding_composite.py::TestAutoEmbeddingComposite::test_sample_spin_BinaryQuadraticModel_1path_<dwave.system.composites.embedding.AutoEmbeddingComposite", "tests/test_embedding_composite.py::TestAutoEmbeddingComposite::test_sample_spin_BinaryQuadraticModel_1var_<dwave.system.composites.embedding.AutoEmbeddingComposite", "tests/test_embedding_composite.py::TestAutoEmbeddingComposite::test_sample_spin_BinaryQuadraticModel_2path_<dwave.system.composites.embedding.AutoEmbeddingComposite", "tests/test_embedding_composite.py::TestAutoEmbeddingComposite::test_sample_spin_BinaryQuadraticModel_empty_<dwave.system.composites.embedding.AutoEmbeddingComposite", "tests/test_embedding_composite.py::TestAutoEmbeddingComposite::test_smoke", "tests/test_embedding_composite.py::TestAutoEmbeddingComposite::test_unstructured" ]
[]
Apache License 2.0
8,867
257
[ "dwave/system/warnings.py" ]
lark-parser__lark-745
0aac2760c86cc69ea499266bc9036af18e6b195e
2020-11-05 05:36:53
5149d38e259a226e31c9aa2e7692296bc0c2be76
diff --git a/lark/parsers/earley.py b/lark/parsers/earley.py index 3ccba01..d6afa71 100644 --- a/lark/parsers/earley.py +++ b/lark/parsers/earley.py @@ -315,7 +315,7 @@ class Parser: if self.tree_class is not None: # Perform our SPPF -> AST conversion - transformer = ForestToParseTree(self.tree_class, self.callbacks, self.forest_sum_visitor and self.forest_sum_visitor(), self.resolve_ambiguity) + transformer = ForestToParseTree(self.tree_class, self.callbacks, self.forest_sum_visitor and self.forest_sum_visitor(), self.resolve_ambiguity, self.debug) return transformer.transform(solutions[0]) # return the root of the SPPF diff --git a/lark/parsers/earley_forest.py b/lark/parsers/earley_forest.py index 532dedf..7a56bbc 100644 --- a/lark/parsers/earley_forest.py +++ b/lark/parsers/earley_forest.py @@ -483,26 +483,31 @@ class ForestToParseTree(ForestTransformer): tree. """ - def __init__(self, tree_class=Tree, callbacks=dict(), prioritizer=ForestSumVisitor(), resolve_ambiguity=True): + def __init__(self, tree_class=Tree, callbacks=dict(), prioritizer=ForestSumVisitor(), resolve_ambiguity=True, debug=False): super(ForestToParseTree, self).__init__() self.tree_class = tree_class self.callbacks = callbacks self.prioritizer = prioritizer self.resolve_ambiguity = resolve_ambiguity + self.debug = debug self._on_cycle_retreat = False + self._cycle_node = None + self._successful_visits = set() def on_cycle(self, node, path): - logger.warning("Cycle encountered in the SPPF at node: %s. " - "As infinite ambiguities cannot be represented in a tree, " - "this family of derivations will be discarded.", node) - if self.resolve_ambiguity: - # TODO: choose a different path if cycle is encountered - logger.warning("At this time, using ambiguity resolution for SPPFs " - "with cycles may result in None being returned.") + if self.debug: + logger.warning("Cycle encountered in the SPPF at node: %s. 
" + "As infinite ambiguities cannot be represented in a tree, " + "this family of derivations will be discarded.", node) + self._cycle_node = node self._on_cycle_retreat = True def _check_cycle(self, node): if self._on_cycle_retreat: + if id(node) == id(self._cycle_node): + self._cycle_node = None + self._on_cycle_retreat = False + return raise Discard() def _collapse_ambig(self, children): @@ -531,11 +536,17 @@ class ForestToParseTree(ForestTransformer): raise Discard() def transform_symbol_node(self, node, data): + if id(node) not in self._successful_visits: + raise Discard() + self._successful_visits.remove(id(node)) self._check_cycle(node) data = self._collapse_ambig(data) return self._call_ambig_func(node, data) def transform_intermediate_node(self, node, data): + if id(node) not in self._successful_visits: + raise Discard() + self._successful_visits.remove(id(node)) self._check_cycle(node) if len(data) > 1: children = [self.tree_class('_inter', c) for c in data] @@ -544,6 +555,8 @@ class ForestToParseTree(ForestTransformer): def transform_packed_node(self, node, data): self._check_cycle(node) + if self.resolve_ambiguity and id(node.parent) in self._successful_visits: + raise Discard() children = [] assert len(data) <= 2 data = PackedData(node, data) @@ -559,21 +572,23 @@ class ForestToParseTree(ForestTransformer): return self._call_rule_func(node, children) def visit_symbol_node_in(self, node): - self._on_cycle_retreat = False super(ForestToParseTree, self).visit_symbol_node_in(node) + if self._on_cycle_retreat: + return if self.prioritizer and node.is_ambiguous and isinf(node.priority): self.prioritizer.visit(node) - if self.resolve_ambiguity: - return node.children[0] return node.children def visit_packed_node_in(self, node): self._on_cycle_retreat = False - return super(ForestToParseTree, self).visit_packed_node_in(node) + to_visit = super(ForestToParseTree, self).visit_packed_node_in(node) + if not self.resolve_ambiguity or id(node.parent) not in self._successful_visits: + return to_visit - def visit_token_node(self, node): - self._on_cycle_retreat = False - return super(ForestToParseTree, self).visit_token_node(node) + def visit_packed_node_out(self, node): + super(ForestToParseTree, self).visit_packed_node_out(node) + if not self._on_cycle_retreat: + self._successful_visits.add(id(node.parent)) def handles_ambiguity(func): """Decorator for methods of subclasses of ``TreeForestTransformer``.
Bug in Earley parser when there is ambiguity in empty rules **Describe the bug** The Earley parser creates an incorrect tree **To Reproduce** ```python from lark import Lark grammar = """ a: _x _x: _x? b b: """ p = Lark(grammar, start='a', ambiguity='explicit') ``` This will throw an `IndexError` because the wrong node constructor gets called. The non-expanding variation doesn't throw an error, but creates a non-sensical result: ```python from lark import Lark grammar = """ a: x x: x? b b: """ p = Lark(grammar, start='a', ambiguity='explicit') print(p.parse('').pretty()) ``` Output ```python a _ambig x b x b ``` Earlier versions of Lark throw `ParseError: Infinite recursion in grammar!`, which is technically more correct, although not very useful either.
lark-parser/lark
diff --git a/tests/test_parser.py b/tests/test_parser.py index 32aa4fc..6d0981f 100644 --- a/tests/test_parser.py +++ b/tests/test_parser.py @@ -746,6 +746,76 @@ def _make_full_earley_test(LEXER): tree = parser.parse(text) self.assertEqual(tree.children, ['foo', 'bar']) + def test_cycle(self): + grammar = """ + start: start? + """ + + l = Lark(grammar, ambiguity='resolve', lexer=LEXER) + tree = l.parse('') + self.assertEqual(tree, Tree('start', [])) + + l = Lark(grammar, ambiguity='explicit', lexer=LEXER) + tree = l.parse('') + self.assertEqual(tree, Tree('start', [])) + + def test_cycles(self): + grammar = """ + a: b + b: c* + c: a + """ + + l = Lark(grammar, start='a', ambiguity='resolve', lexer=LEXER) + tree = l.parse('') + self.assertEqual(tree, Tree('a', [Tree('b', [])])) + + l = Lark(grammar, start='a', ambiguity='explicit', lexer=LEXER) + tree = l.parse('') + self.assertEqual(tree, Tree('a', [Tree('b', [])])) + + def test_many_cycles(self): + grammar = """ + start: a? | start start + !a: "a" + """ + + l = Lark(grammar, ambiguity='resolve', lexer=LEXER) + tree = l.parse('a') + self.assertEqual(tree, Tree('start', [Tree('a', ['a'])])) + + l = Lark(grammar, ambiguity='explicit', lexer=LEXER) + tree = l.parse('a') + self.assertEqual(tree, Tree('start', [Tree('a', ['a'])])) + + def test_cycles_with_child_filter(self): + grammar = """ + a: _x + _x: _x? b + b: + """ + + grammar2 = """ + a: x + x: x? b + b: + """ + + l = Lark(grammar, start='a', ambiguity='resolve', lexer=LEXER) + tree = l.parse('') + self.assertEqual(tree, Tree('a', [Tree('b', [])])) + + l = Lark(grammar, start='a', ambiguity='explicit', lexer=LEXER) + tree = l.parse(''); + self.assertEqual(tree, Tree('a', [Tree('b', [])])) + + l = Lark(grammar2, start='a', ambiguity='resolve', lexer=LEXER) + tree = l.parse(''); + self.assertEqual(tree, Tree('a', [Tree('x', [Tree('b', [])])])) + + l = Lark(grammar2, start='a', ambiguity='explicit', lexer=LEXER) + tree = l.parse(''); + self.assertEqual(tree, Tree('a', [Tree('x', [Tree('b', [])])]))
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 1 }, "num_modified_files": 2 }
0.10
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "Js2Py", "regex" ], "pre_install": null, "python": "3.9", "reqs_path": [ "test-requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
exceptiongroup==1.2.2 iniconfig==2.1.0 Js2Py==0.68 -e git+https://github.com/lark-parser/lark.git@0aac2760c86cc69ea499266bc9036af18e6b195e#egg=lark_parser packaging==24.2 pluggy==1.5.0 pyjsparser==2.7.1 pytest==8.3.5 regex==2024.11.6 six==1.17.0 tomli==2.2.1 tzlocal==5.3.1
name: lark channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - exceptiongroup==1.2.2 - iniconfig==2.1.0 - js2py==0.68 - packaging==24.2 - pluggy==1.5.0 - pyjsparser==2.7.1 - pytest==8.3.5 - regex==2024.11.6 - six==1.17.0 - tomli==2.2.1 - tzlocal==5.3.1 prefix: /opt/conda/envs/lark
[ "tests/test_parser.py::TestFullEarleyDynamic::test_cycle", "tests/test_parser.py::TestFullEarleyDynamic::test_cycles", "tests/test_parser.py::TestFullEarleyDynamic::test_cycles_with_child_filter", "tests/test_parser.py::TestFullEarleyDynamic::test_many_cycles", "tests/test_parser.py::TestFullEarleyDynamic_complete::test_cycle", "tests/test_parser.py::TestFullEarleyDynamic_complete::test_cycles", "tests/test_parser.py::TestFullEarleyDynamic_complete::test_cycles_with_child_filter", "tests/test_parser.py::TestFullEarleyDynamic_complete::test_many_cycles" ]
[ "tests/test_parser.py::TestEarleyStandard::test_import_custom_sources", "tests/test_parser.py::TestCykStandard::test_import_custom_sources", "tests/test_parser.py::TestEarleyDynamic::test_import_custom_sources", "tests/test_parser.py::TestEarleyDynamic_complete::test_import_custom_sources", "tests/test_parser.py::TestEarleyDynamic_complete::test_postlex_declare", "tests/test_parser.py::TestLalrStandard::test_import_custom_sources", "tests/test_parser.py::TestLalrContextual::test_import_custom_sources", "tests/test_parser.py::TestLalrCustom::test_import_custom_sources" ]
[ "tests/test_parser.py::TestParsers::test_alias", "tests/test_parser.py::TestParsers::test_big_list", "tests/test_parser.py::TestParsers::test_comment_in_rule_definition", "tests/test_parser.py::TestParsers::test_embedded_transformer", "tests/test_parser.py::TestParsers::test_embedded_transformer_inplace", "tests/test_parser.py::TestParsers::test_expand1", "tests/test_parser.py::TestParsers::test_infinite_recurse", "tests/test_parser.py::TestParsers::test_propagate_positions", "tests/test_parser.py::TestParsers::test_same_ast", "tests/test_parser.py::TestParsers::test_vargs_meta", "tests/test_parser.py::TestParsers::test_vargs_tree", "tests/test_parser.py::TestParsers::test_visit_tokens", "tests/test_parser.py::TestEarleyStandard::test_alias", "tests/test_parser.py::TestEarleyStandard::test_alias_in_terminal", "tests/test_parser.py::TestEarleyStandard::test_backslash", "tests/test_parser.py::TestEarleyStandard::test_backslash2", "tests/test_parser.py::TestEarleyStandard::test_basic1", "tests/test_parser.py::TestEarleyStandard::test_basic2", "tests/test_parser.py::TestEarleyStandard::test_bytes_utf8", "tests/test_parser.py::TestEarleyStandard::test_dont_expand1_lists_with_multiple_items", "tests/test_parser.py::TestEarleyStandard::test_dont_expand1_lists_with_multiple_items_2", "tests/test_parser.py::TestEarleyStandard::test_empty", "tests/test_parser.py::TestEarleyStandard::test_empty_end", "tests/test_parser.py::TestEarleyStandard::test_empty_expand1_list", "tests/test_parser.py::TestEarleyStandard::test_empty_expand1_list_2", "tests/test_parser.py::TestEarleyStandard::test_empty_flatten_list", "tests/test_parser.py::TestEarleyStandard::test_escaped_string", "tests/test_parser.py::TestEarleyStandard::test_expand1_lists_with_one_item", "tests/test_parser.py::TestEarleyStandard::test_expand1_lists_with_one_item_2", "tests/test_parser.py::TestEarleyStandard::test_float_without_lexer", "tests/test_parser.py::TestEarleyStandard::test_g_regex_flags", "tests/test_parser.py::TestEarleyStandard::test_hex_escape", "tests/test_parser.py::TestEarleyStandard::test_hex_literal_range_escape", "tests/test_parser.py::TestEarleyStandard::test_ignore", "tests/test_parser.py::TestEarleyStandard::test_import", "tests/test_parser.py::TestEarleyStandard::test_import_errors", "tests/test_parser.py::TestEarleyStandard::test_import_rename", "tests/test_parser.py::TestEarleyStandard::test_join_regex_flags", "tests/test_parser.py::TestEarleyStandard::test_keep_all_tokens", "tests/test_parser.py::TestEarleyStandard::test_lexer_detect_newline_tokens", "tests/test_parser.py::TestEarleyStandard::test_lexer_prioritization", "tests/test_parser.py::TestEarleyStandard::test_lexer_token_limit", "tests/test_parser.py::TestEarleyStandard::test_line_and_column", "tests/test_parser.py::TestEarleyStandard::test_line_counting", "tests/test_parser.py::TestEarleyStandard::test_maybe", "tests/test_parser.py::TestEarleyStandard::test_maybe_placeholders", "tests/test_parser.py::TestEarleyStandard::test_meddling_unused", "tests/test_parser.py::TestEarleyStandard::test_multi_import", "tests/test_parser.py::TestEarleyStandard::test_multi_start", "tests/test_parser.py::TestEarleyStandard::test_postlex_declare", "tests/test_parser.py::TestEarleyStandard::test_prioritization", "tests/test_parser.py::TestEarleyStandard::test_ranged_repeat_rules", "tests/test_parser.py::TestEarleyStandard::test_ranged_repeat_terms", "tests/test_parser.py::TestEarleyStandard::test_reduce_cycle", "tests/test_parser.py::TestEarleyStandard::test_regex_escaping", 
"tests/test_parser.py::TestEarleyStandard::test_regex_quote", "tests/test_parser.py::TestEarleyStandard::test_relative_import", "tests/test_parser.py::TestEarleyStandard::test_relative_import_of_nested_grammar", "tests/test_parser.py::TestEarleyStandard::test_relative_import_preserves_leading_underscore", "tests/test_parser.py::TestEarleyStandard::test_relative_import_rename", "tests/test_parser.py::TestEarleyStandard::test_relative_import_rules_dependencies_imported_only_once", "tests/test_parser.py::TestEarleyStandard::test_relative_import_unicode", "tests/test_parser.py::TestEarleyStandard::test_relative_multi_import", "tests/test_parser.py::TestEarleyStandard::test_relative_rule_import", "tests/test_parser.py::TestEarleyStandard::test_relative_rule_import_drop_ignore", "tests/test_parser.py::TestEarleyStandard::test_relative_rule_import_rename", "tests/test_parser.py::TestEarleyStandard::test_relative_rule_import_subrule", "tests/test_parser.py::TestEarleyStandard::test_relative_rule_import_subrule_no_conflict", "tests/test_parser.py::TestEarleyStandard::test_rule_collision", "tests/test_parser.py::TestEarleyStandard::test_rule_collision2", "tests/test_parser.py::TestEarleyStandard::test_special_chars", "tests/test_parser.py::TestEarleyStandard::test_stack_for_ebnf", "tests/test_parser.py::TestEarleyStandard::test_start", "tests/test_parser.py::TestEarleyStandard::test_stringio_unicode", "tests/test_parser.py::TestEarleyStandard::test_templates", "tests/test_parser.py::TestEarleyStandard::test_templates_alias", "tests/test_parser.py::TestEarleyStandard::test_templates_import", "tests/test_parser.py::TestEarleyStandard::test_templates_modifiers", "tests/test_parser.py::TestEarleyStandard::test_templates_recursion", "tests/test_parser.py::TestEarleyStandard::test_templates_templates", "tests/test_parser.py::TestEarleyStandard::test_token_collision", "tests/test_parser.py::TestEarleyStandard::test_token_collision2", "tests/test_parser.py::TestEarleyStandard::test_token_collision_WS", "tests/test_parser.py::TestEarleyStandard::test_token_ebnf", "tests/test_parser.py::TestEarleyStandard::test_token_flags", "tests/test_parser.py::TestEarleyStandard::test_token_flags2", "tests/test_parser.py::TestEarleyStandard::test_token_flags3", "tests/test_parser.py::TestEarleyStandard::test_token_flags_verbose", "tests/test_parser.py::TestEarleyStandard::test_token_flags_verbose_multiline", "tests/test_parser.py::TestEarleyStandard::test_token_multiline_only_works_with_x_flag", "tests/test_parser.py::TestEarleyStandard::test_token_not_anon", "tests/test_parser.py::TestEarleyStandard::test_twice_empty", "tests/test_parser.py::TestEarleyStandard::test_undefined_ignore", "tests/test_parser.py::TestEarleyStandard::test_undefined_rule", "tests/test_parser.py::TestEarleyStandard::test_undefined_token", "tests/test_parser.py::TestEarleyStandard::test_unicode", "tests/test_parser.py::TestEarleyStandard::test_unicode2", "tests/test_parser.py::TestEarleyStandard::test_unicode3", "tests/test_parser.py::TestEarleyStandard::test_unicode_class", "tests/test_parser.py::TestEarleyStandard::test_unicode_literal_range_escape", "tests/test_parser.py::TestEarleyStandard::test_unicode_word", "tests/test_parser.py::TestEarleyStandard::test_utf8", "tests/test_parser.py::TestCykStandard::test_alias", "tests/test_parser.py::TestCykStandard::test_alias_in_terminal", "tests/test_parser.py::TestCykStandard::test_backslash", "tests/test_parser.py::TestCykStandard::test_backslash2", 
"tests/test_parser.py::TestCykStandard::test_basic1", "tests/test_parser.py::TestCykStandard::test_basic2", "tests/test_parser.py::TestCykStandard::test_bytes_utf8", "tests/test_parser.py::TestCykStandard::test_dont_expand1_lists_with_multiple_items", "tests/test_parser.py::TestCykStandard::test_dont_expand1_lists_with_multiple_items_2", "tests/test_parser.py::TestCykStandard::test_escaped_string", "tests/test_parser.py::TestCykStandard::test_expand1_lists_with_one_item", "tests/test_parser.py::TestCykStandard::test_expand1_lists_with_one_item_2", "tests/test_parser.py::TestCykStandard::test_float_without_lexer", "tests/test_parser.py::TestCykStandard::test_g_regex_flags", "tests/test_parser.py::TestCykStandard::test_hex_escape", "tests/test_parser.py::TestCykStandard::test_hex_literal_range_escape", "tests/test_parser.py::TestCykStandard::test_import", "tests/test_parser.py::TestCykStandard::test_import_errors", "tests/test_parser.py::TestCykStandard::test_import_rename", "tests/test_parser.py::TestCykStandard::test_join_regex_flags", "tests/test_parser.py::TestCykStandard::test_keep_all_tokens", "tests/test_parser.py::TestCykStandard::test_lexer_detect_newline_tokens", "tests/test_parser.py::TestCykStandard::test_lexer_prioritization", "tests/test_parser.py::TestCykStandard::test_lexer_token_limit", "tests/test_parser.py::TestCykStandard::test_line_and_column", "tests/test_parser.py::TestCykStandard::test_line_counting", "tests/test_parser.py::TestCykStandard::test_meddling_unused", "tests/test_parser.py::TestCykStandard::test_multi_import", "tests/test_parser.py::TestCykStandard::test_multi_start", "tests/test_parser.py::TestCykStandard::test_postlex_declare", "tests/test_parser.py::TestCykStandard::test_priority_vs_embedded", "tests/test_parser.py::TestCykStandard::test_ranged_repeat_rules", "tests/test_parser.py::TestCykStandard::test_ranged_repeat_terms", "tests/test_parser.py::TestCykStandard::test_reduce_cycle", "tests/test_parser.py::TestCykStandard::test_regex_escaping", "tests/test_parser.py::TestCykStandard::test_regex_quote", "tests/test_parser.py::TestCykStandard::test_relative_import", "tests/test_parser.py::TestCykStandard::test_relative_import_of_nested_grammar", "tests/test_parser.py::TestCykStandard::test_relative_import_preserves_leading_underscore", "tests/test_parser.py::TestCykStandard::test_relative_import_rename", "tests/test_parser.py::TestCykStandard::test_relative_import_rules_dependencies_imported_only_once", "tests/test_parser.py::TestCykStandard::test_relative_import_unicode", "tests/test_parser.py::TestCykStandard::test_relative_multi_import", "tests/test_parser.py::TestCykStandard::test_relative_rule_import", "tests/test_parser.py::TestCykStandard::test_relative_rule_import_drop_ignore", "tests/test_parser.py::TestCykStandard::test_relative_rule_import_rename", "tests/test_parser.py::TestCykStandard::test_relative_rule_import_subrule", "tests/test_parser.py::TestCykStandard::test_relative_rule_import_subrule_no_conflict", "tests/test_parser.py::TestCykStandard::test_rule_collision", "tests/test_parser.py::TestCykStandard::test_rule_collision2", "tests/test_parser.py::TestCykStandard::test_special_chars", "tests/test_parser.py::TestCykStandard::test_start", "tests/test_parser.py::TestCykStandard::test_stringio_unicode", "tests/test_parser.py::TestCykStandard::test_templates", "tests/test_parser.py::TestCykStandard::test_templates_alias", "tests/test_parser.py::TestCykStandard::test_templates_import", 
"tests/test_parser.py::TestCykStandard::test_templates_modifiers", "tests/test_parser.py::TestCykStandard::test_templates_recursion", "tests/test_parser.py::TestCykStandard::test_templates_templates", "tests/test_parser.py::TestCykStandard::test_token_collision", "tests/test_parser.py::TestCykStandard::test_token_collision2", "tests/test_parser.py::TestCykStandard::test_token_collision_WS", "tests/test_parser.py::TestCykStandard::test_token_ebnf", "tests/test_parser.py::TestCykStandard::test_token_flags", "tests/test_parser.py::TestCykStandard::test_token_flags2", "tests/test_parser.py::TestCykStandard::test_token_flags3", "tests/test_parser.py::TestCykStandard::test_token_flags_verbose", "tests/test_parser.py::TestCykStandard::test_token_flags_verbose_multiline", "tests/test_parser.py::TestCykStandard::test_token_multiline_only_works_with_x_flag", "tests/test_parser.py::TestCykStandard::test_token_not_anon", "tests/test_parser.py::TestCykStandard::test_undefined_ignore", "tests/test_parser.py::TestCykStandard::test_undefined_rule", "tests/test_parser.py::TestCykStandard::test_undefined_token", "tests/test_parser.py::TestCykStandard::test_unicode", "tests/test_parser.py::TestCykStandard::test_unicode2", "tests/test_parser.py::TestCykStandard::test_unicode3", "tests/test_parser.py::TestCykStandard::test_unicode_class", "tests/test_parser.py::TestCykStandard::test_unicode_literal_range_escape", "tests/test_parser.py::TestCykStandard::test_unicode_word", "tests/test_parser.py::TestCykStandard::test_utf8", "tests/test_parser.py::TestEarleyDynamic::test_alias", "tests/test_parser.py::TestEarleyDynamic::test_alias_in_terminal", "tests/test_parser.py::TestEarleyDynamic::test_backslash", "tests/test_parser.py::TestEarleyDynamic::test_backslash2", "tests/test_parser.py::TestEarleyDynamic::test_basic1", "tests/test_parser.py::TestEarleyDynamic::test_basic2", "tests/test_parser.py::TestEarleyDynamic::test_bytes_utf8", "tests/test_parser.py::TestEarleyDynamic::test_dont_expand1_lists_with_multiple_items", "tests/test_parser.py::TestEarleyDynamic::test_dont_expand1_lists_with_multiple_items_2", "tests/test_parser.py::TestEarleyDynamic::test_empty", "tests/test_parser.py::TestEarleyDynamic::test_empty_end", "tests/test_parser.py::TestEarleyDynamic::test_empty_expand1_list", "tests/test_parser.py::TestEarleyDynamic::test_empty_expand1_list_2", "tests/test_parser.py::TestEarleyDynamic::test_empty_flatten_list", "tests/test_parser.py::TestEarleyDynamic::test_escaped_string", "tests/test_parser.py::TestEarleyDynamic::test_expand1_lists_with_one_item", "tests/test_parser.py::TestEarleyDynamic::test_expand1_lists_with_one_item_2", "tests/test_parser.py::TestEarleyDynamic::test_float_without_lexer", "tests/test_parser.py::TestEarleyDynamic::test_g_regex_flags", "tests/test_parser.py::TestEarleyDynamic::test_hex_escape", "tests/test_parser.py::TestEarleyDynamic::test_hex_literal_range_escape", "tests/test_parser.py::TestEarleyDynamic::test_ignore", "tests/test_parser.py::TestEarleyDynamic::test_import", "tests/test_parser.py::TestEarleyDynamic::test_import_errors", "tests/test_parser.py::TestEarleyDynamic::test_import_rename", "tests/test_parser.py::TestEarleyDynamic::test_join_regex_flags", "tests/test_parser.py::TestEarleyDynamic::test_keep_all_tokens", "tests/test_parser.py::TestEarleyDynamic::test_lexer_detect_newline_tokens", "tests/test_parser.py::TestEarleyDynamic::test_lexer_token_limit", "tests/test_parser.py::TestEarleyDynamic::test_line_and_column", 
"tests/test_parser.py::TestEarleyDynamic::test_line_counting", "tests/test_parser.py::TestEarleyDynamic::test_maybe", "tests/test_parser.py::TestEarleyDynamic::test_maybe_placeholders", "tests/test_parser.py::TestEarleyDynamic::test_meddling_unused", "tests/test_parser.py::TestEarleyDynamic::test_multi_import", "tests/test_parser.py::TestEarleyDynamic::test_multi_start", "tests/test_parser.py::TestEarleyDynamic::test_prioritization", "tests/test_parser.py::TestEarleyDynamic::test_prioritization_sum", "tests/test_parser.py::TestEarleyDynamic::test_ranged_repeat_rules", "tests/test_parser.py::TestEarleyDynamic::test_ranged_repeat_terms", "tests/test_parser.py::TestEarleyDynamic::test_reduce_cycle", "tests/test_parser.py::TestEarleyDynamic::test_regex_escaping", "tests/test_parser.py::TestEarleyDynamic::test_regex_quote", "tests/test_parser.py::TestEarleyDynamic::test_relative_import", "tests/test_parser.py::TestEarleyDynamic::test_relative_import_of_nested_grammar", "tests/test_parser.py::TestEarleyDynamic::test_relative_import_preserves_leading_underscore", "tests/test_parser.py::TestEarleyDynamic::test_relative_import_rename", "tests/test_parser.py::TestEarleyDynamic::test_relative_import_rules_dependencies_imported_only_once", "tests/test_parser.py::TestEarleyDynamic::test_relative_import_unicode", "tests/test_parser.py::TestEarleyDynamic::test_relative_multi_import", "tests/test_parser.py::TestEarleyDynamic::test_relative_rule_import", "tests/test_parser.py::TestEarleyDynamic::test_relative_rule_import_drop_ignore", "tests/test_parser.py::TestEarleyDynamic::test_relative_rule_import_rename", "tests/test_parser.py::TestEarleyDynamic::test_relative_rule_import_subrule", "tests/test_parser.py::TestEarleyDynamic::test_relative_rule_import_subrule_no_conflict", "tests/test_parser.py::TestEarleyDynamic::test_rule_collision", "tests/test_parser.py::TestEarleyDynamic::test_rule_collision2", "tests/test_parser.py::TestEarleyDynamic::test_special_chars", "tests/test_parser.py::TestEarleyDynamic::test_stack_for_ebnf", "tests/test_parser.py::TestEarleyDynamic::test_start", "tests/test_parser.py::TestEarleyDynamic::test_stringio_unicode", "tests/test_parser.py::TestEarleyDynamic::test_templates", "tests/test_parser.py::TestEarleyDynamic::test_templates_alias", "tests/test_parser.py::TestEarleyDynamic::test_templates_import", "tests/test_parser.py::TestEarleyDynamic::test_templates_modifiers", "tests/test_parser.py::TestEarleyDynamic::test_templates_recursion", "tests/test_parser.py::TestEarleyDynamic::test_templates_templates", "tests/test_parser.py::TestEarleyDynamic::test_token_collision", "tests/test_parser.py::TestEarleyDynamic::test_token_collision2", "tests/test_parser.py::TestEarleyDynamic::test_token_collision_WS", "tests/test_parser.py::TestEarleyDynamic::test_token_ebnf", "tests/test_parser.py::TestEarleyDynamic::test_token_flags", "tests/test_parser.py::TestEarleyDynamic::test_token_flags2", "tests/test_parser.py::TestEarleyDynamic::test_token_flags3", "tests/test_parser.py::TestEarleyDynamic::test_token_flags_verbose", "tests/test_parser.py::TestEarleyDynamic::test_token_flags_verbose_multiline", "tests/test_parser.py::TestEarleyDynamic::test_token_multiline_only_works_with_x_flag", "tests/test_parser.py::TestEarleyDynamic::test_token_not_anon", "tests/test_parser.py::TestEarleyDynamic::test_twice_empty", "tests/test_parser.py::TestEarleyDynamic::test_undefined_ignore", "tests/test_parser.py::TestEarleyDynamic::test_undefined_rule", 
"tests/test_parser.py::TestEarleyDynamic::test_undefined_token", "tests/test_parser.py::TestEarleyDynamic::test_unicode", "tests/test_parser.py::TestEarleyDynamic::test_unicode2", "tests/test_parser.py::TestEarleyDynamic::test_unicode3", "tests/test_parser.py::TestEarleyDynamic::test_unicode_class", "tests/test_parser.py::TestEarleyDynamic::test_unicode_literal_range_escape", "tests/test_parser.py::TestEarleyDynamic::test_unicode_word", "tests/test_parser.py::TestEarleyDynamic::test_utf8", "tests/test_parser.py::TestEarleyDynamic_complete::test_alias", "tests/test_parser.py::TestEarleyDynamic_complete::test_alias_in_terminal", "tests/test_parser.py::TestEarleyDynamic_complete::test_backslash", "tests/test_parser.py::TestEarleyDynamic_complete::test_backslash2", "tests/test_parser.py::TestEarleyDynamic_complete::test_basic1", "tests/test_parser.py::TestEarleyDynamic_complete::test_basic2", "tests/test_parser.py::TestEarleyDynamic_complete::test_bytes_utf8", "tests/test_parser.py::TestEarleyDynamic_complete::test_dont_expand1_lists_with_multiple_items", "tests/test_parser.py::TestEarleyDynamic_complete::test_dont_expand1_lists_with_multiple_items_2", "tests/test_parser.py::TestEarleyDynamic_complete::test_empty", "tests/test_parser.py::TestEarleyDynamic_complete::test_empty_end", "tests/test_parser.py::TestEarleyDynamic_complete::test_empty_expand1_list", "tests/test_parser.py::TestEarleyDynamic_complete::test_empty_expand1_list_2", "tests/test_parser.py::TestEarleyDynamic_complete::test_empty_flatten_list", "tests/test_parser.py::TestEarleyDynamic_complete::test_escaped_string", "tests/test_parser.py::TestEarleyDynamic_complete::test_expand1_lists_with_one_item", "tests/test_parser.py::TestEarleyDynamic_complete::test_expand1_lists_with_one_item_2", "tests/test_parser.py::TestEarleyDynamic_complete::test_float_without_lexer", "tests/test_parser.py::TestEarleyDynamic_complete::test_g_regex_flags", "tests/test_parser.py::TestEarleyDynamic_complete::test_hex_escape", "tests/test_parser.py::TestEarleyDynamic_complete::test_hex_literal_range_escape", "tests/test_parser.py::TestEarleyDynamic_complete::test_ignore", "tests/test_parser.py::TestEarleyDynamic_complete::test_import", "tests/test_parser.py::TestEarleyDynamic_complete::test_import_errors", "tests/test_parser.py::TestEarleyDynamic_complete::test_import_rename", "tests/test_parser.py::TestEarleyDynamic_complete::test_join_regex_flags", "tests/test_parser.py::TestEarleyDynamic_complete::test_keep_all_tokens", "tests/test_parser.py::TestEarleyDynamic_complete::test_lexer_detect_newline_tokens", "tests/test_parser.py::TestEarleyDynamic_complete::test_lexer_token_limit", "tests/test_parser.py::TestEarleyDynamic_complete::test_line_and_column", "tests/test_parser.py::TestEarleyDynamic_complete::test_line_counting", "tests/test_parser.py::TestEarleyDynamic_complete::test_maybe", "tests/test_parser.py::TestEarleyDynamic_complete::test_maybe_placeholders", "tests/test_parser.py::TestEarleyDynamic_complete::test_meddling_unused", "tests/test_parser.py::TestEarleyDynamic_complete::test_multi_import", "tests/test_parser.py::TestEarleyDynamic_complete::test_multi_start", "tests/test_parser.py::TestEarleyDynamic_complete::test_prioritization", "tests/test_parser.py::TestEarleyDynamic_complete::test_prioritization_sum", "tests/test_parser.py::TestEarleyDynamic_complete::test_ranged_repeat_rules", "tests/test_parser.py::TestEarleyDynamic_complete::test_ranged_repeat_terms", "tests/test_parser.py::TestEarleyDynamic_complete::test_reduce_cycle", 
"tests/test_parser.py::TestEarleyDynamic_complete::test_regex_escaping", "tests/test_parser.py::TestEarleyDynamic_complete::test_regex_quote", "tests/test_parser.py::TestEarleyDynamic_complete::test_relative_import", "tests/test_parser.py::TestEarleyDynamic_complete::test_relative_import_of_nested_grammar", "tests/test_parser.py::TestEarleyDynamic_complete::test_relative_import_preserves_leading_underscore", "tests/test_parser.py::TestEarleyDynamic_complete::test_relative_import_rename", "tests/test_parser.py::TestEarleyDynamic_complete::test_relative_import_rules_dependencies_imported_only_once", "tests/test_parser.py::TestEarleyDynamic_complete::test_relative_import_unicode", "tests/test_parser.py::TestEarleyDynamic_complete::test_relative_multi_import", "tests/test_parser.py::TestEarleyDynamic_complete::test_relative_rule_import", "tests/test_parser.py::TestEarleyDynamic_complete::test_relative_rule_import_drop_ignore", "tests/test_parser.py::TestEarleyDynamic_complete::test_relative_rule_import_rename", "tests/test_parser.py::TestEarleyDynamic_complete::test_relative_rule_import_subrule", "tests/test_parser.py::TestEarleyDynamic_complete::test_relative_rule_import_subrule_no_conflict", "tests/test_parser.py::TestEarleyDynamic_complete::test_rule_collision", "tests/test_parser.py::TestEarleyDynamic_complete::test_rule_collision2", "tests/test_parser.py::TestEarleyDynamic_complete::test_special_chars", "tests/test_parser.py::TestEarleyDynamic_complete::test_stack_for_ebnf", "tests/test_parser.py::TestEarleyDynamic_complete::test_start", "tests/test_parser.py::TestEarleyDynamic_complete::test_stringio_unicode", "tests/test_parser.py::TestEarleyDynamic_complete::test_templates", "tests/test_parser.py::TestEarleyDynamic_complete::test_templates_alias", "tests/test_parser.py::TestEarleyDynamic_complete::test_templates_import", "tests/test_parser.py::TestEarleyDynamic_complete::test_templates_modifiers", "tests/test_parser.py::TestEarleyDynamic_complete::test_templates_recursion", "tests/test_parser.py::TestEarleyDynamic_complete::test_templates_templates", "tests/test_parser.py::TestEarleyDynamic_complete::test_token_collision", "tests/test_parser.py::TestEarleyDynamic_complete::test_token_collision2", "tests/test_parser.py::TestEarleyDynamic_complete::test_token_collision_WS", "tests/test_parser.py::TestEarleyDynamic_complete::test_token_ebnf", "tests/test_parser.py::TestEarleyDynamic_complete::test_token_flags", "tests/test_parser.py::TestEarleyDynamic_complete::test_token_flags2", "tests/test_parser.py::TestEarleyDynamic_complete::test_token_flags3", "tests/test_parser.py::TestEarleyDynamic_complete::test_token_flags_verbose", "tests/test_parser.py::TestEarleyDynamic_complete::test_token_flags_verbose_multiline", "tests/test_parser.py::TestEarleyDynamic_complete::test_token_multiline_only_works_with_x_flag", "tests/test_parser.py::TestEarleyDynamic_complete::test_token_not_anon", "tests/test_parser.py::TestEarleyDynamic_complete::test_twice_empty", "tests/test_parser.py::TestEarleyDynamic_complete::test_undefined_ignore", "tests/test_parser.py::TestEarleyDynamic_complete::test_undefined_rule", "tests/test_parser.py::TestEarleyDynamic_complete::test_undefined_token", "tests/test_parser.py::TestEarleyDynamic_complete::test_unicode", "tests/test_parser.py::TestEarleyDynamic_complete::test_unicode2", "tests/test_parser.py::TestEarleyDynamic_complete::test_unicode3", "tests/test_parser.py::TestEarleyDynamic_complete::test_unicode_class", 
"tests/test_parser.py::TestEarleyDynamic_complete::test_unicode_literal_range_escape", "tests/test_parser.py::TestEarleyDynamic_complete::test_unicode_word", "tests/test_parser.py::TestEarleyDynamic_complete::test_utf8", "tests/test_parser.py::TestLalrStandard::test_alias", "tests/test_parser.py::TestLalrStandard::test_alias_in_terminal", "tests/test_parser.py::TestLalrStandard::test_backslash", "tests/test_parser.py::TestLalrStandard::test_backslash2", "tests/test_parser.py::TestLalrStandard::test_basic1", "tests/test_parser.py::TestLalrStandard::test_basic2", "tests/test_parser.py::TestLalrStandard::test_bytes_utf8", "tests/test_parser.py::TestLalrStandard::test_dont_expand1_lists_with_multiple_items", "tests/test_parser.py::TestLalrStandard::test_dont_expand1_lists_with_multiple_items_2", "tests/test_parser.py::TestLalrStandard::test_empty", "tests/test_parser.py::TestLalrStandard::test_empty_end", "tests/test_parser.py::TestLalrStandard::test_empty_expand1_list", "tests/test_parser.py::TestLalrStandard::test_empty_expand1_list_2", "tests/test_parser.py::TestLalrStandard::test_empty_flatten_list", "tests/test_parser.py::TestLalrStandard::test_error_with_puppet", "tests/test_parser.py::TestLalrStandard::test_escaped_string", "tests/test_parser.py::TestLalrStandard::test_expand1_lists_with_one_item", "tests/test_parser.py::TestLalrStandard::test_expand1_lists_with_one_item_2", "tests/test_parser.py::TestLalrStandard::test_float_without_lexer", "tests/test_parser.py::TestLalrStandard::test_g_regex_flags", "tests/test_parser.py::TestLalrStandard::test_hex_escape", "tests/test_parser.py::TestLalrStandard::test_hex_literal_range_escape", "tests/test_parser.py::TestLalrStandard::test_ignore", "tests/test_parser.py::TestLalrStandard::test_import", "tests/test_parser.py::TestLalrStandard::test_import_errors", "tests/test_parser.py::TestLalrStandard::test_import_rename", "tests/test_parser.py::TestLalrStandard::test_join_regex_flags", "tests/test_parser.py::TestLalrStandard::test_keep_all_tokens", "tests/test_parser.py::TestLalrStandard::test_lexer_detect_newline_tokens", "tests/test_parser.py::TestLalrStandard::test_lexer_prioritization", "tests/test_parser.py::TestLalrStandard::test_lexer_token_limit", "tests/test_parser.py::TestLalrStandard::test_line_and_column", "tests/test_parser.py::TestLalrStandard::test_line_counting", "tests/test_parser.py::TestLalrStandard::test_maybe", "tests/test_parser.py::TestLalrStandard::test_maybe_placeholders", "tests/test_parser.py::TestLalrStandard::test_meddling_unused", "tests/test_parser.py::TestLalrStandard::test_multi_import", "tests/test_parser.py::TestLalrStandard::test_multi_start", "tests/test_parser.py::TestLalrStandard::test_postlex_declare", "tests/test_parser.py::TestLalrStandard::test_prioritization", "tests/test_parser.py::TestLalrStandard::test_priority_vs_embedded", "tests/test_parser.py::TestLalrStandard::test_ranged_repeat_rules", "tests/test_parser.py::TestLalrStandard::test_ranged_repeat_terms", "tests/test_parser.py::TestLalrStandard::test_reduce_cycle", "tests/test_parser.py::TestLalrStandard::test_regex_escaping", "tests/test_parser.py::TestLalrStandard::test_regex_quote", "tests/test_parser.py::TestLalrStandard::test_relative_import", "tests/test_parser.py::TestLalrStandard::test_relative_import_of_nested_grammar", "tests/test_parser.py::TestLalrStandard::test_relative_import_preserves_leading_underscore", "tests/test_parser.py::TestLalrStandard::test_relative_import_rename", 
"tests/test_parser.py::TestLalrStandard::test_relative_import_rules_dependencies_imported_only_once", "tests/test_parser.py::TestLalrStandard::test_relative_import_unicode", "tests/test_parser.py::TestLalrStandard::test_relative_multi_import", "tests/test_parser.py::TestLalrStandard::test_relative_rule_import", "tests/test_parser.py::TestLalrStandard::test_relative_rule_import_drop_ignore", "tests/test_parser.py::TestLalrStandard::test_relative_rule_import_rename", "tests/test_parser.py::TestLalrStandard::test_relative_rule_import_subrule", "tests/test_parser.py::TestLalrStandard::test_relative_rule_import_subrule_no_conflict", "tests/test_parser.py::TestLalrStandard::test_rule_collision", "tests/test_parser.py::TestLalrStandard::test_rule_collision2", "tests/test_parser.py::TestLalrStandard::test_serialize", "tests/test_parser.py::TestLalrStandard::test_special_chars", "tests/test_parser.py::TestLalrStandard::test_stack_for_ebnf", "tests/test_parser.py::TestLalrStandard::test_start", "tests/test_parser.py::TestLalrStandard::test_stringio_unicode", "tests/test_parser.py::TestLalrStandard::test_templates", "tests/test_parser.py::TestLalrStandard::test_templates_alias", "tests/test_parser.py::TestLalrStandard::test_templates_import", "tests/test_parser.py::TestLalrStandard::test_templates_modifiers", "tests/test_parser.py::TestLalrStandard::test_templates_recursion", "tests/test_parser.py::TestLalrStandard::test_templates_templates", "tests/test_parser.py::TestLalrStandard::test_token_collision", "tests/test_parser.py::TestLalrStandard::test_token_collision2", "tests/test_parser.py::TestLalrStandard::test_token_collision_WS", "tests/test_parser.py::TestLalrStandard::test_token_ebnf", "tests/test_parser.py::TestLalrStandard::test_token_flags", "tests/test_parser.py::TestLalrStandard::test_token_flags2", "tests/test_parser.py::TestLalrStandard::test_token_flags3", "tests/test_parser.py::TestLalrStandard::test_token_flags_verbose", "tests/test_parser.py::TestLalrStandard::test_token_flags_verbose_multiline", "tests/test_parser.py::TestLalrStandard::test_token_multiline_only_works_with_x_flag", "tests/test_parser.py::TestLalrStandard::test_token_not_anon", "tests/test_parser.py::TestLalrStandard::test_twice_empty", "tests/test_parser.py::TestLalrStandard::test_undefined_ignore", "tests/test_parser.py::TestLalrStandard::test_undefined_rule", "tests/test_parser.py::TestLalrStandard::test_undefined_token", "tests/test_parser.py::TestLalrStandard::test_unicode", "tests/test_parser.py::TestLalrStandard::test_unicode2", "tests/test_parser.py::TestLalrStandard::test_unicode3", "tests/test_parser.py::TestLalrStandard::test_unicode_class", "tests/test_parser.py::TestLalrStandard::test_unicode_literal_range_escape", "tests/test_parser.py::TestLalrStandard::test_unicode_word", "tests/test_parser.py::TestLalrStandard::test_utf8", "tests/test_parser.py::TestLalrContextual::test_alias", "tests/test_parser.py::TestLalrContextual::test_alias_in_terminal", "tests/test_parser.py::TestLalrContextual::test_backslash", "tests/test_parser.py::TestLalrContextual::test_backslash2", "tests/test_parser.py::TestLalrContextual::test_basic1", "tests/test_parser.py::TestLalrContextual::test_basic2", "tests/test_parser.py::TestLalrContextual::test_bytes_utf8", "tests/test_parser.py::TestLalrContextual::test_dont_expand1_lists_with_multiple_items", "tests/test_parser.py::TestLalrContextual::test_dont_expand1_lists_with_multiple_items_2", "tests/test_parser.py::TestLalrContextual::test_empty", 
"tests/test_parser.py::TestLalrContextual::test_empty_end", "tests/test_parser.py::TestLalrContextual::test_empty_expand1_list", "tests/test_parser.py::TestLalrContextual::test_empty_expand1_list_2", "tests/test_parser.py::TestLalrContextual::test_empty_flatten_list", "tests/test_parser.py::TestLalrContextual::test_error_with_puppet", "tests/test_parser.py::TestLalrContextual::test_escaped_string", "tests/test_parser.py::TestLalrContextual::test_expand1_lists_with_one_item", "tests/test_parser.py::TestLalrContextual::test_expand1_lists_with_one_item_2", "tests/test_parser.py::TestLalrContextual::test_float_without_lexer", "tests/test_parser.py::TestLalrContextual::test_g_regex_flags", "tests/test_parser.py::TestLalrContextual::test_hex_escape", "tests/test_parser.py::TestLalrContextual::test_hex_literal_range_escape", "tests/test_parser.py::TestLalrContextual::test_ignore", "tests/test_parser.py::TestLalrContextual::test_import", "tests/test_parser.py::TestLalrContextual::test_import_errors", "tests/test_parser.py::TestLalrContextual::test_import_rename", "tests/test_parser.py::TestLalrContextual::test_join_regex_flags", "tests/test_parser.py::TestLalrContextual::test_keep_all_tokens", "tests/test_parser.py::TestLalrContextual::test_lexer_detect_newline_tokens", "tests/test_parser.py::TestLalrContextual::test_lexer_token_limit", "tests/test_parser.py::TestLalrContextual::test_line_and_column", "tests/test_parser.py::TestLalrContextual::test_line_counting", "tests/test_parser.py::TestLalrContextual::test_maybe", "tests/test_parser.py::TestLalrContextual::test_maybe_placeholders", "tests/test_parser.py::TestLalrContextual::test_meddling_unused", "tests/test_parser.py::TestLalrContextual::test_multi_import", "tests/test_parser.py::TestLalrContextual::test_multi_start", "tests/test_parser.py::TestLalrContextual::test_postlex_declare", "tests/test_parser.py::TestLalrContextual::test_prioritization", "tests/test_parser.py::TestLalrContextual::test_priority_vs_embedded", "tests/test_parser.py::TestLalrContextual::test_ranged_repeat_rules", "tests/test_parser.py::TestLalrContextual::test_ranged_repeat_terms", "tests/test_parser.py::TestLalrContextual::test_reduce_cycle", "tests/test_parser.py::TestLalrContextual::test_regex_escaping", "tests/test_parser.py::TestLalrContextual::test_regex_quote", "tests/test_parser.py::TestLalrContextual::test_relative_import", "tests/test_parser.py::TestLalrContextual::test_relative_import_of_nested_grammar", "tests/test_parser.py::TestLalrContextual::test_relative_import_preserves_leading_underscore", "tests/test_parser.py::TestLalrContextual::test_relative_import_rename", "tests/test_parser.py::TestLalrContextual::test_relative_import_rules_dependencies_imported_only_once", "tests/test_parser.py::TestLalrContextual::test_relative_import_unicode", "tests/test_parser.py::TestLalrContextual::test_relative_multi_import", "tests/test_parser.py::TestLalrContextual::test_relative_rule_import", "tests/test_parser.py::TestLalrContextual::test_relative_rule_import_drop_ignore", "tests/test_parser.py::TestLalrContextual::test_relative_rule_import_rename", "tests/test_parser.py::TestLalrContextual::test_relative_rule_import_subrule", "tests/test_parser.py::TestLalrContextual::test_relative_rule_import_subrule_no_conflict", "tests/test_parser.py::TestLalrContextual::test_rule_collision", "tests/test_parser.py::TestLalrContextual::test_rule_collision2", "tests/test_parser.py::TestLalrContextual::test_serialize", "tests/test_parser.py::TestLalrContextual::test_special_chars", 
"tests/test_parser.py::TestLalrContextual::test_stack_for_ebnf", "tests/test_parser.py::TestLalrContextual::test_start", "tests/test_parser.py::TestLalrContextual::test_stringio_unicode", "tests/test_parser.py::TestLalrContextual::test_templates", "tests/test_parser.py::TestLalrContextual::test_templates_alias", "tests/test_parser.py::TestLalrContextual::test_templates_import", "tests/test_parser.py::TestLalrContextual::test_templates_modifiers", "tests/test_parser.py::TestLalrContextual::test_templates_recursion", "tests/test_parser.py::TestLalrContextual::test_templates_templates", "tests/test_parser.py::TestLalrContextual::test_token_collision", "tests/test_parser.py::TestLalrContextual::test_token_collision2", "tests/test_parser.py::TestLalrContextual::test_token_collision_WS", "tests/test_parser.py::TestLalrContextual::test_token_ebnf", "tests/test_parser.py::TestLalrContextual::test_token_flags", "tests/test_parser.py::TestLalrContextual::test_token_flags2", "tests/test_parser.py::TestLalrContextual::test_token_flags3", "tests/test_parser.py::TestLalrContextual::test_token_flags_verbose", "tests/test_parser.py::TestLalrContextual::test_token_flags_verbose_multiline", "tests/test_parser.py::TestLalrContextual::test_token_multiline_only_works_with_x_flag", "tests/test_parser.py::TestLalrContextual::test_token_not_anon", "tests/test_parser.py::TestLalrContextual::test_twice_empty", "tests/test_parser.py::TestLalrContextual::test_undefined_ignore", "tests/test_parser.py::TestLalrContextual::test_undefined_rule", "tests/test_parser.py::TestLalrContextual::test_undefined_token", "tests/test_parser.py::TestLalrContextual::test_unicode", "tests/test_parser.py::TestLalrContextual::test_unicode2", "tests/test_parser.py::TestLalrContextual::test_unicode3", "tests/test_parser.py::TestLalrContextual::test_unicode_class", "tests/test_parser.py::TestLalrContextual::test_unicode_literal_range_escape", "tests/test_parser.py::TestLalrContextual::test_unicode_word", "tests/test_parser.py::TestLalrContextual::test_utf8", "tests/test_parser.py::TestLalrCustom::test_alias", "tests/test_parser.py::TestLalrCustom::test_alias_in_terminal", "tests/test_parser.py::TestLalrCustom::test_backslash", "tests/test_parser.py::TestLalrCustom::test_backslash2", "tests/test_parser.py::TestLalrCustom::test_basic1", "tests/test_parser.py::TestLalrCustom::test_basic2", "tests/test_parser.py::TestLalrCustom::test_bytes_utf8", "tests/test_parser.py::TestLalrCustom::test_dont_expand1_lists_with_multiple_items", "tests/test_parser.py::TestLalrCustom::test_dont_expand1_lists_with_multiple_items_2", "tests/test_parser.py::TestLalrCustom::test_empty", "tests/test_parser.py::TestLalrCustom::test_empty_end", "tests/test_parser.py::TestLalrCustom::test_empty_expand1_list", "tests/test_parser.py::TestLalrCustom::test_empty_expand1_list_2", "tests/test_parser.py::TestLalrCustom::test_empty_flatten_list", "tests/test_parser.py::TestLalrCustom::test_error_with_puppet", "tests/test_parser.py::TestLalrCustom::test_escaped_string", "tests/test_parser.py::TestLalrCustom::test_expand1_lists_with_one_item", "tests/test_parser.py::TestLalrCustom::test_expand1_lists_with_one_item_2", "tests/test_parser.py::TestLalrCustom::test_float_without_lexer", "tests/test_parser.py::TestLalrCustom::test_g_regex_flags", "tests/test_parser.py::TestLalrCustom::test_hex_escape", "tests/test_parser.py::TestLalrCustom::test_hex_literal_range_escape", "tests/test_parser.py::TestLalrCustom::test_ignore", "tests/test_parser.py::TestLalrCustom::test_import", 
"tests/test_parser.py::TestLalrCustom::test_import_errors", "tests/test_parser.py::TestLalrCustom::test_import_rename", "tests/test_parser.py::TestLalrCustom::test_join_regex_flags", "tests/test_parser.py::TestLalrCustom::test_keep_all_tokens", "tests/test_parser.py::TestLalrCustom::test_lexer_detect_newline_tokens", "tests/test_parser.py::TestLalrCustom::test_lexer_token_limit", "tests/test_parser.py::TestLalrCustom::test_line_and_column", "tests/test_parser.py::TestLalrCustom::test_line_counting", "tests/test_parser.py::TestLalrCustom::test_maybe", "tests/test_parser.py::TestLalrCustom::test_maybe_placeholders", "tests/test_parser.py::TestLalrCustom::test_meddling_unused", "tests/test_parser.py::TestLalrCustom::test_multi_import", "tests/test_parser.py::TestLalrCustom::test_multi_start", "tests/test_parser.py::TestLalrCustom::test_postlex_declare", "tests/test_parser.py::TestLalrCustom::test_prioritization", "tests/test_parser.py::TestLalrCustom::test_priority_vs_embedded", "tests/test_parser.py::TestLalrCustom::test_ranged_repeat_rules", "tests/test_parser.py::TestLalrCustom::test_ranged_repeat_terms", "tests/test_parser.py::TestLalrCustom::test_reduce_cycle", "tests/test_parser.py::TestLalrCustom::test_regex_escaping", "tests/test_parser.py::TestLalrCustom::test_regex_quote", "tests/test_parser.py::TestLalrCustom::test_relative_import", "tests/test_parser.py::TestLalrCustom::test_relative_import_of_nested_grammar", "tests/test_parser.py::TestLalrCustom::test_relative_import_preserves_leading_underscore", "tests/test_parser.py::TestLalrCustom::test_relative_import_rename", "tests/test_parser.py::TestLalrCustom::test_relative_import_rules_dependencies_imported_only_once", "tests/test_parser.py::TestLalrCustom::test_relative_import_unicode", "tests/test_parser.py::TestLalrCustom::test_relative_multi_import", "tests/test_parser.py::TestLalrCustom::test_relative_rule_import", "tests/test_parser.py::TestLalrCustom::test_relative_rule_import_drop_ignore", "tests/test_parser.py::TestLalrCustom::test_relative_rule_import_rename", "tests/test_parser.py::TestLalrCustom::test_relative_rule_import_subrule", "tests/test_parser.py::TestLalrCustom::test_relative_rule_import_subrule_no_conflict", "tests/test_parser.py::TestLalrCustom::test_rule_collision", "tests/test_parser.py::TestLalrCustom::test_rule_collision2", "tests/test_parser.py::TestLalrCustom::test_special_chars", "tests/test_parser.py::TestLalrCustom::test_stack_for_ebnf", "tests/test_parser.py::TestLalrCustom::test_start", "tests/test_parser.py::TestLalrCustom::test_stringio_unicode", "tests/test_parser.py::TestLalrCustom::test_templates", "tests/test_parser.py::TestLalrCustom::test_templates_alias", "tests/test_parser.py::TestLalrCustom::test_templates_import", "tests/test_parser.py::TestLalrCustom::test_templates_modifiers", "tests/test_parser.py::TestLalrCustom::test_templates_recursion", "tests/test_parser.py::TestLalrCustom::test_templates_templates", "tests/test_parser.py::TestLalrCustom::test_token_collision", "tests/test_parser.py::TestLalrCustom::test_token_collision2", "tests/test_parser.py::TestLalrCustom::test_token_collision_WS", "tests/test_parser.py::TestLalrCustom::test_token_ebnf", "tests/test_parser.py::TestLalrCustom::test_token_flags", "tests/test_parser.py::TestLalrCustom::test_token_flags2", "tests/test_parser.py::TestLalrCustom::test_token_flags3", "tests/test_parser.py::TestLalrCustom::test_token_flags_verbose", "tests/test_parser.py::TestLalrCustom::test_token_flags_verbose_multiline", 
"tests/test_parser.py::TestLalrCustom::test_token_multiline_only_works_with_x_flag", "tests/test_parser.py::TestLalrCustom::test_token_not_anon", "tests/test_parser.py::TestLalrCustom::test_twice_empty", "tests/test_parser.py::TestLalrCustom::test_undefined_ignore", "tests/test_parser.py::TestLalrCustom::test_undefined_rule", "tests/test_parser.py::TestLalrCustom::test_undefined_token", "tests/test_parser.py::TestLalrCustom::test_unicode", "tests/test_parser.py::TestLalrCustom::test_unicode2", "tests/test_parser.py::TestLalrCustom::test_unicode3", "tests/test_parser.py::TestLalrCustom::test_unicode_class", "tests/test_parser.py::TestLalrCustom::test_unicode_literal_range_escape", "tests/test_parser.py::TestLalrCustom::test_unicode_word", "tests/test_parser.py::TestLalrCustom::test_utf8", "tests/test_parser.py::TestFullEarleyDynamic::test_ambiguity1", "tests/test_parser.py::TestFullEarleyDynamic::test_ambiguity2", "tests/test_parser.py::TestFullEarleyDynamic::test_ambiguous_intermediate_node", "tests/test_parser.py::TestFullEarleyDynamic::test_ambiguous_intermediate_node_conditionally_inlined_rule", "tests/test_parser.py::TestFullEarleyDynamic::test_ambiguous_intermediate_node_inlined_rule", "tests/test_parser.py::TestFullEarleyDynamic::test_ambiguous_intermediate_node_unnamed_token", "tests/test_parser.py::TestFullEarleyDynamic::test_ambiguous_symbol_and_intermediate_nodes", "tests/test_parser.py::TestFullEarleyDynamic::test_anon", "tests/test_parser.py::TestFullEarleyDynamic::test_earley", "tests/test_parser.py::TestFullEarleyDynamic::test_earley2", "tests/test_parser.py::TestFullEarleyDynamic::test_earley4", "tests/test_parser.py::TestFullEarleyDynamic::test_earley_explicit_ambiguity", "tests/test_parser.py::TestFullEarleyDynamic::test_earley_repeating_empty", "tests/test_parser.py::TestFullEarleyDynamic::test_fruitflies_ambig", "tests/test_parser.py::TestFullEarleyDynamic::test_nested_ambiguous_intermediate_nodes", "tests/test_parser.py::TestFullEarleyDynamic::test_nested_ambiguous_intermediate_nodes2", "tests/test_parser.py::TestFullEarleyDynamic::test_term_ambig_resolve", "tests/test_parser.py::TestFullEarleyDynamic_complete::test_ambiguity1", "tests/test_parser.py::TestFullEarleyDynamic_complete::test_ambiguity2", "tests/test_parser.py::TestFullEarleyDynamic_complete::test_ambiguous_intermediate_node", "tests/test_parser.py::TestFullEarleyDynamic_complete::test_ambiguous_intermediate_node_conditionally_inlined_rule", "tests/test_parser.py::TestFullEarleyDynamic_complete::test_ambiguous_intermediate_node_inlined_rule", "tests/test_parser.py::TestFullEarleyDynamic_complete::test_ambiguous_intermediate_node_unnamed_token", "tests/test_parser.py::TestFullEarleyDynamic_complete::test_ambiguous_symbol_and_intermediate_nodes", "tests/test_parser.py::TestFullEarleyDynamic_complete::test_anon", "tests/test_parser.py::TestFullEarleyDynamic_complete::test_earley", "tests/test_parser.py::TestFullEarleyDynamic_complete::test_earley2", "tests/test_parser.py::TestFullEarleyDynamic_complete::test_earley3", "tests/test_parser.py::TestFullEarleyDynamic_complete::test_earley4", "tests/test_parser.py::TestFullEarleyDynamic_complete::test_earley_explicit_ambiguity", "tests/test_parser.py::TestFullEarleyDynamic_complete::test_earley_repeating_empty", "tests/test_parser.py::TestFullEarleyDynamic_complete::test_explicit_ambiguity2", "tests/test_parser.py::TestFullEarleyDynamic_complete::test_fruitflies_ambig", "tests/test_parser.py::TestFullEarleyDynamic_complete::test_nested_ambiguous_intermediate_nodes", 
"tests/test_parser.py::TestFullEarleyDynamic_complete::test_nested_ambiguous_intermediate_nodes2", "tests/test_parser.py::TestFullEarleyDynamic_complete::test_term_ambig_resolve" ]
[]
MIT License
8,880
1,318
[ "lark/parsers/earley.py", "lark/parsers/earley_forest.py" ]
python-metar__python-metar-120
f0af09e5685083cd3cabe4fb44a28777b95b118a
2020-11-05 14:34:10
2f1be43f23f407152b43a4c2e290dc2dc77029ec
akrherz: Thanks for this contribution. This seems good to me, but I am not well versed in METAR runway state encoding. Will see what others chime in with prior to merging.
diff --git a/metar/Metar.py b/metar/Metar.py index 5ef2d76..e5d113d 100644 --- a/metar/Metar.py +++ b/metar/Metar.py @@ -109,12 +109,13 @@ COLOR_RE = re.compile( re.VERBOSE, ) RUNWAYSTATE_RE = re.compile( - r"""((?P<name>\d\d) | R(?P<namenew>\d\d)(RR?|LL?|C)?/?) + r"""((?P<snoclo>R/SNOCLO) | + ((?P<name>\d\d) | R(?P<namenew>\d\d)(RR?|LL?|C)?/?) ((?P<special> SNOCLO|CLRD(\d\d|//)) | (?P<deposit>(\d|/)) (?P<extent>(\d|/)) (?P<depth>(\d\d|//)) - (?P<friction>(\d\d|//)))\s+""", + (?P<friction>(\d\d|//))))\s+""", re.VERBOSE, ) TREND_RE = re.compile(r"^(?P<trend>TEMPO|BECMG|FCST|NOSIG)\s+")
Unparsed groups in body 'R/SNOCLO'

The [Manual on Codes](https://library.wmo.int/doc_num.php?explnum_id=10235) states:

> The state of the runway group shall be replaced by the abbreviation R/SNOCLO when the aerodrome is closed due to extreme deposit of snow.

Note that there is no runway designator, just the letter "R". However, the runway state regex for parsing the record requires a runway number, which leads to the "Unparsed groups in body" error in the title. Test record:

> METAR EDDC 032220Z 31005KT 5000 -SN BKN008 M01/M01 Q1020 R/SNOCLO
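As a quick illustration, a hedged smoke test of the record from the report, assuming a python-metar build that includes the amended `RUNWAYSTATE_RE` from the patch above:

```python
from metar import Metar

# The report from the issue; before the patch this raised
# "Unparsed groups in body", afterwards it parses cleanly.
obs = Metar.Metar(
    "METAR EDDC 032220Z 31005KT 5000 -SN BKN008 M01/M01 Q1020 R/SNOCLO"
)
assert obs.station_id == "EDDC"
```

The accompanying test patch exercises the same path through its `report()` helper, asserting that `R/SNOCLO`, `R09/CLRD//`, and `R01R/SNOCLO` all parse without leftover groups.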
python-metar/python-metar
diff --git a/test/test_metar.py b/test/test_metar.py index 00bd05f..d6818a1 100644 --- a/test/test_metar.py +++ b/test/test_metar.py @@ -556,6 +556,11 @@ def test_290_ranway_state(): assert report("09SNOCLO").remarks() == "" assert report("09CLRD//").remarks() == "" + assert report("R/SNOCLO").remarks() == "" + assert report("R09/CLRD//").remarks() == "" + + assert report("R01R/SNOCLO ").remarks() == "" + def test_300_parseTrend(): """Check parsing of trend forecasts."""
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_hyperlinks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 0 }, "num_modified_files": 1 }
1.8
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": [], "python": "3.9", "reqs_path": [ "requirements/base.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
exceptiongroup==1.2.2 iniconfig==2.1.0 -e git+https://github.com/python-metar/python-metar.git@f0af09e5685083cd3cabe4fb44a28777b95b118a#egg=metar packaging==24.2 pluggy==1.5.0 pytest==8.3.5 tomli==2.2.1
name: python-metar channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - exceptiongroup==1.2.2 - iniconfig==2.1.0 - packaging==24.2 - pluggy==1.5.0 - pytest==8.3.5 - tomli==2.2.1 prefix: /opt/conda/envs/python-metar
[ "test/test_metar.py::test_290_ranway_state" ]
[]
[ "test/test_metar.py::test_xlate_loc", "test/test_metar.py::test_module", "test/test_metar.py::test_issue114_multiplebecominggroups", "test/test_metar.py::test_issue84_trimequals[]", "test/test_metar.py::test_issue84_trimequals[=]", "test/test_metar.py::test_issue84_trimequals[=", "test/test_metar.py::test_issue77_ice_accretion[1]", "test/test_metar.py::test_issue77_ice_accretion[3]", "test/test_metar.py::test_issue77_ice_accretion[6]", "test/test_metar.py::test_issue64_cloudkeyerror", "test/test_metar.py::test_issue67_precip_text", "test/test_metar.py::test_issue40_runwayunits", "test/test_metar.py::test_issue107_runwayunits", "test/test_metar.py::test_issue26_runway_slashes[R28L/////]", "test/test_metar.py::test_issue26_runway_slashes[R28L/////FT]", "test/test_metar.py::test_issue26_runway_slashes[R28L//////]", "test/test_metar.py::test_issue26_runway_slashes[R28L/////N]", "test/test_metar.py::test_010_parseType_default", "test/test_metar.py::test_011_parseType_legal", "test/test_metar.py::test_020_parseStation_legal", "test/test_metar.py::test_021_parseStation_illegal", "test/test_metar.py::test_030_parseTime_legal", "test/test_metar.py::test_031_parseTime_specify_year", "test/test_metar.py::test_032_parseTime_specify_month", "test/test_metar.py::test_033_parseTime_auto_month", "test/test_metar.py::test_034_parseTime_auto_year", "test/test_metar.py::test_035_parseTime_suppress_auto_month", "test/test_metar.py::test_040_parseModifier_default", "test/test_metar.py::test_041_parseModifier", "test/test_metar.py::test_042_parseModifier_nonstd", "test/test_metar.py::test_043_parseModifier_illegal", "test/test_metar.py::test_140_parseWind", "test/test_metar.py::test_141_parseWind_nonstd", "test/test_metar.py::test_issue139_no_wind_unit", "test/test_metar.py::test_issue51_strict", "test/test_metar.py::test_142_parseWind_illegal", "test/test_metar.py::test_150_parseVisibility", "test/test_metar.py::test_151_parseVisibility_direction", "test/test_metar.py::test_152_parseVisibility_with_following_temperature", "test/test_metar.py::test_300_parseTrend", "test/test_metar.py::test_snowdepth", "test/test_metar.py::test_310_parse_sky_conditions", "test/test_metar.py::test_not_strict_mode", "test/test_metar.py::test_cor_auto_mod", "test/test_metar.py::test_slp_outside_remarks", "test/test_metar.py::test_wind_after_sky", "test/test_metar.py::test_issue136_temperature", "test/test_metar.py::test_windshear_runway_identifier" ]
[]
BSD License
8,883
309
[ "metar/Metar.py" ]
marshmallow-code__webargs-555
60a4a27143b4844294eb80fa3e8e29653d8f5a5f
2020-11-05 20:57:01
e5aff1f4ba4b1505e749732de699c27d01d67afc
diff --git a/src/webargs/falconparser.py b/src/webargs/falconparser.py index 5b4a21f..d2eb448 100644 --- a/src/webargs/falconparser.py +++ b/src/webargs/falconparser.py @@ -3,6 +3,8 @@ import falcon from falcon.util.uri import parse_query_string +import marshmallow as ma + from webargs import core from webargs.multidictproxy import MultiDictProxy @@ -69,7 +71,21 @@ class HTTPError(falcon.HTTPError): class FalconParser(core.Parser): - """Falcon request argument parser.""" + """Falcon request argument parser. + + Defaults to using the `media` location. See :py:meth:`~FalconParser.load_media` for + details on the media location.""" + + # by default, Falcon will use the 'media' location to load data + # + # this effectively looks the same as loading JSON data by default, but if + # you add a handler for a different media type to Falcon, webargs will + # automatically pick up on that capability + DEFAULT_LOCATION = "media" + DEFAULT_UNKNOWN_BY_LOCATION = dict( + media=ma.RAISE, **core.Parser.DEFAULT_UNKNOWN_BY_LOCATION + ) + __location_map__ = dict(media="load_media", **core.Parser.__location_map__) # Note on the use of MultiDictProxy throughout: # Falcon parses query strings and form values into ordinary dicts, but with @@ -95,6 +111,25 @@ class FalconParser(core.Parser): return form return MultiDictProxy(form, schema) + def load_media(self, req, schema): + """Return data unpacked and parsed by one of Falcon's media handlers. + By default, Falcon only handles JSON payloads. + + To configure additional media handlers, see the + `Falcon documentation on media types`__. + + .. _FalconMedia: https://falcon.readthedocs.io/en/stable/api/media.html + __ FalconMedia_ + + .. note:: + + The request stream will be read and left at EOF. + """ + # if there is no body, return missing instead of erroring + if req.content_length in (None, 0): + return core.missing + return req.media + def _raw_load_json(self, req): """Return a json payload from the request for the core parser's load_json
FalconParser should ideally support falcon's native media decoding

Falcon has a native media handling mechanism which can decode an incoming request body based on the `Content-Type` header and adds the dictionary of resulting key-value pairs as a cached property `req.media`. I've written my own FalconParser subclass that (very naively) uses this, but it seems like something that might be worth supporting out of the box.

```python
def parse_json(self, req, name, field):
    """Pull a JSON body value from the request.

    uses falcon's native req.media
    """
    json_data = self._cache.get("json_data")
    if json_data is None:
        self._cache["json_data"] = json_data = req.media
    return core.get_value(json_data, name, field, allow_many_nested=True)
```

This could probably be improved upon; since the `media` property is already cached on the request object, we could just access `req.media` directly without caching on the parser. (Not sure if this impacts other things that might use that cache, though; I haven't dug deep enough to fully understand that implication.)

Also, since `media` was added in Falcon 1.3, if webargs still wanted to support older versions of falcon we could add a check for it and fall back to the existing behavior. Maybe something like:

```python
def parse_json(self, req, name, field):
    """Pull a JSON body value from the request.

    .. note::

        The request stream will be read and left at EOF.
    """
    json_data = req.media if hasattr(req, 'media') else self._cache.get("json_data")
    if json_data is None:
        self._cache["json_data"] = json_data = parse_json_body(req)
    return core.get_value(json_data, name, field, allow_many_nested=True)
```
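For context, a minimal usage sketch of the merged change (the `media` location and `load_media` added in the patch above), modeled loosely on the `EchoMedia` resource in the test patch below. The route name and response shape are illustrative only, webargs' module-level `parser` instance is assumed, and `falcon.API()` reflects the Falcon 2.x era of this record (`falcon.App()` on Falcon 3.x).

```python
import falcon
from marshmallow import fields
from webargs.falconparser import parser

hello_args = {"name": fields.Str(required=True)}

class EchoMedia:
    def on_post(self, req, resp):
        # "media" is now the default location, so location= is shown only
        # for emphasis; Falcon's media handlers decode the request body.
        args = parser.parse(hello_args, req, location="media")
        resp.media = {"name": args["name"]}

app = falcon.API()  # falcon.App() on Falcon 3.x
app.add_route("/echo_media", EchoMedia())
```

Because `DEFAULT_LOCATION` is now `media`, the same handler works with any content type Falcon has a media handler registered for, not just JSON.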
marshmallow-code/webargs
diff --git a/src/webargs/testing.py b/src/webargs/testing.py index ca04040..23bf918 100644 --- a/src/webargs/testing.py +++ b/src/webargs/testing.py @@ -62,9 +62,6 @@ class CommonTestCase: def test_parse_querystring_default(self, testapp): assert testapp.get("/echo").json == {"name": "World"} - def test_parse_json_default(self, testapp): - assert testapp.post_json("/echo_json", {}).json == {"name": "World"} - def test_parse_json_with_charset(self, testapp): res = testapp.post( "/echo_json", diff --git a/tests/apps/falcon_app.py b/tests/apps/falcon_app.py index 314a35a..cb22529 100644 --- a/tests/apps/falcon_app.py +++ b/tests/apps/falcon_app.py @@ -37,6 +37,12 @@ class EchoJSON: resp.body = json.dumps(parsed) +class EchoMedia: + def on_post(self, req, resp): + parsed = parser.parse(hello_args, req, location="media") + resp.body = json.dumps(parsed) + + class EchoJSONOrForm: def on_post(self, req, resp): parsed = parser.parse(hello_args, req, location="json_or_form") @@ -161,6 +167,7 @@ def create_app(): app.add_route("/echo", Echo()) app.add_route("/echo_form", EchoForm()) app.add_route("/echo_json", EchoJSON()) + app.add_route("/echo_media", EchoMedia()) app.add_route("/echo_json_or_form", EchoJSONOrForm()) app.add_route("/echo_use_args", EchoUseArgs()) app.add_route("/echo_use_kwargs", EchoUseKwargs()) diff --git a/tests/test_falconparser.py b/tests/test_falconparser.py index 860c132..4f65313 100644 --- a/tests/test_falconparser.py +++ b/tests/test_falconparser.py @@ -16,28 +16,47 @@ class TestFalconParser(CommonTestCase): def test_use_args_hook(self, testapp): assert testapp.get("/echo_use_args_hook?name=Fred").json == {"name": "Fred"} + def test_parse_media(self, testapp): + assert testapp.post_json("/echo_media", {"name": "Fred"}).json == { + "name": "Fred" + } + + def test_parse_media_missing(self, testapp): + assert testapp.post("/echo_media", "").json == {"name": "World"} + + def test_parse_media_empty(self, testapp): + assert testapp.post_json("/echo_media", {}).json == {"name": "World"} + + def test_parse_media_error_unexpected_int(self, testapp): + res = testapp.post_json("/echo_media", 1, expect_errors=True) + assert res.status_code == 422 + # https://github.com/marshmallow-code/webargs/issues/427 - def test_parse_json_with_nonutf8_chars(self, testapp): + @pytest.mark.parametrize("path", ["/echo_json", "/echo_media"]) + def test_parse_json_with_nonutf8_chars(self, testapp, path): res = testapp.post( - "/echo_json", + path, b"\xfe", headers={"Accept": "application/json", "Content-Type": "application/json"}, expect_errors=True, ) assert res.status_code == 400 - assert res.json["errors"] == {"json": ["Invalid JSON body."]} + if path.endswith("json"): + assert res.json["errors"] == {"json": ["Invalid JSON body."]} # https://github.com/sloria/webargs/issues/329 - def test_invalid_json(self, testapp): + @pytest.mark.parametrize("path", ["/echo_json", "/echo_media"]) + def test_invalid_json(self, testapp, path): res = testapp.post( - "/echo_json", + path, '{"foo": "bar", }', headers={"Accept": "application/json", "Content-Type": "application/json"}, expect_errors=True, ) assert res.status_code == 400 - assert res.json["errors"] == {"json": ["Invalid JSON body."]} + if path.endswith("json"): + assert res.json["errors"] == {"json": ["Invalid JSON body."]} # Falcon converts headers to all-caps def test_parsing_headers(self, testapp):
{ "commit_name": "head_commit", "failed_lite_validators": [], "has_test_patch": true, "is_lite": true, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 2 }, "num_modified_files": 1 }
7.0
{ "env_vars": null, "env_yml_path": [], "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": [], "python": "3.9", "reqs_path": [ "examples/requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
aiohappyeyeballs==2.6.1 aiohttp==3.11.14 aiosignal==1.3.2 aniso8601==10.0.0 asgiref==3.8.1 async-timeout==5.0.1 attrs==25.3.0 beautifulsoup4==4.13.3 blinker==1.9.0 bottle==0.13.2 cachetools==5.5.2 cfgv==3.4.0 chardet==5.2.0 click==8.1.8 colorama==0.4.6 distlib==0.3.9 Django==4.2.20 exceptiongroup==1.2.2 falcon==4.0.2 filelock==3.18.0 flake8==3.8.4 flake8-bugbear==20.1.4 Flask==3.1.0 Flask-RESTful==0.3.10 frozenlist==1.5.0 hupper==1.12.1 identify==2.6.9 idna==3.10 importlib_metadata==8.6.1 iniconfig==2.1.0 itsdangerous==2.2.0 Jinja2==3.1.6 MarkupSafe==3.0.2 marshmallow==3.26.1 mccabe==0.6.1 multidict==6.2.0 mypy==0.790 mypy_extensions==0.4.4 nodeenv==1.9.1 packaging==24.2 PasteDeploy==3.1.0 plaster==1.1.2 plaster-pastedeploy==1.0.1 platformdirs==4.3.7 pluggy==1.5.0 pre-commit==2.21.0 propcache==0.3.1 pycodestyle==2.6.0 pyflakes==2.2.0 pyproject-api==1.9.0 pyramid==2.0.2 pytest==8.3.5 pytest-aiohttp==1.1.0 pytest-asyncio==0.26.0 python-dateutil==2.8.1 pytz==2025.2 PyYAML==6.0.2 six==1.17.0 soupsieve==2.6 sqlparse==0.5.3 tomli==2.2.1 tornado==6.4.2 tox==4.25.0 translationstring==1.4 typed-ast==1.4.3 typing_extensions==4.13.0 venusian==3.1.1 virtualenv==20.29.3 waitress==3.0.2 webapp2==3.0.0b1 -e git+https://github.com/marshmallow-code/webargs.git@60a4a27143b4844294eb80fa3e8e29653d8f5a5f#egg=webargs WebOb==1.8.9 WebTest==2.0.35 webtest-aiohttp==2.0.0 Werkzeug==3.1.3 yarl==1.18.3 zipp==3.21.0 zope.deprecation==5.1 zope.interface==7.2
name: webargs channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - aiohappyeyeballs==2.6.1 - aiohttp==3.11.14 - aiosignal==1.3.2 - aniso8601==10.0.0 - asgiref==3.8.1 - async-timeout==5.0.1 - attrs==25.3.0 - beautifulsoup4==4.13.3 - blinker==1.9.0 - bottle==0.13.2 - cachetools==5.5.2 - cfgv==3.4.0 - chardet==5.2.0 - click==8.1.8 - colorama==0.4.6 - distlib==0.3.9 - django==4.2.20 - exceptiongroup==1.2.2 - falcon==4.0.2 - filelock==3.18.0 - flake8==3.8.4 - flake8-bugbear==20.1.4 - flask==3.1.0 - flask-restful==0.3.10 - frozenlist==1.5.0 - hupper==1.12.1 - identify==2.6.9 - idna==3.10 - importlib-metadata==8.6.1 - iniconfig==2.1.0 - itsdangerous==2.2.0 - jinja2==3.1.6 - markupsafe==3.0.2 - marshmallow==3.26.1 - mccabe==0.6.1 - multidict==6.2.0 - mypy==0.790 - mypy-extensions==0.4.4 - nodeenv==1.9.1 - packaging==24.2 - pastedeploy==3.1.0 - plaster==1.1.2 - plaster-pastedeploy==1.0.1 - platformdirs==4.3.7 - pluggy==1.5.0 - pre-commit==2.21.0 - propcache==0.3.1 - pycodestyle==2.6.0 - pyflakes==2.2.0 - pyproject-api==1.9.0 - pyramid==2.0.2 - pytest==8.3.5 - pytest-aiohttp==1.1.0 - pytest-asyncio==0.26.0 - python-dateutil==2.8.1 - pytz==2025.2 - pyyaml==6.0.2 - six==1.17.0 - soupsieve==2.6 - sqlparse==0.5.3 - tomli==2.2.1 - tornado==6.4.2 - tox==4.25.0 - translationstring==1.4 - typed-ast==1.4.3 - typing-extensions==4.13.0 - venusian==3.1.1 - virtualenv==20.29.3 - waitress==3.0.2 - webapp2==3.0.0b1 - webargs==7.0.0b1 - webob==1.8.9 - webtest==2.0.35 - webtest-aiohttp==2.0.0 - werkzeug==3.1.3 - yarl==1.18.3 - zipp==3.21.0 - zope-deprecation==5.1 - zope-interface==7.2 prefix: /opt/conda/envs/webargs
[ "tests/test_falconparser.py::TestFalconParser::test_parse_media_error_unexpected_int", "tests/test_falconparser.py::TestFalconParser::test_parse_json_with_nonutf8_chars[/echo_media]", "tests/test_falconparser.py::TestFalconParser::test_invalid_json[/echo_media]" ]
[ "tests/test_falconparser.py::TestFalconParser::test_parse_querystring_args", "tests/test_falconparser.py::TestFalconParser::test_parse_form", "tests/test_falconparser.py::TestFalconParser::test_parse_json", "tests/test_falconparser.py::TestFalconParser::test_parse_json_missing", "tests/test_falconparser.py::TestFalconParser::test_parse_json_or_form", "tests/test_falconparser.py::TestFalconParser::test_parse_querystring_default", "tests/test_falconparser.py::TestFalconParser::test_parse_json_with_charset", "tests/test_falconparser.py::TestFalconParser::test_parse_json_with_vendor_media_type", "tests/test_falconparser.py::TestFalconParser::test_parse_ignore_extra_data", "tests/test_falconparser.py::TestFalconParser::test_parse_json_empty", "tests/test_falconparser.py::TestFalconParser::test_parse_json_many_schema", "tests/test_falconparser.py::TestFalconParser::test_parsing_form_default", "tests/test_falconparser.py::TestFalconParser::test_parse_querystring_multiple", "tests/test_falconparser.py::TestFalconParser::test_parse_querystring_multiple_single_value", "tests/test_falconparser.py::TestFalconParser::test_parse_form_multiple", "tests/test_falconparser.py::TestFalconParser::test_parse_json_list", "tests/test_falconparser.py::TestFalconParser::test_parse_json_with_nonascii_chars", "tests/test_falconparser.py::TestFalconParser::test_use_args_decorator", "tests/test_falconparser.py::TestFalconParser::test_use_args_with_path_param", "tests/test_falconparser.py::TestFalconParser::test_use_args_with_validation", "tests/test_falconparser.py::TestFalconParser::test_use_kwargs_decorator", "tests/test_falconparser.py::TestFalconParser::test_use_kwargs_with_path_param", "tests/test_falconparser.py::TestFalconParser::test_parsing_cookies", "tests/test_falconparser.py::TestFalconParser::test_parse_nested_json", "tests/test_falconparser.py::TestFalconParser::test_parse_nested_many_json", "tests/test_falconparser.py::TestFalconParser::test_parse_nested_many_missing", "tests/test_falconparser.py::TestFalconParser::test_empty_json", "tests/test_falconparser.py::TestFalconParser::test_empty_json_with_headers", "tests/test_falconparser.py::TestFalconParser::test_content_type_mismatch[/echo_json-{\"name\":", "tests/test_falconparser.py::TestFalconParser::test_content_type_mismatch[/echo_form-payload1-application/json]", "tests/test_falconparser.py::TestFalconParser::test_use_args_hook", "tests/test_falconparser.py::TestFalconParser::test_parse_media", "tests/test_falconparser.py::TestFalconParser::test_parse_media_missing", "tests/test_falconparser.py::TestFalconParser::test_parse_media_empty", "tests/test_falconparser.py::TestFalconParser::test_parsing_headers", "tests/test_falconparser.py::TestFalconParser::test_body_parsing_works_with_simulate" ]
[ "tests/test_falconparser.py::TestFalconParser::test_parse_json_error_unexpected_int", "tests/test_falconparser.py::TestFalconParser::test_parse_json_error_unexpected_list", "tests/test_falconparser.py::TestFalconParser::test_parse_json_many_schema_invalid_input", "tests/test_falconparser.py::TestFalconParser::test_parse_json_many_schema_error_malformed_data", "tests/test_falconparser.py::TestFalconParser::test_parse_json_list_error_malformed_data", "tests/test_falconparser.py::TestFalconParser::test_validation_error_returns_422_response", "tests/test_falconparser.py::TestFalconParser::test_user_validation_error_returns_422_response_by_default", "tests/test_falconparser.py::TestFalconParser::test_parse_json_with_nonutf8_chars[/echo_json]", "tests/test_falconparser.py::TestFalconParser::test_invalid_json[/echo_json]" ]
[]
MIT License
8,887
569
[ "src/webargs/falconparser.py" ]
Materials-Consortia__optimade-python-tools-590
31818d4b6254360ee1b6cc6bf967bd9c28aef9e2
2020-11-07 18:24:06
f9d3b8a8fdf5e438f2b7c746bcdefb661de9c86c
codecov[bot]: # [Codecov](https://codecov.io/gh/Materials-Consortia/optimade-python-tools/pull/590?src=pr&el=h1) Report > Merging [#590](https://codecov.io/gh/Materials-Consortia/optimade-python-tools/pull/590?src=pr&el=desc) (7dd4647) into [master](https://codecov.io/gh/Materials-Consortia/optimade-python-tools/commit/7e932df10c1159249b4096c255a284d5efa2ad4e?el=desc) (7e932df) will **increase** coverage by `0.02%`. > The diff coverage is `100.00%`. [![Impacted file tree graph](https://codecov.io/gh/Materials-Consortia/optimade-python-tools/pull/590/graphs/tree.svg?width=650&height=150&src=pr&token=UJAtmqkZZO)](https://codecov.io/gh/Materials-Consortia/optimade-python-tools/pull/590?src=pr&el=tree) ```diff @@ Coverage Diff @@ ## master #590 +/- ## ========================================== + Coverage 92.11% 92.14% +0.02% ========================================== Files 61 61 Lines 3210 3222 +12 ========================================== + Hits 2957 2969 +12 Misses 253 253 ``` | Flag | Coverage Δ | | |---|---|---| | project | `92.14% <100.00%> (+0.02%)` | :arrow_up: | | validator | `65.05% <66.66%> (+0.13%)` | :arrow_up: | Flags with carried forward coverage won't be shown. [Click here](https://docs.codecov.io/docs/carryforward-flags#carryforward-flags-in-the-pull-request-comment) to find out more. | [Impacted Files](https://codecov.io/gh/Materials-Consortia/optimade-python-tools/pull/590?src=pr&el=tree) | Coverage Δ | | |---|---|---| | [optimade/filtertransformers/mongo.py](https://codecov.io/gh/Materials-Consortia/optimade-python-tools/pull/590/diff?src=pr&el=tree#diff-b3B0aW1hZGUvZmlsdGVydHJhbnNmb3JtZXJzL21vbmdvLnB5) | `97.50% <100.00%> (+0.13%)` | :arrow_up: | ------ [Continue to review full report at Codecov](https://codecov.io/gh/Materials-Consortia/optimade-python-tools/pull/590?src=pr&el=continue). > **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta) > `Δ = absolute <relative> (impact)`, `ø = not affected`, `? = missing data` > Powered by [Codecov](https://codecov.io/gh/Materials-Consortia/optimade-python-tools/pull/590?src=pr&el=footer). Last update [7e932df...7dd4647](https://codecov.io/gh/Materials-Consortia/optimade-python-tools/pull/590?src=pr&el=lastupdated). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments). ml-evs: I think this was uncontroversial after our discussion on Slack a few weeks ago, so I'll merge these changes later today unless anyone stops me. ml-evs: > I think this was uncontroversial after our discussion on Slack a few weeks ago, so I'll merge these changes later today unless anyone stops me. Forgot about this PR, @shyamd would be good if you could have a look as this will affect MP
diff --git a/optimade/filtertransformers/mongo.py b/optimade/filtertransformers/mongo.py index 9b09844a..eb806e7a 100755 --- a/optimade/filtertransformers/mongo.py +++ b/optimade/filtertransformers/mongo.py @@ -1,7 +1,8 @@ import copy +import warnings from lark import v_args, Token from optimade.filtertransformers.base_transformer import BaseTransformer -from optimade.server.exceptions import BadRequest +from optimade.server.warnings import TimestampNotRFCCompliant __all__ = ("MongoTransformer",) @@ -42,6 +43,7 @@ class MongoTransformer(BaseTransformer): query = self._apply_length_operators(query) query = self._apply_unknown_or_null_filter(query) query = self._apply_mongo_id_filter(query) + query = self._apply_mongo_date_filter(query) return query @@ -395,8 +397,8 @@ class MongoTransformer(BaseTransformer): if operator not in ("$eq", "$ne"): if self.mapper is not None: prop = self.mapper.alias_of(prop) - raise BadRequest( - detail=f"Operator not supported for query on field {prop!r}, can only test for equality" + raise NotImplementedError( + f"Operator {operator} not supported for query on field {prop!r}, can only test for equality" ) if isinstance(val, str): subdict[prop][operator] = ObjectId(val) @@ -406,6 +408,44 @@ class MongoTransformer(BaseTransformer): filter_, check_for_id_key, replace_str_id_with_objectid ) + def _apply_mongo_date_filter(self, filter_: dict) -> dict: + """This method loops through the query and replaces any operations + on suspected timestamp properties with the corresponding operation + on a BSON `DateTime` type. + """ + + def check_for_timestamp_field(prop, _): + """ Find cases where the query dict is operating on a timestamp field. """ + if self.mapper is not None: + prop = self.mapper.alias_of(prop) + return prop == "last_modified" + + def replace_str_date_with_datetime(subdict, prop, expr): + """Encode suspected dates in with BSON. """ + import bson.json_util + + for operator in subdict[prop]: + query_datetime = bson.json_util.loads( + bson.json_util.dumps({"$date": subdict[prop][operator]}), + json_options=bson.json_util.DEFAULT_JSON_OPTIONS.with_options( + tz_aware=True, tzinfo=bson.tz_util.utc + ), + ) + if query_datetime.microsecond != 0: + warnings.warn( + f"Query for timestamp {subdict[prop][operator]!r} for field {prop!r} contained microseconds, which is not RFC3339 compliant. " + "This may cause undefined behaviour for the underlying database.", + TimestampNotRFCCompliant, + ) + + subdict[prop][operator] = query_datetime + + return subdict + + return recursive_postprocessing( + filter_, check_for_timestamp_field, replace_str_date_with_datetime + ) + def recursive_postprocessing(filter_, condition, replacement): """Recursively descend into the query, checking each dictionary diff --git a/optimade/models/jsonapi.py b/optimade/models/jsonapi.py index 910fc8ed..7938dffb 100644 --- a/optimade/models/jsonapi.py +++ b/optimade/models/jsonapi.py @@ -352,6 +352,13 @@ class Response(BaseModel): return values class Config: + """The specification mandates that datetimes must be encoded following + [RFC3339](https://tools.ietf.org/html/rfc3339), which does not support + fractional seconds, thus they must be stripped in the response. This can + cause issues when the underlying database contains fields that do include + microseconds, as filters may return unexpected results. 
+ """ + json_encoders = { datetime: lambda v: v.astimezone(timezone.utc).strftime( "%Y-%m-%dT%H:%M:%SZ" diff --git a/optimade/server/warnings.py b/optimade/server/warnings.py index 7ce40c8b..4fb60658 100644 --- a/optimade/server/warnings.py +++ b/optimade/server/warnings.py @@ -42,3 +42,10 @@ class QueryParamNotUsed(OptimadeWarning): class MissingExpectedField(OptimadeWarning): """A field was provided with a null value when a related field was provided with a value.""" + + +class TimestampNotRFCCompliant(OptimadeWarning): + """A timestamp has been used in a filter that contains microseconds and is thus not + RFC 3339 compliant. This may cause undefined behaviour in the query results. + + """ diff --git a/optimade/validator/config.py b/optimade/validator/config.py index de626cfa..cf499a16 100644 --- a/optimade/validator/config.py +++ b/optimade/validator/config.py @@ -71,14 +71,11 @@ _INCLUSIVE_OPERATORS = { "ENDS", ), DataType.TIMESTAMP: ( - "=", - "<=", + # "=" and "<=" are disabled due to issue with microseconds stored in database vs API response (see Materials-Consortia/optimade-python-tools/#606) + # ">=" is fine as all microsecond trimming will round times down + # "=", + # "<=", ">=", - "CONTAINS", - "STARTS WITH", - "STARTS", - "ENDS WITH", - "ENDS", ), DataType.INTEGER: ( "=", @@ -97,7 +94,7 @@ exclusive_ops = ("!=", "<", ">") _EXCLUSIVE_OPERATORS = { DataType.STRING: exclusive_ops, - DataType.TIMESTAMP: exclusive_ops, + DataType.TIMESTAMP: (), DataType.FLOAT: exclusive_ops, DataType.INTEGER: exclusive_ops, DataType.LIST: (), diff --git a/optimade/validator/validator.py b/optimade/validator/validator.py index 337137dd..959297e2 100644 --- a/optimade/validator/validator.py +++ b/optimade/validator/validator.py @@ -621,7 +621,7 @@ class ImplementationValidator: else: _test_value = test_value[0] - elif prop_type == DataType.STRING: + elif prop_type in (DataType.STRING, DataType.TIMESTAMP): _test_value = f'"{test_value}"' else: @@ -700,7 +700,7 @@ class ImplementationValidator: self._log.warning(msg) return None, msg - if prop_type in (DataType.DICTIONARY, DataType.TIMESTAMP): + if prop_type in (DataType.DICTIONARY,): msg = f"Not testing queries on field {prop} of type {prop_type}." self._log.warning(msg) return None, msg @@ -823,7 +823,7 @@ class ImplementationValidator: chosen_entry["id"] in set(entry["id"] for entry in response["data"]) ): raise ResponseError( - f"Objects {excluded} were not necessarily excluded by {query}" + f"Object {chosen_entry['id']} with value {test_value} was not excluded by {query}" ) # check that at least the archetypal structure was returned, unless we are using a fallback value
Missing support for timestamps/datetime in grammar

The grammar is currently missing support for timestamps (or `datetime.datetime` in terms of Python types). The relevant section in the spec may be found [here](https://github.com/Materials-Consortia/OPTiMaDe/blob/develop/optimade.rst#type-handling-and-conversions-in-comparisons). Specifically, we need to recognize timestamps as values of the form specified by the [RFC 3339 Internet Date/Time Format](https://tools.ietf.org/html/rfc3339#section-5.6) and convert them into `datetime.datetime` objects when they are valid; otherwise, the implementation should return a `400 Bad Request`.
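As a rough sketch of the validation/conversion step the issue asks for (this is not the project's actual implementation; the merged patch above instead converts suspected timestamp fields to BSON dates inside the Mongo transformer), an RFC 3339 string can be turned into a timezone-aware datetime roughly like this; the helper and exception names are invented:

```python
# Illustrative only: validate a filter value as an RFC 3339 timestamp and
# convert it to a timezone-aware datetime, flagging bad input for a
# "400 Bad Request" response. Names are made up for the example.
from datetime import datetime, timezone


class BadTimestamp(ValueError):
    """Raised when a filter value is not a usable RFC 3339 timestamp."""


def parse_rfc3339(value: str) -> datetime:
    # datetime.fromisoformat() rejects a trailing 'Z' before Python 3.11,
    # so normalise it to an explicit UTC offset first.
    normalised = value[:-1] + "+00:00" if value.endswith("Z") else value
    try:
        parsed = datetime.fromisoformat(normalised)
    except ValueError as exc:
        raise BadTimestamp(f"{value!r} is not a valid RFC 3339 timestamp") from exc
    # Treat a missing offset as UTC, matching the tz_aware BSON loading
    # used in the patch above.
    if parsed.tzinfo is None:
        parsed = parsed.replace(tzinfo=timezone.utc)
    return parsed


print(parse_rfc3339("2019-06-08T04:13:37Z"))  # 2019-06-08 04:13:37+00:00
```

Note that `fromisoformat` is a pragmatic shortcut rather than a strict RFC 3339 validator, so a production implementation would want a tighter check (e.g. a regex or the grammar rule itself).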
Materials-Consortia/optimade-python-tools
diff --git a/optimade/server/data/test_structures.json b/optimade/server/data/test_structures.json index 6bd7d291..0c4981c4 100644 --- a/optimade/server/data/test_structures.json +++ b/optimade/server/data/test_structures.json @@ -3562,7 +3562,7 @@ ], "formula_anonymous": "A26B8C2D2E2FGHI", "last_modified": { - "$date": "2019-06-08T05:13:37.945Z" + "$date": "2018-06-08T05:13:37.945Z" }, "lattice_vectors": [ [ diff --git a/tests/filtertransformers/test_mongo.py b/tests/filtertransformers/test_mongo.py index 6bcc9892..0696d1d4 100644 --- a/tests/filtertransformers/test_mongo.py +++ b/tests/filtertransformers/test_mongo.py @@ -3,7 +3,6 @@ import pytest from lark.exceptions import VisitError from optimade.filterparser import LarkParser, ParserError -from optimade.server.exceptions import BadRequest class TestMongoTransformer: @@ -361,6 +360,54 @@ class TestMongoTransformer: parser.parse("cartesian_site_positions LENGTH >= 3") ) == {"nsites": {"$gte": 3}} + def test_suspected_timestamp_fields(self, mapper): + import datetime + import bson.tz_util + from optimade.filtertransformers.mongo import MongoTransformer + from optimade.server.warnings import TimestampNotRFCCompliant + + example_RFC3339_date = "2019-06-08T04:13:37Z" + example_RFC3339_date_2 = "2019-06-08T04:13:37" + example_non_RFC3339_date = "2019-06-08T04:13:37.123Z" + + expected_datetime = datetime.datetime( + year=2019, + month=6, + day=8, + hour=4, + minute=13, + second=37, + microsecond=0, + tzinfo=bson.tz_util.utc, + ) + + assert self.transform(f'last_modified > "{example_RFC3339_date}"') == { + "last_modified": {"$gt": expected_datetime} + } + assert self.transform(f'last_modified > "{example_RFC3339_date_2}"') == { + "last_modified": {"$gt": expected_datetime} + } + + non_rfc_datetime = expected_datetime.replace(microsecond=123000) + + with pytest.warns(TimestampNotRFCCompliant): + assert self.transform(f'last_modified > "{example_non_RFC3339_date}"') == { + "last_modified": {"$gt": non_rfc_datetime} + } + + class MyMapper(mapper("StructureMapper")): + ALIASES = (("last_modified", "ctime"),) + + transformer = MongoTransformer(mapper=MyMapper()) + parser = LarkParser(version=self.version, variant=self.variant) + + assert transformer.transform( + parser.parse(f'last_modified > "{example_RFC3339_date}"') + ) == {"ctime": {"$gt": expected_datetime}} + assert transformer.transform( + parser.parse(f'last_modified > "{example_RFC3339_date_2}"') + ) == {"ctime": {"$gt": expected_datetime}} + def test_unaliased_length_operator(self): assert self.transform("cartesian_site_positions LENGTH <= 3") == { "cartesian_site_positions.4": {"$exists": False} @@ -400,7 +447,7 @@ class TestMongoTransformer: for op in ("CONTAINS", "STARTS WITH", "ENDS WITH", "HAS"): with pytest.raises( - BadRequest, + NotImplementedError, match=r".*not supported for query on field 'immutable_id', can only test for equality.*", ): transformer.transform(parser.parse(f'immutable_id {op} "abcdef"')) diff --git a/tests/server/query_params/test_filter.py b/tests/server/query_params/test_filter.py index 2633d162..285145dc 100644 --- a/tests/server/query_params/test_filter.py +++ b/tests/server/query_params/test_filter.py @@ -1,4 +1,5 @@ """Make sure response_fields is handled correctly""" +import pytest def test_custom_field(check_response): @@ -167,6 +168,30 @@ def test_list_correlated(check_error_response): # check_response(request, expected_ids) +def test_timestamp_query(check_response): + from optimade.server.warnings import TimestampNotRFCCompliant + + request = 
'/structures?filter=last_modified="2019-06-08T05:13:37.331Z"&page_limit=5' + expected_ids = ["mpf_1", "mpf_2", "mpf_3"] + with pytest.warns(TimestampNotRFCCompliant): + check_response(request, expected_ids, expected_as_is=True) + + request = '/structures?filter=last_modified<"2019-06-08T05:13:37.331Z"&page_limit=5' + expected_ids = ["mpf_3819"] + with pytest.warns(TimestampNotRFCCompliant): + check_response(request, expected_ids, expected_as_is=True) + + request = '/structures?filter=last_modified="2018-06-08T05:13:37.945Z"&page_limit=5' + expected_ids = ["mpf_3819"] + with pytest.warns(TimestampNotRFCCompliant): + check_response(request, expected_ids, expected_as_is=True) + + request = '/structures?filter=last_modified>"2018-06-08T05:13:37.945Z" AND last_modified<="2019-06-08T05:13:37.331Z"&page_limit=5' + expected_ids = ["mpf_1", "mpf_2", "mpf_3"] + with pytest.warns(TimestampNotRFCCompliant): + check_response(request, expected_ids, expected_as_is=True) + + def test_is_known(check_response): request = "/structures?filter=nsites IS KNOWN AND nsites>=44" expected_ids = ["mpf_551", "mpf_3803", "mpf_3819"]
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 3 }, "num_modified_files": 5 }
0.12
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.7", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
asgiref==3.6.0 astroid==2.12.14 attrs==24.2.0 beautifulsoup4==4.12.3 bracex==2.3.post1 certifi @ file:///croot/certifi_1671487769961/work/certifi cfgv==3.3.1 chardet==4.0.0 click==7.1.2 codecov==2.1.13 coverage==7.2.7 dill==0.3.7 distlib==0.3.9 Django==3.1.4 dnspython==2.3.0 elasticsearch==7.17.12 elasticsearch-dsl==7.3.0 email-validator==1.1.2 exceptiongroup==1.2.2 fastapi==0.63.0 filelock==3.12.2 ghp-import==2.1.0 h11==0.14.0 htmlmin==0.1.12 identify==2.5.24 idna==2.10 importlib-metadata==6.7.0 iniconfig==2.0.0 invoke==1.7.3 isort==5.11.5 Jinja2==2.11.2 jsmin==3.0.1 jsondiff==1.3.1 lark-parser==0.11.1 lazy-object-proxy==1.9.0 Markdown==3.4.4 MarkupSafe==2.1.5 mccabe==0.7.0 mergedeep==1.3.4 mkdocs==1.5.3 mkdocs-awesome-pages-plugin==2.9.2 mkdocs-material==6.2.8 mkdocs-material-extensions==1.2 mkdocs-minify-plugin==0.3.0 mkdocstrings==0.13.6 mongomock==3.22.0 natsort==8.4.0 nodeenv==1.9.1 numpy==1.21.6 -e git+https://github.com/Materials-Consortia/optimade-python-tools.git@31818d4b6254360ee1b6cc6bf967bd9c28aef9e2#egg=optimade packaging==24.0 pathspec==0.11.2 platformdirs==2.6.1 pluggy==1.2.0 pre-commit==2.21.0 py==1.11.0 pydantic==1.6.1 Pygments==2.17.2 pylint==2.15.10 pymdown-extensions==8.2 pymongo==3.11.2 pytest==6.2.5 pytest-cov==2.12.1 python-dateutil==2.9.0.post0 pytkdocs==0.9.0 pytz==2025.2 PyYAML==6.0.1 pyyaml_env_tag==0.1 requests==2.25.1 sentinels==1.0.0 six==1.17.0 soupsieve==2.4.1 sqlparse==0.4.4 starlette==0.13.6 toml==0.10.2 tomli==2.0.1 tomlkit==0.12.5 typed-ast==1.5.5 typing-extensions==3.10.0.2 urllib3==1.26.20 uvicorn==0.13.3 virtualenv==20.21.1 watchdog==3.0.0 wcmatch==8.4.1 wrapt==1.16.0 zipp==3.15.0
name: optimade-python-tools channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2022.12.7=py37h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=22.3.1=py37h06a4308_0 - python=3.7.16=h7a1cb2a_0 - readline=8.2=h5eee18b_0 - setuptools=65.6.3=py37h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.38.4=py37h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - asgiref==3.6.0 - astroid==2.12.14 - attrs==24.2.0 - beautifulsoup4==4.12.3 - bracex==2.3.post1 - cfgv==3.3.1 - chardet==4.0.0 - click==7.1.2 - codecov==2.1.13 - coverage==7.2.7 - dill==0.3.7 - distlib==0.3.9 - django==3.1.4 - dnspython==2.3.0 - elasticsearch==7.17.12 - elasticsearch-dsl==7.3.0 - email-validator==1.1.2 - exceptiongroup==1.2.2 - fastapi==0.63.0 - filelock==3.12.2 - ghp-import==2.1.0 - h11==0.14.0 - htmlmin==0.1.12 - identify==2.5.24 - idna==2.10 - importlib-metadata==6.7.0 - iniconfig==2.0.0 - invoke==1.7.3 - isort==5.11.5 - jinja2==2.11.2 - jsmin==3.0.1 - jsondiff==1.3.1 - lark-parser==0.11.1 - lazy-object-proxy==1.9.0 - markdown==3.4.4 - markupsafe==2.1.5 - mccabe==0.7.0 - mergedeep==1.3.4 - mkdocs==1.5.3 - mkdocs-awesome-pages-plugin==2.9.2 - mkdocs-material==6.2.8 - mkdocs-material-extensions==1.2 - mkdocs-minify-plugin==0.3.0 - mkdocstrings==0.13.6 - mongomock==3.22.0 - natsort==8.4.0 - nodeenv==1.9.1 - numpy==1.21.6 - packaging==24.0 - pathspec==0.11.2 - platformdirs==2.6.1 - pluggy==1.2.0 - pre-commit==2.21.0 - py==1.11.0 - pydantic==1.6.1 - pygments==2.17.2 - pylint==2.15.10 - pymdown-extensions==8.2 - pymongo==3.11.2 - pytest==6.2.5 - pytest-cov==2.12.1 - python-dateutil==2.9.0.post0 - pytkdocs==0.9.0 - pytz==2025.2 - pyyaml==6.0.1 - pyyaml-env-tag==0.1 - requests==2.25.1 - sentinels==1.0.0 - six==1.17.0 - soupsieve==2.4.1 - sqlparse==0.4.4 - starlette==0.13.6 - toml==0.10.2 - tomli==2.0.1 - tomlkit==0.12.5 - typed-ast==1.5.5 - typing-extensions==3.10.0.2 - urllib3==1.26.20 - uvicorn==0.13.3 - virtualenv==20.21.1 - watchdog==3.0.0 - wcmatch==8.4.1 - wrapt==1.16.0 - zipp==3.15.0 prefix: /opt/conda/envs/optimade-python-tools
[ "tests/filtertransformers/test_mongo.py::TestMongoTransformer::test_suspected_timestamp_fields", "tests/filtertransformers/test_mongo.py::TestMongoTransformer::test_mongo_special_id" ]
[]
[ "tests/filtertransformers/test_mongo.py::TestMongoTransformer::test_empty", "tests/filtertransformers/test_mongo.py::TestMongoTransformer::test_property_names", "tests/filtertransformers/test_mongo.py::TestMongoTransformer::test_string_values", "tests/filtertransformers/test_mongo.py::TestMongoTransformer::test_number_values", "tests/filtertransformers/test_mongo.py::TestMongoTransformer::test_simple_comparisons", "tests/filtertransformers/test_mongo.py::TestMongoTransformer::test_id", "tests/filtertransformers/test_mongo.py::TestMongoTransformer::test_operators", "tests/filtertransformers/test_mongo.py::TestMongoTransformer::test_filtering_on_relationships", "tests/filtertransformers/test_mongo.py::TestMongoTransformer::test_not_implemented", "tests/filtertransformers/test_mongo.py::TestMongoTransformer::test_list_length_aliases", "tests/filtertransformers/test_mongo.py::TestMongoTransformer::test_unaliased_length_operator", "tests/filtertransformers/test_mongo.py::TestMongoTransformer::test_aliased_length_operator", "tests/filtertransformers/test_mongo.py::TestMongoTransformer::test_aliases", "tests/filtertransformers/test_mongo.py::TestMongoTransformer::test_list_properties", "tests/filtertransformers/test_mongo.py::TestMongoTransformer::test_known_properties", "tests/filtertransformers/test_mongo.py::TestMongoTransformer::test_precedence", "tests/filtertransformers/test_mongo.py::TestMongoTransformer::test_special_cases" ]
[]
MIT License
8,898
1,778
[ "optimade/filtertransformers/mongo.py", "optimade/models/jsonapi.py", "optimade/server/warnings.py", "optimade/validator/config.py", "optimade/validator/validator.py" ]
celery__py-amqp-344
8e5ddd41b776f31faea6a47fcc757b76c567b007
2020-11-07 22:18:49
be6b5ededa5654ca43cea67927667456e32523a3
diff --git a/amqp/transport.py b/amqp/transport.py index 3eec88f..b183120 100644 --- a/amqp/transport.py +++ b/amqp/transport.py @@ -354,9 +354,9 @@ class SSLTransport(_AbstractTransport): def _wrap_socket_sni(self, sock, keyfile=None, certfile=None, server_side=False, cert_reqs=ssl.CERT_NONE, - do_handshake_on_connect=False, + ca_certs=None, do_handshake_on_connect=False, suppress_ragged_eofs=True, server_hostname=None, - ssl_version=ssl.PROTOCOL_TLS): + ciphers=None, ssl_version=ssl.PROTOCOL_TLS): """Socket wrap with SNI headers. stdlib `ssl.SSLContext.wrap_socket` method augmented with support for @@ -373,6 +373,10 @@ class SSLTransport(_AbstractTransport): context = ssl.SSLContext(ssl_version) if certfile is not None: context.load_cert_chain(certfile, keyfile) + if ca_certs is not None: + context.load_verify_locations(ca_certs) + if ciphers: + context.set_ciphers(ciphers) if cert_reqs != ssl.CERT_NONE: context.check_hostname = True # Set SNI headers if supported
SSLTransport error: unexpected keyword argument 'ca_certs'

Using amqp version 5.0.1, I discovered an SSLTransport error. When I use amqp version 5.0.0b1, SSL works just fine. Here's the error I get when using amqp==5.0.1:

```
/usr/local/lib/python3.8/dist-packages/kombu/connection.py:283: in channel
    chan = self.transport.create_channel(self.connection)
/usr/local/lib/python3.8/dist-packages/kombu/connection.py:858: in connection
    return self._ensure_connection(
/usr/local/lib/python3.8/dist-packages/kombu/connection.py:435: in _ensure_connection
    return retry_over_time(
/usr/local/lib/python3.8/dist-packages/kombu/utils/functional.py:325: in retry_over_time
    return fun(*args, **kwargs)
/usr/local/lib/python3.8/dist-packages/kombu/connection.py:866: in _connection_factory
    self._connection = self._establish_connection()
/usr/local/lib/python3.8/dist-packages/kombu/connection.py:801: in _establish_connection
    conn = self.transport.establish_connection()
/usr/local/lib/python3.8/dist-packages/kombu/transport/pyamqp.py:128: in establish_connection
    conn.connect()
/usr/local/lib/python3.8/dist-packages/amqp/connection.py:314: in connect
    self.transport.connect()
/usr/local/lib/python3.8/dist-packages/amqp/transport.py:77: in connect
    self._init_socket(
/usr/local/lib/python3.8/dist-packages/amqp/transport.py:188: in _init_socket
    self._setup_transport()
/usr/local/lib/python3.8/dist-packages/amqp/transport.py:323: in _setup_transport
    self.sock = self._wrap_socket(self.sock, **self.sslopts)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <amqp.transport.SSLTransport object at 0x7f8a0083a370>
sock = <socket.socket fd=11, family=AddressFamily.AF_INET, type=SocketKind.SOCK_STREAM, proto=6, laddr=('127.0.0.1', 41008), raddr=('127.0.0.1', 5671)>
context = None
sslopts = {'ca_certs': '/home/cameron/Defense/tls-gen/basic/result/ca_certificate.pem', 'cert_reqs': <VerifyMode.CERT_REQUIRED: ...e/tls-gen/basic/result/client_certificate.pem', 'keyfile': '/home/cameron/Defense/tls-gen/basic/result/client_key.pem'}

    def _wrap_socket(self, sock, context=None, **sslopts):
        if context:
            return self._wrap_context(sock, sslopts, **context)
>       return self._wrap_socket_sni(sock, **sslopts)
E       TypeError: _wrap_socket_sni() got an unexpected keyword argument 'ca_certs'

/usr/local/lib/python3.8/dist-packages/amqp/transport.py:330: TypeError
----------------------------------------------------------- Captured stdout call -----------------------------------------------------------
2020-10-28 16:46:54: INFO: Consumer successfully connected to kombu server at localhost:5671
2020-10-28 16:46:54: ERROR: Failed to initialize rabbit consumer connection: Traceback (most recent call last): TypeError: _wrap_socket_sni() got an unexpected keyword argument 'ca_certs'
```

Looking at `py-amqp/amqp/transport.py`, the parameter list of the method `_wrap_socket_sni` in amqp==5.0.1 doesn't include `ca_certs`. In amqp==5.0.0b1, the method `_wrap_socket_sni` does include `ca_certs` as a parameter. Is this a bug, or was this left out by accident?

Here are the links to amqp versions 5.0.1 and 5.0.0b1 of `py-amqp/amqp/transport.py`, with the line numbers highlighting the method:

amqp==5.0.1: https://github.com/celery/py-amqp/blob/93e4f3a2990f2ed1a6da861c99c7f0a3b0d32160/amqp/transport.py#L337

amqp==5.0.0b1: https://github.com/celery/py-amqp/blob/c5fe7daaf379cfbcccbe81fcd1ea12807274f8fb/amqp/transport.py#L339
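For context on what the restored options do on the standard-library side, the fix in the patch above essentially wires `ca_certs` and `ciphers` through to `ssl.SSLContext`. A minimal sketch of those calls follows; the certificate paths and cipher string are placeholders, not values from this issue:

```python
# Sketch of the stdlib calls behind the ca_certs / ciphers options.
import ssl

context = ssl.SSLContext(ssl.PROTOCOL_TLS)
# certfile / keyfile -> the client certificate presented to the broker
context.load_cert_chain("client_certificate.pem", "client_key.pem")
# ca_certs -> trust anchors used to verify the broker's certificate
context.load_verify_locations("ca_certificate.pem")
# ciphers -> optionally restrict the offered cipher suites
context.set_ciphers("ECDHE+AESGCM")
context.verify_mode = ssl.CERT_REQUIRED
context.check_hostname = True

# wrapped = context.wrap_socket(sock, server_hostname="broker.example.com")
```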
celery/py-amqp
diff --git a/t/unit/test_transport.py b/t/unit/test_transport.py index d94a520..5eec1ab 100644 --- a/t/unit/test_transport.py +++ b/t/unit/test_transport.py @@ -4,7 +4,7 @@ import re import socket import struct from struct import pack -from unittest.mock import ANY, MagicMock, Mock, call, patch +from unittest.mock import ANY, MagicMock, Mock, call, patch, sentinel import pytest @@ -638,14 +638,136 @@ class test_SSLTransport: ctx.wrap_socket.assert_called_with(sock, f=1) def test_wrap_socket_sni(self): + # testing default values of _wrap_socket_sni() sock = Mock() - with patch('ssl.SSLContext.wrap_socket') as mock_ssl_wrap: - self.t._wrap_socket_sni(sock) - mock_ssl_wrap.assert_called_with(sock=sock, - server_side=False, - do_handshake_on_connect=False, - suppress_ragged_eofs=True, - server_hostname=None) + with patch( + 'ssl.SSLContext.wrap_socket', + return_value=sentinel.WRAPPED_SOCKET) as mock_ssl_wrap: + ret = self.t._wrap_socket_sni(sock) + + mock_ssl_wrap.assert_called_with(sock=sock, + server_side=False, + do_handshake_on_connect=False, + suppress_ragged_eofs=True, + server_hostname=None) + + assert ret == sentinel.WRAPPED_SOCKET + + def test_wrap_socket_sni_certfile(self): + # testing _wrap_socket_sni() with parameters certfile and keyfile + sock = Mock() + with patch( + 'ssl.SSLContext.wrap_socket', + return_value=sentinel.WRAPPED_SOCKET) as mock_ssl_wrap, \ + patch('ssl.SSLContext.load_cert_chain') as mock_load_cert_chain: + ret = self.t._wrap_socket_sni( + sock, keyfile=sentinel.KEYFILE, certfile=sentinel.CERTFILE) + + mock_load_cert_chain.assert_called_with( + sentinel.CERTFILE, sentinel.KEYFILE) + mock_ssl_wrap.assert_called_with(sock=sock, + server_side=False, + do_handshake_on_connect=False, + suppress_ragged_eofs=True, + server_hostname=None) + + assert ret == sentinel.WRAPPED_SOCKET + + def test_wrap_socket_ca_certs(self): + # testing _wrap_socket_sni() with parameter ca_certs + sock = Mock() + with patch( + 'ssl.SSLContext.wrap_socket', + return_value=sentinel.WRAPPED_SOCKET + ) as mock_ssl_wrap, patch( + 'ssl.SSLContext.load_verify_locations' + ) as mock_load_verify_locations: + ret = self.t._wrap_socket_sni(sock, ca_certs=sentinel.CA_CERTS) + + mock_load_verify_locations.assert_called_with(sentinel.CA_CERTS) + mock_ssl_wrap.assert_called_with(sock=sock, + server_side=False, + do_handshake_on_connect=False, + suppress_ragged_eofs=True, + server_hostname=None) + + assert ret == sentinel.WRAPPED_SOCKET + + def test_wrap_socket_ciphers(self): + # testing _wrap_socket_sni() with parameter ciphers + sock = Mock() + with patch( + 'ssl.SSLContext.wrap_socket', + return_value=sentinel.WRAPPED_SOCKET) as mock_ssl_wrap, \ + patch('ssl.SSLContext.set_ciphers') as mock_set_ciphers: + ret = self.t._wrap_socket_sni(sock, ciphers=sentinel.CIPHERS) + + mock_set_ciphers.assert_called_with(sentinel.CIPHERS) + mock_ssl_wrap.assert_called_with(sock=sock, + server_side=False, + do_handshake_on_connect=False, + suppress_ragged_eofs=True, + server_hostname=None) + assert ret == sentinel.WRAPPED_SOCKET + + def test_wrap_socket_sni_cert_reqs(self): + # testing _wrap_socket_sni() with parameter cert_reqs + sock = Mock() + with patch('ssl.SSLContext') as mock_ssl_context_class: + wrap_socket_method_mock = mock_ssl_context_class().wrap_socket + wrap_socket_method_mock.return_value = sentinel.WRAPPED_SOCKET + ret = self.t._wrap_socket_sni(sock, cert_reqs=sentinel.CERT_REQS) + + wrap_socket_method_mock.assert_called_with( + sock=sock, + server_side=False, + do_handshake_on_connect=False, + 
suppress_ragged_eofs=True, + server_hostname=None + ) + assert mock_ssl_context_class().check_hostname is True + assert ret == sentinel.WRAPPED_SOCKET + + def test_wrap_socket_sni_setting_sni_header(self): + # testing _wrap_socket_sni() with setting SNI header + sock = Mock() + with patch('ssl.SSLContext') as mock_ssl_context_class, \ + patch('ssl.HAS_SNI', new=True): + # SSL module supports SNI + wrap_socket_method_mock = mock_ssl_context_class().wrap_socket + wrap_socket_method_mock.return_value = sentinel.WRAPPED_SOCKET + ret = self.t._wrap_socket_sni( + sock, cert_reqs=sentinel.CERT_REQS, + server_hostname=sentinel.SERVER_HOSTNAME + ) + wrap_socket_method_mock.assert_called_with( + sock=sock, + server_side=False, + do_handshake_on_connect=False, + suppress_ragged_eofs=True, + server_hostname=sentinel.SERVER_HOSTNAME + ) + assert mock_ssl_context_class().verify_mode == sentinel.CERT_REQS + assert ret == sentinel.WRAPPED_SOCKET + + with patch('ssl.SSLContext') as mock_ssl_context_class, \ + patch('ssl.HAS_SNI', new=False): + # SSL module does not support SNI + wrap_socket_method_mock = mock_ssl_context_class().wrap_socket + wrap_socket_method_mock.return_value = sentinel.WRAPPED_SOCKET + ret = self.t._wrap_socket_sni( + sock, cert_reqs=sentinel.CERT_REQS, + server_hostname=sentinel.SERVER_HOSTNAME + ) + wrap_socket_method_mock.assert_called_with( + sock=sock, + server_side=False, + do_handshake_on_connect=False, + suppress_ragged_eofs=True, + server_hostname=sentinel.SERVER_HOSTNAME + ) + assert mock_ssl_context_class().verify_mode != sentinel.CERT_REQS + assert ret == sentinel.WRAPPED_SOCKET def test_shutdown_transport(self): self.t.sock = None
{ "commit_name": "merge_commit", "failed_lite_validators": [], "has_test_patch": true, "is_lite": true, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 0 }, "num_modified_files": 1 }
5.0
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest pytest-cov pytest-xdist pytest-mock pytest-asyncio" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.8", "reqs_path": [ "requirements/default.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
-e git+https://github.com/celery/py-amqp.git@8e5ddd41b776f31faea6a47fcc757b76c567b007#egg=amqp coverage==7.6.1 exceptiongroup==1.2.2 execnet==2.1.1 iniconfig==2.1.0 packaging==24.2 pluggy==1.5.0 pytest==8.3.5 pytest-asyncio==0.24.0 pytest-cov==5.0.0 pytest-mock==3.14.0 pytest-xdist==3.6.1 tomli==2.2.1 vine==5.0.0
name: py-amqp channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=24.2=py38h06a4308_0 - python=3.8.20=he870216_0 - readline=8.2=h5eee18b_0 - setuptools=75.1.0=py38h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.44.0=py38h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - coverage==7.6.1 - exceptiongroup==1.2.2 - execnet==2.1.1 - iniconfig==2.1.0 - packaging==24.2 - pluggy==1.5.0 - pytest==8.3.5 - pytest-asyncio==0.24.0 - pytest-cov==5.0.0 - pytest-mock==3.14.0 - pytest-xdist==3.6.1 - tomli==2.2.1 - vine==5.0.0 prefix: /opt/conda/envs/py-amqp
[ "t/unit/test_transport.py::test_SSLTransport::test_wrap_socket_ca_certs", "t/unit/test_transport.py::test_SSLTransport::test_wrap_socket_ciphers" ]
[]
[ "t/unit/test_transport.py::test_SSLTransport::test_repr_disconnected", "t/unit/test_transport.py::test_SSLTransport::test_repr_connected", "t/unit/test_transport.py::test_SSLTransport::test_setup_transport", "t/unit/test_transport.py::test_SSLTransport::test_wrap_socket", "t/unit/test_transport.py::test_SSLTransport::test_wrap_context", "t/unit/test_transport.py::test_SSLTransport::test_wrap_socket_sni", "t/unit/test_transport.py::test_SSLTransport::test_wrap_socket_sni_certfile", "t/unit/test_transport.py::test_SSLTransport::test_wrap_socket_sni_cert_reqs", "t/unit/test_transport.py::test_SSLTransport::test_wrap_socket_sni_setting_sni_header", "t/unit/test_transport.py::test_SSLTransport::test_shutdown_transport", "t/unit/test_transport.py::test_SSLTransport::test_read_EOF", "t/unit/test_transport.py::test_SSLTransport::test_write_success", "t/unit/test_transport.py::test_SSLTransport::test_write_socket_closed", "t/unit/test_transport.py::test_SSLTransport::test_write_ValueError", "t/unit/test_transport.py::test_SSLTransport::test_read_timeout", "t/unit/test_transport.py::test_SSLTransport::test_read_SSLError", "t/unit/test_transport.py::test_TCPTransport::test_repr_disconnected", "t/unit/test_transport.py::test_TCPTransport::test_repr_connected", "t/unit/test_transport.py::test_TCPTransport::test_setup_transport", "t/unit/test_transport.py::test_TCPTransport::test_read_EOF", "t/unit/test_transport.py::test_TCPTransport::test_read_frame__windowstimeout" ]
[]
BSD License (3 Clause)
8,899
310
[ "amqp/transport.py" ]
CharJon__GeCO-9
1d6c60958173ef26b28d5d6288b1cfa120664d48
2020-11-10 14:22:00
1d6c60958173ef26b28d5d6288b1cfa120664d48
diff --git a/geco/mips/facility_location.py b/geco/mips/facility_location.py index ec8c2d4..7139f3e 100644 --- a/geco/mips/facility_location.py +++ b/geco/mips/facility_location.py @@ -2,8 +2,10 @@ import itertools import numpy as np import pyscipopt as scip +from networkx.utils import py_random_state +@py_random_state(3) def capacitated_facility_location(n_customers, n_facilities, ratio, seed=0): """ Generate a Capacited Facility Location problem following @@ -19,23 +21,21 @@ def capacitated_facility_location(n_customers, n_facilities, ratio, seed=0): The desired number of facilities. ratio: float The desired capacity / demand ratio. - seed: int - The seed to use for random numbers. + seed: integer, random_state, or None + Indicator of random number generation state. """ - rng = np.random.RandomState(seed) - # locations for customers - c_x = rng.rand(n_customers) - c_y = rng.rand(n_customers) + c_x = np.array([seed.random() for _ in range(n_customers)]) + c_y = np.array([seed.random() for _ in range(n_customers)]) # locations for facilities - f_x = rng.rand(n_facilities) - f_y = rng.rand(n_facilities) + f_x = np.array([seed.random() for _ in range(n_facilities)]) + f_y = np.array([seed.random() for _ in range(n_facilities)]) - demands = rng.randint(5, 35 + 1, size=n_customers) - capacities = rng.randint(10, 160 + 1, size=n_facilities) - fixed_costs = rng.randint(100, 110 + 1, size=n_facilities) * np.sqrt(capacities) \ - + rng.randint(90 + 1, size=n_facilities) + demands = np.array(seed.sample(range(5, 35 + 1), k=n_customers)) + capacities = np.array(seed.sample(range(10, 160 + 1), k=n_facilities)) + fixed_costs = np.array(seed.sample(range(100, 110 + 1), k=n_facilities) * np.sqrt(capacities)) \ + + np.array(seed.sample(range(90 + 1), k=n_facilities)) fixed_costs = fixed_costs.astype(int) total_demand = demands.sum() diff --git a/geco/mips/knapsack.py b/geco/mips/knapsack.py index 2f889ae..9b221a7 100644 --- a/geco/mips/knapsack.py +++ b/geco/mips/knapsack.py @@ -1,20 +1,20 @@ -import random import math import pyscipopt as scip +from networkx.utils import py_random_state -def yang(n, seed): +@py_random_state(1) +def yang(n, seed=0): """ Generates knapsack instance parameters according to: Yu Yang, Natashia Boland, Bistra Dilkina, Martin Savelsbergh, "Learning Generalized Strong Branching for Set Covering, Set Packing, and 0-1 Knapsack Problems", 2020. 
""" - random.seed(seed) def draw_value(): - return random.randint(1, 10 * n) + return seed.randint(1, 10 * n) profits = [draw_value() for _ in range(n)] weights = [draw_value() for _ in range(n)] diff --git a/geco/mips/scheduling.py b/geco/mips/scheduling.py index a12ec87..ce15a0b 100644 --- a/geco/mips/scheduling.py +++ b/geco/mips/scheduling.py @@ -3,8 +3,9 @@ This module implements the scheduling problem MIP generation techniques from dif """ import itertools + import pyscipopt as scip -import random +from networkx.utils import py_random_state def hooker_late_tasks_formulation( @@ -15,8 +16,7 @@ def hooker_late_tasks_formulation( C, c, r, - d, - seed=0, + d ): # TODO: use more expressive param names """Generates late tasks mip formulation described in section 4 in @@ -25,13 +25,11 @@ def hooker_late_tasks_formulation( number_of_facilities: the number of facilities to schedule on number_of_tasks: the number of tasks to assign to facilities time_steps: the number of time steps starting from 0 (corresponds to "N" in the paper) - seed: used for randomization Other parameters follow the same naming used in the paper Returns: model: SCIP model of the late tasks instance """ - random.seed(seed) model = scip.Model("Hooker Scheduling Late Tasks Formulation") assert min(r) == 0 # TODO: handle the case that timesteps don't start at 0 @@ -103,21 +101,21 @@ def generate_hookers_instances(): ) -def generate_params(number_of_facilities, number_of_tasks, seed): - random.seed(seed) +@py_random_state(2) +def generate_params(number_of_facilities, number_of_tasks, seed=0): p = {} for j, i in itertools.product(range(number_of_tasks), range(number_of_facilities)): if number_of_tasks < 22: - p[j, i] = random.randint(2, 20 + 5 * i) + p[j, i] = seed.randint(2, 20 + 5 * i) else: - p[j, i] = random.randint(5, 20 + 5 * i) + p[j, i] = seed.randint(5, 20 + 5 * i) C = [10] * number_of_facilities c = {} for i in range(number_of_facilities): - value = random.randint(1, 10) + value = seed.randint(1, 10) for j in range(number_of_tasks): c[j, i] = value @@ -126,11 +124,11 @@ def generate_params(number_of_facilities, number_of_tasks, seed): d = {} beta = 20 / 9 for j in range(number_of_tasks): - d[j] = random.uniform(beta * number_of_tasks / 4, beta * number_of_tasks) + d[j] = seed.uniform(beta * number_of_tasks / 4, beta * number_of_tasks) r = {} for j, k in itertools.product(range(number_of_tasks), range(number_of_facilities)): - r[j, k] = random.randint(1, 9) + r[j, k] = seed.randint(1, 9) return p, C, c, R, d, r @@ -143,8 +141,7 @@ def heinz_formulation( c, R, d, - r, - seed=0, + r ): """Generates mip formulation according to Model 4 in # TODO: Add paper reference @@ -152,12 +149,10 @@ def heinz_formulation( number_of_facilities: the number of facilities to schedule on number_of_tasks: the number of tasks to assign to facilities time_steps: the number of time steps starting from 0 (corresponds to "N" in the paper) - seed: used for randomization Returns: model: SCIP model of the late tasks instance """ - random.seed(seed) model = scip.Model("Heinz Scheduling") time_steps = range(min(R), int(max(d.values())))
Avoid side effects when using random numbers. The networkx decorator might be useful. See e.g. [here](https://networkx.org/documentation/stable/reference/randomness.html).
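For readers unfamiliar with the decorator adopted in the patch above, here is a minimal sketch of how `networkx.utils.py_random_state` avoids the global-seed side effect; the generator function below is invented for illustration:

```python
# py_random_state(1) converts the argument at position 1 (`seed`) into a
# random.Random instance, so passing an int or a Random instance never
# touches the module-level random state. The function is made up here.
from networkx.utils import py_random_state


@py_random_state(1)
def draw_profits(n, seed=0):
    # `seed` may be an int, a random.Random instance, or None; either way
    # the decorator hands the body a ready-to-use generator.
    return [seed.randint(1, 10 * n) for _ in range(n)]


assert draw_profits(5, 42) == draw_profits(5, 42)  # same seed, same draw
print(draw_profits(5))  # default seed of 0, reproducible across calls
```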
CharJon/GeCO
diff --git a/geco/mips/tests/test_facility_location.py b/geco/mips/tests/test_facility_location.py index f404678..d21b2b4 100644 --- a/geco/mips/tests/test_facility_location.py +++ b/geco/mips/tests/test_facility_location.py @@ -2,10 +2,10 @@ from geco.mips.facility_location import * def test_capacitated_facility_location(): - n_customers, n_facilities, ratio = 25, 10, 2 - m_1 = capacitated_facility_location(n_customers, n_facilities, ratio) - assert m_1.getNVars() == n_customers * n_facilities + n_facilities - assert m_1.getNConss() == n_customers + n_facilities + 1 + n_customers * n_facilities - assert m_1.getObjectiveSense() == "minimize" - m_1.optimize() - assert 5679 <= m_1.getObjVal() <= 5680 + n_customers, n_facilities, ratio, seed = 25, 10, 2, 0 + model = capacitated_facility_location(n_customers, n_facilities, ratio, seed) + assert model.getNVars() == n_customers * n_facilities + n_facilities + assert model.getNConss() == n_customers + n_facilities + 1 + n_customers * n_facilities + assert model.getObjectiveSense() == "minimize" + model.optimize() + assert 5856 <= model.getObjVal() <= 5857
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 3, "test_score": 2 }, "num_modified_files": 3 }
unknown
{ "env_vars": null, "env_yml_path": [ "conda-dev-env.yml" ], "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": true, "packages": "environment.yml", "pip_packages": [ "pytest", "coverage" ], "pre_install": null, "python": "3.8", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
anyio @ file:///home/conda/feedstock_root/build_artifacts/anyio_1726753373685/work argon2-cffi @ file:///home/conda/feedstock_root/build_artifacts/argon2-cffi_1692818318753/work argon2-cffi-bindings @ file:///home/conda/feedstock_root/build_artifacts/argon2-cffi-bindings_1649500309442/work arrow @ file:///home/conda/feedstock_root/build_artifacts/arrow_1696128962909/work asttokens @ file:///home/conda/feedstock_root/build_artifacts/asttokens_1733175639022/work async-lru @ file:///home/conda/feedstock_root/build_artifacts/async-lru_1690563019058/work attrs @ file:///home/conda/feedstock_root/build_artifacts/attrs_1722977137225/work babel @ file:///home/conda/feedstock_root/build_artifacts/babel_1730878832677/work backcall @ file:///home/conda/feedstock_root/build_artifacts/backcall_1592338393461/work beautifulsoup4 @ file:///home/conda/feedstock_root/build_artifacts/beautifulsoup4_1705564648255/work bleach @ file:///home/conda/feedstock_root/build_artifacts/bleach_1696630167146/work Brotli @ file:///home/conda/feedstock_root/build_artifacts/brotli-split_1648883617327/work cached-property @ file:///home/conda/feedstock_root/build_artifacts/cached_property_1615209429212/work certifi @ file:///home/conda/feedstock_root/build_artifacts/certifi_1725278078093/work/certifi cffi @ file:///home/conda/feedstock_root/build_artifacts/cffi_1636046063618/work charset-normalizer @ file:///home/conda/feedstock_root/build_artifacts/charset-normalizer_1728479282467/work click==8.1.8 colorama @ file:///home/conda/feedstock_root/build_artifacts/colorama_1666700638685/work comm @ file:///home/conda/feedstock_root/build_artifacts/comm_1710320294760/work coverage @ file:///home/conda/feedstock_root/build_artifacts/coverage_1652409050186/work debugpy @ file:///croot/debugpy_1690905042057/work decorator @ file:///home/conda/feedstock_root/build_artifacts/decorator_1641555617451/work defusedxml @ file:///home/conda/feedstock_root/build_artifacts/defusedxml_1615232257335/work Deprecated==1.2.18 dimod==0.12.17 dwave_networkx==0.8.15 entrypoints @ file:///home/conda/feedstock_root/build_artifacts/entrypoints_1643888246732/work exceptiongroup @ file:///home/conda/feedstock_root/build_artifacts/exceptiongroup_1720869315914/work executing @ file:///home/conda/feedstock_root/build_artifacts/executing_1725214404607/work fastjsonschema @ file:///home/conda/feedstock_root/build_artifacts/python-fastjsonschema_1718477020893/work/dist fqdn @ file:///home/conda/feedstock_root/build_artifacts/fqdn_1638810296540/work/dist -e git+https://github.com/CharJon/GeCO.git@1d6c60958173ef26b28d5d6288b1cfa120664d48#egg=GeCO h11 @ file:///home/conda/feedstock_root/build_artifacts/h11_1664132893548/work h2 @ file:///home/conda/feedstock_root/build_artifacts/h2_1634280454336/work hpack==4.0.0 httpcore @ file:///home/conda/feedstock_root/build_artifacts/bld/rattler-build_httpcore_1731707562/work httpx @ file:///home/conda/feedstock_root/build_artifacts/httpx_1724778349782/work hyperframe @ file:///home/conda/feedstock_root/build_artifacts/hyperframe_1619110129307/work idna @ file:///home/conda/feedstock_root/build_artifacts/idna_1726459485162/work importlib_metadata @ file:///home/conda/feedstock_root/build_artifacts/importlib-metadata_1726082825846/work importlib_resources @ file:///home/conda/feedstock_root/build_artifacts/importlib_resources_1725921340658/work iniconfig @ file:///home/conda/feedstock_root/build_artifacts/iniconfig_1673103042956/work ipykernel @ file:///home/conda/feedstock_root/build_artifacts/ipykernel_1719845459717/work 
ipython @ file:///home/conda/feedstock_root/build_artifacts/ipython_1683289033986/work ipython_genutils @ file:///home/conda/feedstock_root/build_artifacts/ipython_genutils_1716278396992/work ipywidgets @ file:///home/conda/feedstock_root/build_artifacts/ipywidgets_1724334859652/work isoduration @ file:///home/conda/feedstock_root/build_artifacts/isoduration_1638811571363/work/dist jedi @ file:///home/conda/feedstock_root/build_artifacts/jedi_1696326070614/work Jinja2 @ file:///home/conda/feedstock_root/build_artifacts/jinja2_1715127149914/work json5 @ file:///home/conda/feedstock_root/build_artifacts/json5_1712986206667/work jsonpointer @ file:///home/conda/feedstock_root/build_artifacts/jsonpointer_1718283368615/work jsonschema @ file:///home/conda/feedstock_root/build_artifacts/jsonschema_1720529478715/work jsonschema-specifications @ file:///tmp/tmpvslgxhz5/src jupyter @ file:///home/conda/feedstock_root/build_artifacts/jupyter_1725037521377/work jupyter-console @ file:///home/conda/feedstock_root/build_artifacts/jupyter_console_1678118109161/work jupyter-events @ file:///home/conda/feedstock_root/build_artifacts/jupyter_events_1710805637316/work jupyter-lsp @ file:///home/conda/feedstock_root/build_artifacts/jupyter-lsp-meta_1712707420468/work/jupyter-lsp jupyter_client @ file:///home/conda/feedstock_root/build_artifacts/jupyter_client_1673615989977/work jupyter_core @ file:///home/conda/feedstock_root/build_artifacts/jupyter_core_1727163409502/work jupyter_server @ file:///home/conda/feedstock_root/build_artifacts/jupyter_server_1720816649297/work jupyter_server_terminals @ file:///home/conda/feedstock_root/build_artifacts/jupyter_server_terminals_1710262634903/work jupyterlab @ file:///home/conda/feedstock_root/build_artifacts/jupyterlab_1730308726474/work jupyterlab_pygments @ file:///home/conda/feedstock_root/build_artifacts/jupyterlab_pygments_1707149102966/work jupyterlab_server @ file:///home/conda/feedstock_root/build_artifacts/jupyterlab_server-split_1721163288448/work jupyterlab_widgets @ file:///home/conda/feedstock_root/build_artifacts/jupyterlab_widgets_1724331334887/work MarkupSafe @ file:///home/conda/feedstock_root/build_artifacts/markupsafe_1648737563195/work matplotlib-inline @ file:///home/conda/feedstock_root/build_artifacts/matplotlib-inline_1713250518406/work mistune @ file:///home/conda/feedstock_root/build_artifacts/mistune_1698947099619/work nbclassic @ file:///home/conda/feedstock_root/build_artifacts/nbclassic_1716838762700/work nbclient @ file:///home/conda/feedstock_root/build_artifacts/nbclient_1734628800805/work nbconvert @ file:///home/conda/feedstock_root/build_artifacts/nbconvert-meta_1733405477194/work nbformat @ file:///home/conda/feedstock_root/build_artifacts/nbformat_1712238998817/work nest_asyncio @ file:///home/conda/feedstock_root/build_artifacts/nest-asyncio_1705850609492/work networkx==2.8.8 notebook @ file:///home/conda/feedstock_root/build_artifacts/notebook_1715848908871/work notebook_shim @ file:///home/conda/feedstock_root/build_artifacts/notebook-shim_1707957777232/work numpy @ file:///home/conda/feedstock_root/build_artifacts/numpy_1651020413938/work overrides @ file:///home/conda/feedstock_root/build_artifacts/overrides_1706394519472/work packaging @ file:///home/conda/feedstock_root/build_artifacts/packaging_1733203243479/work pandas==1.4.2 pandocfilters @ file:///home/conda/feedstock_root/build_artifacts/pandocfilters_1631603243851/work parso @ file:///home/conda/feedstock_root/build_artifacts/parso_1712320355065/work pexpect @ 
file:///home/conda/feedstock_root/build_artifacts/pexpect_1706113125309/work pickleshare @ file:///home/conda/feedstock_root/build_artifacts/pickleshare_1602536217715/work pkgutil_resolve_name @ file:///home/conda/feedstock_root/build_artifacts/pkgutil-resolve-name_1694617248815/work platformdirs @ file:///home/conda/feedstock_root/build_artifacts/platformdirs_1726613481435/work pluggy @ file:///home/conda/feedstock_root/build_artifacts/pluggy_1713667077545/work prometheus_client @ file:///home/conda/feedstock_root/build_artifacts/prometheus_client_1726901976720/work prompt_toolkit @ file:///home/conda/feedstock_root/build_artifacts/prompt-toolkit_1727341649933/work psutil @ file:///home/conda/feedstock_root/build_artifacts/psutil_1653089172347/work ptyprocess @ file:///home/conda/feedstock_root/build_artifacts/ptyprocess_1609419310487/work/dist/ptyprocess-0.7.0-py2.py3-none-any.whl pure_eval @ file:///home/conda/feedstock_root/build_artifacts/pure_eval_1721585709575/work pycparser @ file:///home/conda/feedstock_root/build_artifacts/pycparser_1711811537435/work Pygments @ file:///home/conda/feedstock_root/build_artifacts/pygments_1714846767233/work PySCIPOpt @ file:///home/conda/feedstock_root/build_artifacts/pyscipopt_1638955097361/work PySocks @ file:///home/conda/feedstock_root/build_artifacts/pysocks_1661604839144/work pytest @ file:///home/conda/feedstock_root/build_artifacts/pytest_1733087655016/work python-dateutil @ file:///home/conda/feedstock_root/build_artifacts/python-dateutil_1709299778482/work python-json-logger @ file:///home/conda/feedstock_root/build_artifacts/python-json-logger_1677079630776/work pytz @ file:///home/conda/feedstock_root/build_artifacts/pytz_1726055524169/work PyYAML @ file:///home/conda/feedstock_root/build_artifacts/pyyaml_1648757091578/work pyzmq @ file:///croot/pyzmq_1705605076900/work referencing @ file:///home/conda/feedstock_root/build_artifacts/referencing_1714619483868/work requests @ file:///home/conda/feedstock_root/build_artifacts/requests_1717057054362/work rfc3339-validator @ file:///home/conda/feedstock_root/build_artifacts/rfc3339-validator_1638811747357/work rfc3986-validator @ file:///home/conda/feedstock_root/build_artifacts/rfc3986-validator_1598024191506/work rpds-py @ file:///croot/rpds-py_1698945930462/work scipy @ file:///home/conda/feedstock_root/build_artifacts/scipy_1653073867187/work Send2Trash @ file:///home/conda/feedstock_root/build_artifacts/send2trash_1712584999685/work six @ file:///home/conda/feedstock_root/build_artifacts/six_1620240208055/work sniffio @ file:///home/conda/feedstock_root/build_artifacts/sniffio_1708952932303/work soupsieve @ file:///home/conda/feedstock_root/build_artifacts/soupsieve_1693929250441/work stack-data @ file:///home/conda/feedstock_root/build_artifacts/stack_data_1669632077133/work tabulate==0.8.10 terminado @ file:///home/conda/feedstock_root/build_artifacts/terminado_1710262609923/work tinycss2 @ file:///home/conda/feedstock_root/build_artifacts/tinycss2_1729802851396/work tomli @ file:///home/conda/feedstock_root/build_artifacts/tomli_1727974628237/work tornado @ file:///croot/tornado_1718740109488/work tqdm @ file:///home/conda/feedstock_root/build_artifacts/tqdm_1732497199771/work traitlets @ file:///home/conda/feedstock_root/build_artifacts/traitlets_1713535121073/work tsplib95 @ git+https://github.com/rhgrant10/tsplib95.git@57e73472ac2bdf64562b0c1cafa058395591da0a types-python-dateutil @ file:///home/conda/feedstock_root/build_artifacts/types-python-dateutil_1727940235703/work 
typing-utils @ file:///home/conda/feedstock_root/build_artifacts/typing_utils_1622899189314/work typing_extensions @ file:///home/conda/feedstock_root/build_artifacts/typing_extensions_1717802530399/work uri-template @ file:///home/conda/feedstock_root/build_artifacts/uri-template_1688655812972/work/dist urllib3 @ file:///home/conda/feedstock_root/build_artifacts/urllib3_1726496430923/work wcwidth @ file:///home/conda/feedstock_root/build_artifacts/wcwidth_1704731205417/work webcolors @ file:///home/conda/feedstock_root/build_artifacts/webcolors_1723294704277/work webencodings @ file:///home/conda/feedstock_root/build_artifacts/webencodings_1694681268211/work websocket-client @ file:///home/conda/feedstock_root/build_artifacts/websocket-client_1713923384721/work widgetsnbextension @ file:///home/conda/feedstock_root/build_artifacts/widgetsnbextension_1724331337528/work wrapt==1.17.2 zipp @ file:///home/conda/feedstock_root/build_artifacts/zipp_1731262100163/work zstandard @ file:///croot/zstandard_1728569189425/work
name: GeCO channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ampl-mp=3.1.0=h2cc385e_1006 - anyio=4.5.0=pyhd8ed1ab_0 - argon2-cffi=23.1.0=pyhd8ed1ab_0 - argon2-cffi-bindings=21.2.0=py38h0a891b7_2 - arrow=1.3.0=pyhd8ed1ab_0 - asttokens=3.0.0=pyhd8ed1ab_0 - async-lru=2.0.4=pyhd8ed1ab_0 - attrs=24.2.0=pyh71513ae_0 - babel=2.16.0=pyhd8ed1ab_0 - backcall=0.2.0=pyh9f0ad1d_0 - beautifulsoup4=4.12.3=pyha770c72_0 - bleach=6.1.0=pyhd8ed1ab_0 - brotli-python=1.0.9=py38hfa26641_7 - ca-certificates=2025.2.25=h06a4308_0 - cached-property=1.5.2=hd8ed1ab_1 - cached_property=1.5.2=pyha770c72_1 - certifi=2024.8.30=pyhd8ed1ab_0 - cffi=1.15.0=py38h3931269_0 - charset-normalizer=3.4.0=pyhd8ed1ab_0 - colorama=0.4.6=pyhd8ed1ab_0 - comm=0.2.2=pyhd8ed1ab_0 - coverage=6.3.3=py38h0a891b7_0 - cppad=20210000.6=h9c3ff4c_0 - debugpy=1.6.7=py38h6a678d5_0 - decorator=5.1.1=pyhd8ed1ab_0 - defusedxml=0.7.1=pyhd8ed1ab_0 - entrypoints=0.4=pyhd8ed1ab_0 - exceptiongroup=1.2.2=pyhd8ed1ab_0 - executing=2.1.0=pyhd8ed1ab_0 - fqdn=1.5.1=pyhd8ed1ab_0 - gmp=6.2.1=h58526e2_0 - h11=0.14.0=pyhd8ed1ab_0 - h2=4.1.0=pyhd8ed1ab_0 - hpack=4.0.0=pyh9f0ad1d_0 - httpcore=1.0.7=pyh29332c3_1 - httpx=0.27.2=pyhd8ed1ab_0 - hyperframe=6.0.1=pyhd8ed1ab_0 - idna=3.10=pyhd8ed1ab_0 - importlib-metadata=8.5.0=pyha770c72_0 - importlib_resources=6.4.5=pyhd8ed1ab_0 - iniconfig=2.0.0=pyhd8ed1ab_0 - ipopt=3.14.1=h7ede334_0 - ipykernel=6.29.5=pyh3099207_0 - ipython=8.12.2=pyh41d4057_0 - ipython_genutils=0.2.0=pyhd8ed1ab_1 - ipywidgets=8.1.5=pyhd8ed1ab_0 - isoduration=20.11.0=pyhd8ed1ab_0 - jedi=0.19.1=pyhd8ed1ab_0 - jinja2=3.1.4=pyhd8ed1ab_0 - json5=0.9.25=pyhd8ed1ab_0 - jsonpointer=3.0.0=py38h578d9bd_0 - jsonschema=4.23.0=pyhd8ed1ab_0 - jsonschema-specifications=2024.10.1=pyhd8ed1ab_0 - jsonschema-with-format-nongpl=4.23.0=hd8ed1ab_1 - jupyter=1.1.1=pyhd8ed1ab_0 - jupyter-lsp=2.2.5=pyhd8ed1ab_0 - jupyter_client=7.4.9=pyhd8ed1ab_0 - jupyter_console=6.6.3=pyhd8ed1ab_0 - jupyter_core=5.7.2=pyh31011fe_1 - jupyter_events=0.10.0=pyhd8ed1ab_0 - jupyter_server=2.14.2=pyhd8ed1ab_0 - jupyter_server_terminals=0.5.3=pyhd8ed1ab_0 - jupyterlab=4.3.0=pyhd8ed1ab_0 - jupyterlab_pygments=0.3.0=pyhd8ed1ab_1 - jupyterlab_server=2.27.3=pyhd8ed1ab_0 - jupyterlab_widgets=3.0.13=pyhd8ed1ab_0 - ld_impl_linux-64=2.40=h12ee557_0 - libblas=3.9.0=16_linux64_openblas - libcblas=3.9.0=16_linux64_openblas - libedit=3.1.20191231=he28a2e2_2 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgfortran-ng=13.2.0=h69a702a_0 - libgfortran5=13.2.0=ha4646dd_0 - libgomp=11.2.0=h1234567_1 - libiconv=1.17=h166bdaf_0 - liblapack=3.9.0=16_linux64_openblas - libopenblas=0.3.21=h043d6bf_0 - libsodium=1.0.18=h36c2ea0_1 - libstdcxx-ng=11.2.0=h1234567_1 - lz4-c=1.9.4=h6a678d5_1 - markupsafe=2.1.1=py38h0a891b7_1 - matplotlib-inline=0.1.7=pyhd8ed1ab_0 - metis=5.1.0=h58526e2_1006 - mistune=3.0.2=pyhd8ed1ab_0 - mumps-include=5.2.1=ha770c72_14 - mumps-seq=5.2.1=h2104b81_11 - nbclassic=1.1.0=pyhd8ed1ab_0 - nbclient=0.10.2=pyhd8ed1ab_0 - nbconvert-core=7.16.4=pyhff2d567_2 - nbformat=5.10.4=pyhd8ed1ab_0 - ncurses=6.4=h6a678d5_0 - nest-asyncio=1.6.0=pyhd8ed1ab_0 - notebook=6.5.7=pyha770c72_0 - notebook-shim=0.2.4=pyhd8ed1ab_0 - numpy=1.22.3=py38h99721a1_2 - openssl=3.0.16=h5eee18b_0 - overrides=7.7.0=pyhd8ed1ab_0 - packaging=24.2=pyhd8ed1ab_2 - pandas=1.4.2=py38h47df419_1 - pandocfilters=1.5.0=pyhd8ed1ab_0 - parso=0.8.4=pyhd8ed1ab_0 - pexpect=4.9.0=pyhd8ed1ab_0 - 
pickleshare=0.7.5=py_1003 - pip=24.3.1=pyh8b19718_0 - pkgutil-resolve-name=1.3.10=pyhd8ed1ab_1 - platformdirs=4.3.6=pyhd8ed1ab_0 - pluggy=1.5.0=pyhd8ed1ab_0 - prometheus_client=0.21.0=pyhd8ed1ab_0 - prompt-toolkit=3.0.48=pyha770c72_0 - prompt_toolkit=3.0.48=hd8ed1ab_1 - psutil=5.9.1=py38h0a891b7_0 - ptyprocess=0.7.0=pyhd3deb0d_0 - pure_eval=0.2.3=pyhd8ed1ab_0 - pycparser=2.22=pyhd8ed1ab_0 - pygments=2.18.0=pyhd8ed1ab_0 - pyscipopt=3.5.0=py38h709712a_0 - pysocks=1.7.1=pyha2e5f31_6 - pytest=8.3.4=pyhd8ed1ab_0 - python=3.8.20=he870216_0 - python-dateutil=2.9.0=pyhd8ed1ab_0 - python-fastjsonschema=2.20.0=pyhd8ed1ab_0 - python-json-logger=2.0.7=pyhd8ed1ab_0 - python_abi=3.8=2_cp38 - pytz=2024.2=pyhd8ed1ab_0 - pyyaml=6.0=py38h0a891b7_4 - pyzmq=25.1.2=py38h6a678d5_0 - readline=8.2=h5eee18b_0 - referencing=0.35.1=pyhd8ed1ab_0 - requests=2.32.3=pyhd8ed1ab_0 - rfc3339-validator=0.1.4=pyhd8ed1ab_0 - rfc3986-validator=0.1.1=pyh9f0ad1d_0 - rpds-py=0.10.6=py38hb02cf49_0 - scip=7.0.3=hf5bcbcd_1 - scipy=1.8.1=py38h1ee437e_0 - scotch=6.0.9=h3858553_1 - send2trash=1.8.3=pyh0d859eb_0 - setuptools=75.1.0=py38h06a4308_0 - six=1.16.0=pyh6c4a22f_0 - sniffio=1.3.1=pyhd8ed1ab_0 - soupsieve=2.5=pyhd8ed1ab_1 - sqlite=3.45.3=h5eee18b_0 - stack_data=0.6.2=pyhd8ed1ab_0 - tbb=2020.2=h4bd325d_4 - terminado=0.18.1=pyh0d859eb_0 - tinycss2=1.4.0=pyhd8ed1ab_0 - tk=8.6.14=h39e8969_0 - tomli=2.0.2=pyhd8ed1ab_0 - tornado=6.4.1=py38h5eee18b_0 - tqdm=4.67.1=pyhd8ed1ab_0 - traitlets=5.14.3=pyhd8ed1ab_0 - types-python-dateutil=2.9.0.20241003=pyhff2d567_0 - typing-extensions=4.12.2=hd8ed1ab_0 - typing_extensions=4.12.2=pyha770c72_0 - typing_utils=0.1.0=pyhd8ed1ab_0 - unixodbc=2.3.10=h583eb01_0 - uri-template=1.3.0=pyhd8ed1ab_0 - urllib3=2.2.3=pyhd8ed1ab_0 - wcwidth=0.2.13=pyhd8ed1ab_0 - webcolors=24.8.0=pyhd8ed1ab_0 - webencodings=0.5.1=pyhd8ed1ab_2 - websocket-client=1.8.0=pyhd8ed1ab_0 - wheel=0.44.0=py38h06a4308_0 - widgetsnbextension=4.0.13=pyhd8ed1ab_0 - xz=5.6.4=h5eee18b_1 - yaml=0.2.5=h7f98852_2 - zeromq=4.3.5=h6a678d5_0 - zipp=3.21.0=pyhd8ed1ab_0 - zlib=1.2.13=h5eee18b_1 - zstandard=0.23.0=py38h2c38b39_0 - zstd=1.5.6=hc292b87_0 - pip: - click==8.1.8 - deprecated==1.2.18 - dimod==0.12.17 - dwave-networkx==0.8.15 - networkx==2.8.8 - tabulate==0.8.10 - tsplib95==0.7.1 - wrapt==1.17.2 prefix: /opt/conda/envs/GeCO
[ "geco/mips/tests/test_facility_location.py::test_capacitated_facility_location" ]
[]
[]
[]
MIT License
8,915
1,858
[ "geco/mips/facility_location.py", "geco/mips/knapsack.py", "geco/mips/scheduling.py" ]
ioos__erddapy-155
aa166c1eaf9a233114bd5709c80195e426564554
2020-11-11 00:06:26
aa166c1eaf9a233114bd5709c80195e426564554
diff --git a/erddapy/erddapy.py b/erddapy/erddapy.py index a70a7b9..da76b21 100644 --- a/erddapy/erddapy.py +++ b/erddapy/erddapy.py @@ -258,7 +258,9 @@ class ERDDAP: maxTime=kwargs.get("max_time", default), searchFor=search_for, ) - + # ERDDAP 2.10 no longer accepts strings placeholder for dates. + # Removing them entirely should be OK for older versions too. + url = url.replace("&minTime=(ANY)", "").replace("&maxTime=(ANY)", "") return url def get_info_url(
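The patch above boils down to plain string surgery on the generated advanced-search URL: ERDDAP 2.10 rejects the literal "(ANY)" placeholder for the date parameters, so the two query fragments are dropped entirely. A minimal standalone sketch of that cleanup follows; the sample URL is shortened and hypothetical, and only the two replace() calls mirror the actual fix.
```python
# Hypothetical shortened search URL containing the placeholders ERDDAP 2.10 rejects.
url = (
    "https://coastwatch.pfeg.noaa.gov/erddap/search/advanced.csv"
    "?searchFor=whoi&minTime=(ANY)&maxTime=(ANY)"
)

# Same cleanup the patch applies before returning the URL.
url = url.replace("&minTime=(ANY)", "").replace("&maxTime=(ANY)", "")

print(url)
# https://coastwatch.pfeg.noaa.gov/erddap/search/advanced.csv?searchFor=whoi
```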
HTTP Error with erddap V2.10
It seems something has changed and is now throwing an error - I've not looked into the erddapy code, but here is what I did (both on the swfsc erddap server and my own):
```
from erddapy import ERDDAP
import pandas as pd

e = ERDDAP(server="https://coastwatch.pfeg.noaa.gov/erddap")
url = e.get_search_url(search_for="whoi", response="csv")
print(url)
df = pd.read_csv(url)
print(
    f'We have {len(set(df["tabledap"].dropna()))} '
    f'tabledap, {len(set(df["griddap"].dropna()))} '
    f'griddap, and {len(set(df["wms"].dropna()))} wms endpoints.'
)
```
Throws an HTTP 500 error, and following the generated link gives the following explicit error:
```
Error {
    code=500;
    message="Internal Server Error: ERROR in parseISODateTime: for first character of dateTime='(ANY)' isn't a digit!";
}
```
ioos/erddapy
diff --git a/tests/cassettes/test_erddap2_10.yaml b/tests/cassettes/test_erddap2_10.yaml new file mode 100644 index 0000000..36f6081 --- /dev/null +++ b/tests/cassettes/test_erddap2_10.yaml @@ -0,0 +1,38 @@ +interactions: +- request: + body: null + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - python-requests/2.24.0 + method: HEAD + uri: https://coastwatch.pfeg.noaa.gov/erddap/search/advanced.csv?page=1&itemsPerPage=1000&protocol=(ANY)&cdm_data_type=(ANY)&institution=(ANY)&ioos_category=(ANY)&keywords=(ANY)&long_name=(ANY)&standard_name=(ANY)&variableName=(ANY)&minLon=(ANY)&maxLon=(ANY)&minLat=(ANY)&maxLat=(ANY)&searchFor=whoi + response: + body: + string: '' + headers: + Connection: + - close + Content-Disposition: + - attachment;filename=AdvancedSearch.csv + Content-Encoding: + - gzip + Content-Length: + - '5231' + Content-Type: + - text/csv;charset=ISO-8859-1 + Date: + - Wed, 11 Nov 2020 13:50:28 GMT + Strict-Transport-Security: + - max-age=31536000; includeSubDomains + X-Frame-Options: + - SAMEORIGIN + status: + code: 200 + message: '' +version: 1 diff --git a/tests/test_erddapy.py b/tests/test_erddapy.py index 1fa561e..5ec9f1b 100644 --- a/tests/test_erddapy.py +++ b/tests/test_erddapy.py @@ -3,6 +3,7 @@ from datetime import datetime import pendulum import pytest import pytz +import requests from requests.exceptions import ReadTimeout @@ -96,3 +97,12 @@ def test_erddap_requests_kwargs(): with pytest.raises(ReadTimeout): connection.to_xarray() + + [email protected] [email protected]() +def test_erddap2_10(): + e = ERDDAP(server="https://coastwatch.pfeg.noaa.gov/erddap") + url = e.get_search_url(search_for="whoi", response="csv") + r = requests.head(url) + assert r.raise_for_status() is None
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_hyperlinks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 3, "test_score": 0 }, "num_modified_files": 1 }
0.8
{ "env_vars": null, "env_yml_path": [ "environment.yml" ], "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "environment.yml", "pip_packages": [ "pytest", "pytest-cov", "pytest-xdist", "pytest-mock", "pytest-asyncio" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.7", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
aiofiles @ file:///home/conda/feedstock_root/build_artifacts/aiofiles_1664378549280/work aiosqlite @ file:///home/conda/feedstock_root/build_artifacts/aiosqlite_1715928379913/work antlr4-python3-runtime @ file:///home/conda/feedstock_root/build_artifacts/antlr-python-runtime_1636143373368/work anyio @ file:///home/conda/feedstock_root/build_artifacts/anyio_1688651106312/work/dist argon2-cffi @ file:///home/conda/feedstock_root/build_artifacts/argon2-cffi_1692818318753/work argon2-cffi-bindings @ file:///home/conda/feedstock_root/build_artifacts/argon2-cffi-bindings_1649500320262/work arrow @ file:///home/conda/feedstock_root/build_artifacts/arrow_1662382474514/work attrs @ file:///home/conda/feedstock_root/build_artifacts/attrs_1722977137225/work Babel @ file:///home/conda/feedstock_root/build_artifacts/babel_1702422572539/work backcall @ file:///home/conda/feedstock_root/build_artifacts/backcall_1592338393461/work backports.functools-lru-cache @ file:///home/conda/feedstock_root/build_artifacts/backports.functools_lru_cache_1702571698061/work beautifulsoup4 @ file:///home/conda/feedstock_root/build_artifacts/beautifulsoup4_1705564648255/work bleach @ file:///home/conda/feedstock_root/build_artifacts/bleach_1696630167146/work Brotli @ file:///home/conda/feedstock_root/build_artifacts/brotli-split_1648883617327/work cached-property @ file:///home/conda/feedstock_root/build_artifacts/cached_property_1615209429212/work Cartopy @ file:///home/conda/feedstock_root/build_artifacts/cartopy_1642060950241/work certifi @ file:///home/conda/feedstock_root/build_artifacts/certifi_1725278078093/work/certifi cf-units @ file:///home/conda/feedstock_root/build_artifacts/cf-units_1640986232540/work cffi @ file:///home/conda/feedstock_root/build_artifacts/cffi_1666183775483/work cftime @ file:///home/conda/feedstock_root/build_artifacts/cftime_1663606412550/work charset-normalizer @ file:///home/conda/feedstock_root/build_artifacts/charset-normalizer_1728479282467/work cloudpickle @ file:///home/conda/feedstock_root/build_artifacts/cloudpickle_1674202310934/work comm @ file:///home/conda/feedstock_root/build_artifacts/comm_1710320294760/work coverage==7.2.7 cycler @ file:///home/conda/feedstock_root/build_artifacts/cycler_1635519461629/work dask @ file:///home/conda/feedstock_root/build_artifacts/dask-core_1644602974678/work debugpy @ file:///home/conda/feedstock_root/build_artifacts/debugpy_1660619049122/work decorator @ file:///home/conda/feedstock_root/build_artifacts/decorator_1641555617451/work defusedxml @ file:///home/conda/feedstock_root/build_artifacts/defusedxml_1615232257335/work entrypoints @ file:///home/conda/feedstock_root/build_artifacts/entrypoints_1643888246732/work -e git+https://github.com/ioos/erddapy.git@aa166c1eaf9a233114bd5709c80195e426564554#egg=erddapy exceptiongroup @ file:///home/conda/feedstock_root/build_artifacts/exceptiongroup_1720869315914/work execnet==2.0.2 fastjsonschema @ file:///home/conda/feedstock_root/build_artifacts/python-fastjsonschema_1718477020893/work/dist fonttools @ file:///home/conda/feedstock_root/build_artifacts/fonttools_1666389892786/work fqdn @ file:///home/conda/feedstock_root/build_artifacts/fqdn_1638810296540/work/dist fsspec @ file:///home/conda/feedstock_root/build_artifacts/fsspec_1674184942191/work idna @ file:///home/conda/feedstock_root/build_artifacts/idna_1726459485162/work importlib-metadata @ file:///home/conda/feedstock_root/build_artifacts/importlib-metadata_1653252814274/work importlib-resources @ 
file:///home/conda/feedstock_root/build_artifacts/importlib_resources_1688813467203/work iniconfig==2.0.0 ipykernel @ file:///home/conda/feedstock_root/build_artifacts/ipykernel_1666723258080/work ipython @ file:///home/conda/feedstock_root/build_artifacts/ipython_1651240553635/work ipython_genutils @ file:///home/conda/feedstock_root/build_artifacts/ipython_genutils_1716278396992/work ipywidgets @ file:///home/conda/feedstock_root/build_artifacts/ipywidgets_1724334859652/work isoduration @ file:///home/conda/feedstock_root/build_artifacts/isoduration_1638811571363/work/dist jedi @ file:///home/conda/feedstock_root/build_artifacts/jedi_1696326070614/work Jinja2 @ file:///home/conda/feedstock_root/build_artifacts/jinja2_1715127149914/work json5 @ file:///home/conda/feedstock_root/build_artifacts/json5_1712986206667/work jsonpointer==2.0 jsonschema @ file:///home/conda/feedstock_root/build_artifacts/jsonschema-meta_1669810440410/work jupyter @ file:///home/conda/feedstock_root/build_artifacts/jupyter_1725037521377/work jupyter-console @ file:///home/conda/feedstock_root/build_artifacts/jupyter_console_1676328545892/work jupyter-events @ file:///home/conda/feedstock_root/build_artifacts/jupyter_events_1690301630599/work jupyter-server @ file:///home/conda/feedstock_root/build_artifacts/jupyter_server_1671591499479/work jupyter-ydoc @ file:///home/conda/feedstock_root/build_artifacts/jupyter_ydoc_1685535850115/work/dist jupyter_client @ file:///home/conda/feedstock_root/build_artifacts/jupyter_client_1673615989977/work jupyter_core @ file:///home/conda/feedstock_root/build_artifacts/jupyter_core_1658332345782/work jupyter_server_fileid @ file:///home/conda/feedstock_root/build_artifacts/jupyter_server_fileid_1714390608391/work jupyter_server_ydoc @ file:///home/conda/feedstock_root/build_artifacts/jupyter_server_ydoc_1678043727957/work jupyterlab @ file:///home/conda/feedstock_root/build_artifacts/jupyterlab_1724937868967/work jupyterlab_pygments @ file:///home/conda/feedstock_root/build_artifacts/jupyterlab_pygments_1700744013163/work jupyterlab_server @ file:///home/conda/feedstock_root/build_artifacts/jupyterlab_server_1690205927615/work jupyterlab_widgets @ file:///home/conda/feedstock_root/build_artifacts/jupyterlab_widgets_1724331334887/work kiwisolver @ file:///home/conda/feedstock_root/build_artifacts/kiwisolver_1657953088445/work locket @ file:///home/conda/feedstock_root/build_artifacts/locket_1650660393415/work MarkupSafe @ file:///home/conda/feedstock_root/build_artifacts/markupsafe_1648737551960/work matplotlib @ file:///home/conda/feedstock_root/build_artifacts/matplotlib-suite_1661439848456/work matplotlib-inline @ file:///home/conda/feedstock_root/build_artifacts/matplotlib-inline_1713250518406/work mistune @ file:///home/conda/feedstock_root/build_artifacts/mistune_1698947099619/work munkres==1.1.4 nbclassic @ file:///home/conda/feedstock_root/build_artifacts/nbclassic_1716838762700/work nbclient @ file:///home/conda/feedstock_root/build_artifacts/nbclient_1665125402713/work nbconvert @ file:///home/conda/feedstock_root/build_artifacts/nbconvert-meta_1687202153002/work nbformat @ file:///home/conda/feedstock_root/build_artifacts/nbformat_1679336765223/work nest_asyncio @ file:///home/conda/feedstock_root/build_artifacts/nest-asyncio_1705850609492/work netCDF4 @ file:///home/conda/feedstock_root/build_artifacts/netcdf4_1661987221388/work notebook @ file:///home/conda/feedstock_root/build_artifacts/notebook_1715848908871/work notebook_shim @ 
file:///home/conda/feedstock_root/build_artifacts/notebook-shim_1707957777232/work numpy @ file:///home/conda/feedstock_root/build_artifacts/numpy_1649806299270/work packaging @ file:///home/conda/feedstock_root/build_artifacts/packaging_1696202382185/work pandas==1.3.5 pandocfilters @ file:///home/conda/feedstock_root/build_artifacts/pandocfilters_1631603243851/work parso @ file:///home/conda/feedstock_root/build_artifacts/parso_1712320355065/work partd @ file:///home/conda/feedstock_root/build_artifacts/partd_1695667515973/work pendulum @ file:///home/conda/feedstock_root/build_artifacts/pendulum_1649254539292/work pexpect @ file:///home/conda/feedstock_root/build_artifacts/pexpect_1706113125309/work pickleshare @ file:///home/conda/feedstock_root/build_artifacts/pickleshare_1602536217715/work Pillow @ file:///home/conda/feedstock_root/build_artifacts/pillow_1660385854171/work pkgutil_resolve_name @ file:///home/conda/feedstock_root/build_artifacts/pkgutil-resolve-name_1694617248815/work pluggy==1.2.0 prometheus-client @ file:///home/conda/feedstock_root/build_artifacts/prometheus_client_1689032443210/work prompt_toolkit @ file:///home/conda/feedstock_root/build_artifacts/prompt-toolkit_1727341649933/work psutil @ file:///home/conda/feedstock_root/build_artifacts/psutil_1666155398032/work ptyprocess @ file:///home/conda/feedstock_root/build_artifacts/ptyprocess_1609419310487/work/dist/ptyprocess-0.7.0-py2.py3-none-any.whl pycparser @ file:///home/conda/feedstock_root/build_artifacts/pycparser_1636257122734/work Pygments @ file:///home/conda/feedstock_root/build_artifacts/pygments_1700607939962/work pyparsing @ file:///home/conda/feedstock_root/build_artifacts/pyparsing_1724616129934/work pyproj @ file:///home/conda/feedstock_root/build_artifacts/pyproj_1636547699801/work pyrsistent @ file:///home/conda/feedstock_root/build_artifacts/pyrsistent_1649013358450/work pyshp @ file:///home/conda/feedstock_root/build_artifacts/pyshp_1659002966020/work PySocks @ file:///home/conda/feedstock_root/build_artifacts/pysocks_1648857264451/work pytest==7.4.4 pytest-asyncio==0.21.2 pytest-cov==4.1.0 pytest-mock==3.11.1 pytest-xdist==3.5.0 python-dateutil @ file:///home/conda/feedstock_root/build_artifacts/python-dateutil_1709299778482/work python-json-logger @ file:///home/conda/feedstock_root/build_artifacts/python-json-logger_1677079630776/work pytz @ file:///home/conda/feedstock_root/build_artifacts/pytz_1726055524169/work pytzdata @ file:///home/conda/feedstock_root/build_artifacts/pytzdata_1594644346367/work PyYAML @ file:///home/conda/feedstock_root/build_artifacts/pyyaml_1648757092905/work pyzmq @ file:///home/conda/feedstock_root/build_artifacts/pyzmq_1663830492333/work requests @ file:///home/conda/feedstock_root/build_artifacts/requests_1716354486713/work rfc3339-validator @ file:///home/conda/feedstock_root/build_artifacts/rfc3339-validator_1638811747357/work rfc3986-validator @ file:///home/conda/feedstock_root/build_artifacts/rfc3986-validator_1598024191506/work scipy @ file:///home/conda/feedstock_root/build_artifacts/scipy_1637806658031/work scitools-iris @ file:///home/conda/feedstock_root/build_artifacts/iris_1637936208876/work Send2Trash @ file:///home/conda/feedstock_root/build_artifacts/send2trash_1712584999685/work Shapely @ file:///home/conda/feedstock_root/build_artifacts/shapely_1637399855493/work six @ file:///home/conda/feedstock_root/build_artifacts/six_1620240208055/work sniffio @ file:///home/conda/feedstock_root/build_artifacts/sniffio_1708952932303/work soupsieve @ 
file:///home/conda/feedstock_root/build_artifacts/soupsieve_1658207591808/work terminado @ file:///home/conda/feedstock_root/build_artifacts/terminado_1670253674810/work tinycss2 @ file:///home/conda/feedstock_root/build_artifacts/tinycss2_1729802851396/work tomli @ file:///home/conda/feedstock_root/build_artifacts/tomli_1727974628237/work toolz @ file:///home/conda/feedstock_root/build_artifacts/toolz_1706112571092/work tornado @ file:///home/conda/feedstock_root/build_artifacts/tornado_1656937818679/work traitlets @ file:///home/conda/feedstock_root/build_artifacts/traitlets_1675110562325/work typing_extensions @ file:///home/conda/feedstock_root/build_artifacts/typing_extensions_1688315532570/work unicodedata2 @ file:///home/conda/feedstock_root/build_artifacts/unicodedata2_1649111917568/work uri-template @ file:///home/conda/feedstock_root/build_artifacts/uri-template_1688655812972/work/dist urllib3 @ file:///home/conda/feedstock_root/build_artifacts/urllib3_1708239446578/work wcwidth @ file:///home/conda/feedstock_root/build_artifacts/wcwidth_1699959196938/work webcolors @ file:///home/conda/feedstock_root/build_artifacts/webcolors_1723294704277/work webencodings @ file:///home/conda/feedstock_root/build_artifacts/webencodings_1694681268211/work websocket-client @ file:///home/conda/feedstock_root/build_artifacts/websocket-client_1687789148259/work widgetsnbextension @ file:///home/conda/feedstock_root/build_artifacts/widgetsnbextension_1724331337528/work xarray @ file:///home/conda/feedstock_root/build_artifacts/xarray_1639125986756/work xxhash @ file:///home/conda/feedstock_root/build_artifacts/python-xxhash_1649442453935/work y-py @ file:///home/conda/feedstock_root/build_artifacts/y-py_1658953063597/work ypy-websocket @ file:///home/conda/feedstock_root/build_artifacts/ypy-websocket_1670333059911/work zipp @ file:///home/conda/feedstock_root/build_artifacts/zipp_1677313463193/work
name: erddapy channels: - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=conda_forge - _openmp_mutex=4.5=2_gnu - aiofiles=22.1.0=pyhd8ed1ab_0 - aiosqlite=0.20.0=pyhd8ed1ab_0 - antlr-python-runtime=4.7.2=py37h89c1867_1003 - anyio=3.7.1=pyhd8ed1ab_0 - argon2-cffi=23.1.0=pyhd8ed1ab_0 - argon2-cffi-bindings=21.2.0=py37h540881e_2 - arrow=1.2.3=pyhd8ed1ab_0 - attrs=24.2.0=pyh71513ae_0 - babel=2.14.0=pyhd8ed1ab_0 - backcall=0.2.0=pyh9f0ad1d_0 - backports=1.0=pyhd8ed1ab_4 - backports.functools_lru_cache=2.0.0=pyhd8ed1ab_0 - beautifulsoup4=4.12.3=pyha770c72_0 - bleach=6.1.0=pyhd8ed1ab_0 - brotli=1.1.0=hb9d3cd8_2 - brotli-bin=1.1.0=hb9d3cd8_2 - brotli-python=1.0.9=py37hd23a5d3_7 - bzip2=1.0.8=h4bc722e_7 - c-ares=1.34.4=hb9d3cd8_0 - ca-certificates=2025.1.31=hbcca054_0 - cached-property=1.5.2=hd8ed1ab_1 - cached_property=1.5.2=pyha770c72_1 - cartopy=0.20.2=py37h9a08e6e_0 - certifi=2024.8.30=pyhd8ed1ab_0 - cf-units=3.0.1=py37hb1e94ed_2 - cffi=1.15.1=py37h43b0acd_1 - cftime=1.6.2=py37hc105733_0 - charset-normalizer=3.4.0=pyhd8ed1ab_0 - cloudpickle=2.2.1=pyhd8ed1ab_0 - comm=0.2.2=pyhd8ed1ab_0 - curl=8.12.1=h332b0f4_0 - cycler=0.11.0=pyhd8ed1ab_0 - dask-core=2022.2.0=pyhd8ed1ab_0 - debugpy=1.6.3=py37hd23a5d3_0 - decorator=5.1.1=pyhd8ed1ab_0 - defusedxml=0.7.1=pyhd8ed1ab_0 - entrypoints=0.4=pyhd8ed1ab_0 - exceptiongroup=1.2.2=pyhd8ed1ab_0 - fonttools=4.38.0=py37h540881e_0 - fqdn=1.5.1=pyhd8ed1ab_0 - freetype=2.13.3=h48d6fc4_0 - fsspec=2023.1.0=pyhd8ed1ab_0 - geos=3.10.1=h9c3ff4c_1 - hdf4=4.2.15=h9772cbc_5 - hdf5=1.12.2=nompi_h4df4325_101 - idna=3.10=pyhd8ed1ab_0 - importlib-metadata=4.11.4=py37h89c1867_0 - importlib_metadata=4.11.4=hd8ed1ab_0 - importlib_resources=6.0.0=pyhd8ed1ab_0 - ipykernel=6.16.2=pyh210e3f2_0 - ipython=7.33.0=py37h89c1867_0 - ipython_genutils=0.2.0=pyhd8ed1ab_1 - ipywidgets=8.1.5=pyhd8ed1ab_0 - iris=3.1.0=pyhd8ed1ab_3 - isoduration=20.11.0=pyhd8ed1ab_0 - jedi=0.19.1=pyhd8ed1ab_0 - jinja2=3.1.4=pyhd8ed1ab_0 - jpeg=9e=h0b41bf4_3 - json5=0.9.25=pyhd8ed1ab_0 - jsonpointer=2.0=py_0 - jsonschema=4.17.3=pyhd8ed1ab_0 - jsonschema-with-format-nongpl=4.17.3=pyhd8ed1ab_0 - jupyter=1.1.1=pyhd8ed1ab_0 - jupyter_client=7.4.9=pyhd8ed1ab_0 - jupyter_console=6.5.1=pyhd8ed1ab_0 - jupyter_core=4.11.1=py37h89c1867_0 - jupyter_events=0.6.3=pyhd8ed1ab_1 - jupyter_server=1.23.4=pyhd8ed1ab_0 - jupyter_server_fileid=0.9.2=pyhd8ed1ab_0 - jupyter_server_ydoc=0.8.0=pyhd8ed1ab_0 - jupyter_ydoc=0.2.4=pyhd8ed1ab_0 - jupyterlab=3.6.8=pyhd8ed1ab_0 - jupyterlab_pygments=0.3.0=pyhd8ed1ab_0 - jupyterlab_server=2.24.0=pyhd8ed1ab_0 - jupyterlab_widgets=3.0.13=pyhd8ed1ab_0 - keyutils=1.6.1=h166bdaf_0 - kiwisolver=1.4.4=py37h7cecad7_0 - krb5=1.21.3=h659f571_0 - lcms2=2.14=h6ed2654_0 - ld_impl_linux-64=2.43=h712a8e2_4 - lerc=4.0.0=h27087fc_0 - libaec=1.1.3=h59595ed_0 - libblas=3.9.0=20_linux64_openblas - libbrotlicommon=1.1.0=hb9d3cd8_2 - libbrotlidec=1.1.0=hb9d3cd8_2 - libbrotlienc=1.1.0=hb9d3cd8_2 - libcblas=3.9.0=20_linux64_openblas - libcurl=8.12.1=h332b0f4_0 - libdeflate=1.14=h166bdaf_0 - libedit=3.1.20250104=pl5321h7949ede_0 - libev=4.33=hd590300_2 - libexpat=2.6.4=h5888daf_0 - libffi=3.4.6=h2dba641_0 - libgcc=14.2.0=h767d61c_2 - libgcc-ng=14.2.0=h69a702a_2 - libgfortran=14.2.0=h69a702a_2 - libgfortran-ng=14.2.0=h69a702a_2 - libgfortran5=14.2.0=hf1ad2bd_2 - libgomp=14.2.0=h767d61c_2 - libiconv=1.18=h4ce23a2_1 - liblapack=3.9.0=20_linux64_openblas - liblzma=5.6.4=hb9d3cd8_0 - liblzma-devel=5.6.4=hb9d3cd8_0 - libnetcdf=4.8.1=nompi_h261ec11_106 - 
libnghttp2=1.64.0=h161d5f1_0 - libnsl=2.0.1=hd590300_0 - libopenblas=0.3.25=pthreads_h413a1c8_0 - libpng=1.6.47=h943b412_0 - libsodium=1.0.18=h36c2ea0_1 - libsqlite=3.49.1=hee588c1_2 - libssh2=1.11.1=hf672d98_0 - libstdcxx=14.2.0=h8f9b012_2 - libstdcxx-ng=14.2.0=h4852527_2 - libtiff=4.4.0=h82bc61c_5 - libudunits2=2.2.28=h40f5838_3 - libwebp-base=1.5.0=h851e524_0 - libxcb=1.13=h7f98852_1004 - libxml2=2.13.7=h0d44e9d_0 - libzip=1.11.2=h6991a6a_0 - libzlib=1.3.1=hb9d3cd8_2 - locket=1.0.0=pyhd8ed1ab_0 - markupsafe=2.1.1=py37h540881e_1 - matplotlib-base=3.5.3=py37hf395dca_2 - matplotlib-inline=0.1.7=pyhd8ed1ab_0 - mistune=3.0.2=pyhd8ed1ab_0 - munkres=1.1.4=pyh9f0ad1d_0 - nbclassic=1.1.0=pyhd8ed1ab_0 - nbclient=0.7.0=pyhd8ed1ab_0 - nbconvert-core=7.6.0=pyhd8ed1ab_0 - nbformat=5.8.0=pyhd8ed1ab_0 - ncurses=6.5=h2d0b736_3 - nest-asyncio=1.6.0=pyhd8ed1ab_0 - netcdf4=1.6.0=nompi_py37h7187172_102 - notebook=6.5.7=pyha770c72_0 - notebook-shim=0.2.4=pyhd8ed1ab_0 - numpy=1.21.6=py37h976b520_0 - openjpeg=2.5.0=h7d73246_1 - openssl=3.4.1=h7b32b05_0 - packaging=23.2=pyhd8ed1ab_0 - pandas=1.3.5=py37he8f5f7f_0 - pandocfilters=1.5.0=pyhd8ed1ab_0 - parso=0.8.4=pyhd8ed1ab_0 - partd=1.4.1=pyhd8ed1ab_0 - pendulum=2.1.2=py37h540881e_4 - pexpect=4.9.0=pyhd8ed1ab_0 - pickleshare=0.7.5=py_1003 - pillow=9.2.0=py37h850a105_2 - pip=24.0=pyhd8ed1ab_0 - pkgutil-resolve-name=1.3.10=pyhd8ed1ab_1 - proj=8.2.0=h277dcde_0 - prometheus_client=0.17.1=pyhd8ed1ab_0 - prompt-toolkit=3.0.48=pyha770c72_0 - prompt_toolkit=3.0.48=hd8ed1ab_1 - psutil=5.9.3=py37h540881e_0 - pthread-stubs=0.4=hb9d3cd8_1002 - ptyprocess=0.7.0=pyhd3deb0d_0 - pycparser=2.21=pyhd8ed1ab_0 - pygments=2.17.2=pyhd8ed1ab_0 - pyparsing=3.1.4=pyhd8ed1ab_0 - pyproj=3.2.1=py37hb589d83_5 - pyrsistent=0.18.1=py37h540881e_1 - pyshp=2.3.1=pyhd8ed1ab_0 - pysocks=1.7.1=py37h89c1867_5 - python=3.7.12=hf930737_100_cpython - python-dateutil=2.9.0=pyhd8ed1ab_0 - python-fastjsonschema=2.20.0=pyhd8ed1ab_0 - python-json-logger=2.0.7=pyhd8ed1ab_0 - python-xxhash=3.0.0=py37h540881e_1 - python_abi=3.7=4_cp37m - pytz=2024.2=pyhd8ed1ab_0 - pytzdata=2020.1=pyh9f0ad1d_0 - pyyaml=6.0=py37h540881e_4 - pyzmq=24.0.1=py37h0c0c2a8_0 - readline=8.2=h8c095d6_2 - requests=2.32.2=pyhd8ed1ab_0 - rfc3339-validator=0.1.4=pyhd8ed1ab_0 - rfc3986-validator=0.1.1=pyh9f0ad1d_0 - scipy=1.7.3=py37hf2a6cf1_0 - send2trash=1.8.3=pyh0d859eb_0 - setuptools=59.8.0=py37h89c1867_1 - shapely=1.8.0=py37h9b0f7a3_4 - six=1.16.0=pyh6c4a22f_0 - sniffio=1.3.1=pyhd8ed1ab_0 - soupsieve=2.3.2.post1=pyhd8ed1ab_0 - sqlite=3.49.1=h9eae976_2 - terminado=0.17.1=pyh41d4057_0 - tinycss2=1.4.0=pyhd8ed1ab_0 - tk=8.6.13=noxft_h4845f30_101 - tomli=2.0.2=pyhd8ed1ab_0 - toolz=0.12.1=pyhd8ed1ab_0 - tornado=6.2=py37h540881e_0 - traitlets=5.9.0=pyhd8ed1ab_0 - typing-extensions=4.7.1=hd8ed1ab_0 - typing_extensions=4.7.1=pyha770c72_0 - udunits2=2.2.28=h40f5838_3 - unicodedata2=14.0.0=py37h540881e_1 - uri-template=1.3.0=pyhd8ed1ab_0 - urllib3=2.2.1=pyhd8ed1ab_0 - wcwidth=0.2.10=pyhd8ed1ab_0 - webcolors=24.8.0=pyhd8ed1ab_0 - webencodings=0.5.1=pyhd8ed1ab_2 - websocket-client=1.6.1=pyhd8ed1ab_0 - wheel=0.42.0=pyhd8ed1ab_0 - widgetsnbextension=4.0.13=pyhd8ed1ab_0 - xarray=0.20.2=pyhd8ed1ab_0 - xorg-libxau=1.0.12=hb9d3cd8_0 - xorg-libxdmcp=1.1.5=hb9d3cd8_0 - xxhash=0.8.0=h7f98852_3 - xz=5.6.4=hbcc6ac9_0 - xz-gpl-tools=5.6.4=hbcc6ac9_0 - xz-tools=5.6.4=hb9d3cd8_0 - y-py=0.5.4=py37hbd0741f_0 - yaml=0.2.5=h7f98852_2 - ypy-websocket=0.8.2=pyhd8ed1ab_0 - zeromq=4.3.5=h75354e8_4 - zipp=3.15.0=pyhd8ed1ab_0 - zlib=1.3.1=hb9d3cd8_2 - zstd=1.5.7=hb8e6e7a_2 - 
pip: - coverage==7.2.7 - erddapy==0.8.0 - execnet==2.0.2 - iniconfig==2.0.0 - pluggy==1.2.0 - pytest==7.4.4 - pytest-asyncio==0.21.2 - pytest-cov==4.1.0 - pytest-mock==3.11.1 - pytest-xdist==3.5.0 prefix: /opt/conda/envs/erddapy
[ "tests/test_erddapy.py::test_erddap2_10" ]
[]
[ "tests/test_erddapy.py::test_parse_dates_naive_datetime", "tests/test_erddapy.py::test_parse_dates_utc_datetime", "tests/test_erddapy.py::test_parse_dates_utc_pendulum", "tests/test_erddapy.py::test_parse_dates_nonutc_datetime", "tests/test_erddapy.py::test_parse_dates_nonutc_pendulum", "tests/test_erddapy.py::test_parse_dates_from_string", "tests/test_erddapy.py::test__quote_string_constraints", "tests/test_erddapy.py::test_erddap_requests_kwargs" ]
[]
BSD 3-Clause "New" or "Revised" License
8,924
175
[ "erddapy/erddapy.py" ]
googlefonts__picosvg-131
253261829a32da86933b335fe613843b870973c7
2020-11-11 04:03:08
253261829a32da86933b335fe613843b870973c7
diff --git a/src/picosvg/svg.py b/src/picosvg/svg.py index b82838a..d8bd22f 100644 --- a/src/picosvg/svg.py +++ b/src/picosvg/svg.py @@ -733,8 +733,9 @@ class SVG: el, self.view_box() ) affine = gradient.gradientTransform - a, b, c, d, dx, dy = affine - if (dx, dy) == (0, 0): + a, b, c, d, e, f = affine + # no translate? nop! + if (e, f) == (0, 0): continue affine_prime = affine._replace(e=0, f=0) @@ -756,13 +757,24 @@ class SVG: # 2) - 1) bx` - bx` + dy` - (b/a)cy` = r2 - (b/a) * r1 # y` = (r2 - (b/a) * r1) / (d - (b/a)c) r1, r2 = affine.map_point((x, y)) - assert r1 == a * x + c * y + dx - assert r2 == b * x + d * y + dy - y_prime = (r2 - r1 * b / a) / (d - b * c / a) - - # Sub y` into 1) - # 1) x` = (r1 - cy`) / a - x_prime = (r1 - c * y_prime) / a + assert r1 == a * x + c * y + e + assert r2 == b * x + d * y + f + + if a != 0: + y_prime = (r2 - r1 * b / a) / (d - b * c / a) + + # Sub y` into 1) + # 1) x` = (r1 - cy`) / a + x_prime = (r1 - c * y_prime) / a + else: + # if a == 0 then above gives div / 0. Take a simpler path. + # 1) 0x` + cy` + 0 = 0x + cy + e + # y` = y + e/c + y_prime = y + e / c + # Sub y` into 2) + # 2) bx` + dy` + 0 = bx + dy + f + # x` = x + dy/b + f/b - dy`/b + x_prime = x + (d * y / b) + (f / b) - (d * y_prime / b) # sanity check: a`(x`, y`) should be a(x, y) # all our float brutality damages points; low tolerance sanity checks!
ZeroDivisionError when applying gradient translation
The infamous baseball emoji (U+26BE) produces a ZeroDivisionError when passed through the latest picosvg (following #128):
```
$ picosvg ../color-fonts/font-srcs/noto-emoji/svg/emoji_u26be.svg
Traceback (most recent call last):
  File "/Users/clupo/Github/nanoemoji/.venv/bin/picosvg", line 33, in <module>
    sys.exit(load_entry_point('picosvg', 'console_scripts', 'picosvg')())
  File "/Users/clupo/Github/picosvg/src/picosvg/picosvg.py", line 39, in main
    svg = SVG.parse(input_file).topicosvg()
  File "/Users/clupo/Github/picosvg/src/picosvg/svg.py", line 822, in topicosvg
    svg.topicosvg(inplace=True)
  File "/Users/clupo/Github/picosvg/src/picosvg/svg.py", line 841, in topicosvg
    self._apply_gradient_translation(inplace=True)
  File "/Users/clupo/Github/picosvg/src/picosvg/svg.py", line 761, in _apply_gradient_translation
    y_prime = (r2 - r1 * b / a) / (d - b * c / a)
ZeroDivisionError: float division by zero
```
Investigating
googlefonts/picosvg
diff --git a/tests/svg_test.py b/tests/svg_test.py index 81294a0..e89ed6a 100644 --- a/tests/svg_test.py +++ b/tests/svg_test.py @@ -436,6 +436,12 @@ def test_apply_style_attributes(actual, expected_result): '<radialGradient id="mbbox" cx="0.75" cy="0.75" r="0.40" gradientTransform="matrix(1 1 -0.7873 -0.001717 0.5 0)" gradientUnits="objectBoundingBox"/>', '<radialGradient id="mbbox" cx="0.748907" cy="0.11353" r="0.40" gradientTransform="matrix(1 1 -0.7873 -0.001717 0 0)" gradientUnits="objectBoundingBox"/>', ), + # Real example from emoji_u26BE + # https://github.com/googlefonts/picosvg/issues/129 + ( + '<radialGradient id="f" cx="-779.79" cy="3150" r="58.471" gradientTransform="matrix(0 1 -1 0 3082.5 1129.5)" gradientUnits="userSpaceOnUse"/>', + '<radialGradient id="f" cx="349.71" cy="67.5" r="58.471" gradientTransform="matrix(0 1 -1 0 0 0)" gradientUnits="userSpaceOnUse"/>', + ), ], ) def test_apply_gradient_translation(gradient_string, expected_result):
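The expected values in the test above can be checked by hand against the new `a == 0` branch in the patch: with gradientTransform `matrix(0 1 -1 0 3082.5 1129.5)`, the original centre (-779.79, 3150) should move to roughly (349.71, 67.5) once the translation is folded into the coordinates. The sketch below just replays that arithmetic outside picosvg as a sanity check; it is not the library's own code path.
```python
# Affine components from matrix(0 1 -1 0 3082.5 1129.5) in the test case above.
a, b, c, d, e, f = 0.0, 1.0, -1.0, 0.0, 3082.5, 1129.5
x, y = -779.79, 3150.0  # original cx, cy

# Where the full transform (including translation) sends the centre.
r1 = a * x + c * y + e  # -67.5
r2 = b * x + d * y + f  # 349.71

# a == 0 branch from the patch: find the point that the translation-free
# transform maps onto the same (r1, r2).
y_prime = y + e / c
x_prime = x + (d * y / b) + (f / b) - (d * y_prime / b)

assert abs(a * x_prime + c * y_prime - r1) < 1e-9
assert abs(b * x_prime + d * y_prime - r2) < 1e-9
print(x_prime, y_prime)  # ~349.71 67.5, matching the expected cx/cy in the test
```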
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_media" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 2, "test_score": 0 }, "num_modified_files": 1 }
0.6
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "requirements/base.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
exceptiongroup==1.2.2 iniconfig==2.1.0 lxml==5.3.1 packaging==24.2 -e git+https://github.com/googlefonts/picosvg.git@253261829a32da86933b335fe613843b870973c7#egg=picosvg pluggy==1.5.0 pytest==8.3.5 skia-pathops==0.8.0.post2 tomli==2.2.1
name: picosvg channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - exceptiongroup==1.2.2 - iniconfig==2.1.0 - lxml==5.3.1 - packaging==24.2 - pluggy==1.5.0 - pytest==8.3.5 - skia-pathops==0.8.0.post2 - tomli==2.2.1 prefix: /opt/conda/envs/picosvg
[ "tests/svg_test.py::test_apply_gradient_translation[<radialGradient" ]
[]
[ "tests/svg_test.py::test_common_attrib[<path", "tests/svg_test.py::test_common_attrib[<rect", "tests/svg_test.py::test_common_attrib[<polyline", "tests/svg_test.py::test_common_attrib[<line", "tests/svg_test.py::test_shapes_to_paths[<path", "tests/svg_test.py::test_shapes_to_paths[<line", "tests/svg_test.py::test_shapes_to_paths[<rect", "tests/svg_test.py::test_shapes_to_paths[<polygon", "tests/svg_test.py::test_shapes_to_paths[<polyline", "tests/svg_test.py::test_shapes_to_paths[<circle", "tests/svg_test.py::test_shapes_to_paths[<ellipse", "tests/svg_test.py::test_iter[<line", "tests/svg_test.py::test_iter[<path", "tests/svg_test.py::test_apply_clip_path[clip-rect.svg-clip-rect-clipped.svg]", "tests/svg_test.py::test_apply_clip_path[clip-ellipse.svg-clip-ellipse-clipped.svg]", "tests/svg_test.py::test_apply_clip_path[clip-curves.svg-clip-curves-clipped.svg]", "tests/svg_test.py::test_apply_clip_path[clip-multirect.svg-clip-multirect-clipped.svg]", "tests/svg_test.py::test_apply_clip_path[clip-groups.svg-clip-groups-clipped.svg]", "tests/svg_test.py::test_apply_clip_path[clip-use.svg-clip-use-clipped.svg]", "tests/svg_test.py::test_apply_clip_path[clip-rule-evenodd.svg-clip-rule-evenodd-clipped.svg]", "tests/svg_test.py::test_resolve_use[use-ellipse.svg-use-ellipse-resolved.svg]", "tests/svg_test.py::test_ungroup[ungroup-before.svg-ungroup-after.svg]", "tests/svg_test.py::test_ungroup[ungroup-multiple-children-before.svg-ungroup-multiple-children-after.svg]", "tests/svg_test.py::test_ungroup[twemoji-lesotho-flag-before.svg-twemoji-lesotho-flag-after-ungroup.svg]", "tests/svg_test.py::test_strokes_to_paths[stroke-simplepath-before.svg-stroke-simplepath-after.svg]", "tests/svg_test.py::test_strokes_to_paths[stroke-path-before.svg-stroke-path-after.svg]", "tests/svg_test.py::test_strokes_to_paths[stroke-capjoinmiterlimit-before.svg-stroke-capjoinmiterlimit-after.svg]", "tests/svg_test.py::test_transform[rotated-rect.svg-rotated-rect-after.svg]", "tests/svg_test.py::test_transform[translate-rect.svg-translate-rect-after.svg]", "tests/svg_test.py::test_topicosvg[ungroup-before.svg-ungroup-nano.svg]", "tests/svg_test.py::test_topicosvg[ungroup-multiple-children-before.svg-ungroup-multiple-children-nano.svg]", "tests/svg_test.py::test_topicosvg[group-stroke-before.svg-group-stroke-nano.svg]", "tests/svg_test.py::test_topicosvg[arcs-before.svg-arcs-nano.svg]", "tests/svg_test.py::test_topicosvg[invisible-before.svg-invisible-nano.svg]", "tests/svg_test.py::test_topicosvg[transform-before.svg-transform-nano.svg]", "tests/svg_test.py::test_topicosvg[group-data-name-before.svg-group-data-name-after.svg]", "tests/svg_test.py::test_topicosvg[matrix-before.svg-matrix-nano.svg]", "tests/svg_test.py::test_topicosvg[degenerate-before.svg-degenerate-nano.svg]", "tests/svg_test.py::test_topicosvg[fill-rule-evenodd-before.svg-fill-rule-evenodd-nano.svg]", "tests/svg_test.py::test_topicosvg[twemoji-lesotho-flag-before.svg-twemoji-lesotho-flag-nano.svg]", "tests/svg_test.py::test_topicosvg[inline-css-style-before.svg-inline-css-style-nano.svg]", "tests/svg_test.py::test_topicosvg[clipped-strokes-before.svg-clipped-strokes-nano.svg]", "tests/svg_test.py::test_remove_unpainted_shapes[invisible-before.svg-invisible-after.svg]", "tests/svg_test.py::test_checkpicosvg[good-defs-0.svg-expected_violations0]", "tests/svg_test.py::test_checkpicosvg[bad-defs-0.svg-expected_violations1]", "tests/svg_test.py::test_checkpicosvg[bad-defs-1.svg-expected_violations2]", "tests/svg_test.py::test_viewbox[<svg", 
"tests/svg_test.py::test_remove_attributes[<svg", "tests/svg_test.py::test_tolerance[<svg", "tests/svg_test.py::test_parse_css_declarations[fill:none-None-expected_output0-]", "tests/svg_test.py::test_parse_css_declarations[fill:", "tests/svg_test.py::test_parse_css_declarations[", "tests/svg_test.py::test_parse_css_declarations[enable-background:new", "tests/svg_test.py::test_parse_css_declarations_invalid[foo;bar;]", "tests/svg_test.py::test_parse_css_declarations_invalid[foo:bar:baz;]", "tests/svg_test.py::test_apply_style_attributes[inline-css-style-before.svg-inline-css-style-after.svg]", "tests/svg_test.py::test_apply_gradient_translation[<linearGradient" ]
[]
Apache License 2.0
8,925
685
[ "src/picosvg/svg.py" ]
frictionlessdata__frictionless-py-525
30a44b05388f9def41a0c5ea009aa900a5aaf574
2020-11-11 09:11:32
38002600c959bbe2863d533177d56a9df89c9fbd
diff --git a/frictionless/query.py b/frictionless/query.py index 3315f852..247992c2 100644 --- a/frictionless/query.py +++ b/frictionless/query.py @@ -170,11 +170,11 @@ class Query(Metadata): "properties": { "pickFields": {"type": "array"}, "skipFields": {"type": "array"}, - "limitFields": {"type": "number"}, - "offsetFields": {"type": "number"}, + "limitFields": {"type": "number", "minimum": 1}, + "offsetFields": {"type": "number", "minimum": 1}, "pickRows": {"type": "array"}, "skipRows": {"type": "array"}, - "limitRows": {"type": "number"}, - "offsetRows": {"type": "number"}, + "limitRows": {"type": "number", "minimum": 1}, + "offsetRows": {"type": "number", "minimum": 1}, }, }
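The fix above relies on JSON Schema's `minimum` keyword: once `limitRows` and the related properties carry `"minimum": 1`, a value of 0 fails metadata validation and surfaces as a query-error instead of silently doing nothing. The snippet below illustrates that behaviour with the standalone jsonschema package on a cut-down schema; it is only an assumption-level sketch, not frictionless's own Query metadata machinery.
```python
from jsonschema import Draft7Validator

# Cut-down stand-in for the Query metadata profile with the new constraint.
schema = {
    "type": "object",
    "properties": {"limitRows": {"type": "number", "minimum": 1}},
}

validator = Draft7Validator(schema)
for error in validator.iter_errors({"limitRows": 0}):
    print(error.message)  # expected: "0 is less than the minimum of 1"
```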
Query(limit_rows=0) doesn't limit rows
# Overview
From here - https://github.com/frictionlessdata/frictionless-py/issues/516. The same must also hold for limit_fields.
frictionlessdata/frictionless-py
diff --git a/tests/test_table.py b/tests/test_table.py index 97d23601..f269c50c 100644 --- a/tests/test_table.py +++ b/tests/test_table.py @@ -851,6 +851,50 @@ def test_table_limit_offset_rows(): assert table.read_data() == [["3", "c"], ["4", "d"]] +def test_table_limit_fields_error_zero_issue_521(): + source = "data/long.csv" + query = Query(limit_fields=0) + table = Table(source, query=query) + with pytest.raises(exceptions.FrictionlessException) as excinfo: + table.open() + error = excinfo.value.error + assert error.code == "query-error" + assert error.note.count('minimum of 1" at "limitFields') + + +def test_table_offset_fields_error_zero_issue_521(): + source = "data/long.csv" + query = Query(offset_fields=0) + table = Table(source, query=query) + with pytest.raises(exceptions.FrictionlessException) as excinfo: + table.open() + error = excinfo.value.error + assert error.code == "query-error" + assert error.note.count('minimum of 1" at "offsetFields') + + +def test_table_limit_rows_error_zero_issue_521(): + source = "data/long.csv" + query = Query(limit_rows=0) + table = Table(source, query=query) + with pytest.raises(exceptions.FrictionlessException) as excinfo: + table.open() + error = excinfo.value.error + assert error.code == "query-error" + assert error.note.count('minimum of 1" at "limitRows') + + +def test_table_offset_rows_error_zero_issue_521(): + source = "data/long.csv" + query = Query(offset_rows=0) + table = Table(source, query=query) + with pytest.raises(exceptions.FrictionlessException) as excinfo: + table.open() + error = excinfo.value.error + assert error.code == "query-error" + assert error.note.count('minimum of 1" at "offsetRows') + + # Header
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 3, "test_score": 3 }, "num_modified_files": 1 }
3.27
{ "env_vars": null, "env_yml_path": null, "install": "pip install --upgrade -e .[bigquery,ckan,excel,gsheets,html,json,ods,pandas,s3,server,spss,sql,dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "pytest-xdist", "pytest-mock", "pytest-asyncio" ], "pre_install": [ "apt-get update", "apt-get install -y gcc", "apt-get install -y postgresql libpq-dev" ], "python": "3.9", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
asttokens==3.0.0 attrs==25.3.0 beautifulsoup4==4.13.3 black==23.12.1 bleach==6.2.0 blinker==1.9.0 boto3==1.37.23 botocore==1.37.23 cachetools==5.5.2 certifi==2025.1.31 cffi==1.17.1 chardet==5.2.0 charset-normalizer==3.4.1 ckanapi==4.8 click==8.1.8 coverage==7.8.0 coveralls==4.0.1 cryptography==44.0.2 cssselect==1.3.0 databind==4.5.2 databind.core==4.5.2 databind.json==4.5.2 decorator==5.2.1 defusedxml==0.7.1 Deprecated==1.2.18 docopt==0.6.2 docspec==2.2.1 docspec-python==2.2.1 docstring_parser==0.11 et_xmlfile==2.0.0 exceptiongroup==1.2.2 execnet==2.1.1 executing==2.2.0 ezodf==0.3.2 fastjsonschema==2.21.1 filelock==3.18.0 Flask==3.1.0 -e git+https://github.com/frictionlessdata/frictionless-py.git@30a44b05388f9def41a0c5ea009aa900a5aaf574#egg=frictionless gdown==5.2.0 google-api-core==2.24.2 google-api-python-client==2.166.0 google-auth==2.38.0 google-auth-httplib2==0.2.0 googleapis-common-protos==1.69.2 greenlet==3.1.1 gunicorn==23.0.0 httplib2==0.22.0 idna==3.10 ijson==3.3.0 importlib_metadata==8.6.1 iniconfig==2.1.0 ipython==8.18.1 isodate==0.7.2 itsdangerous==2.2.0 jedi==0.19.2 Jinja2==3.1.6 jmespath==1.0.1 jsonlines==4.0.0 jsonschema==4.23.0 jsonschema-specifications==2024.10.1 jupyter_client==8.6.3 jupyter_core==5.7.2 jupyterlab_pygments==0.3.0 lxml==5.3.1 MarkupSafe==3.0.2 matplotlib-inline==0.1.7 mccabe==0.7.0 mistune==3.1.3 moto==5.1.2 mypy==1.15.0 mypy-extensions==1.0.0 nbclient==0.10.2 nbconvert==7.16.6 nbformat==5.10.4 nr-date==2.1.0 nr-stream==1.1.5 nr.util==0.8.12 numpy==2.0.2 oauth2client==4.1.3 openpyxl==3.1.5 packaging==24.2 pandas==2.2.3 pandocfilters==1.5.1 parso==0.8.4 pathspec==0.12.1 petl==1.7.15 pexpect==4.9.0 platformdirs==4.3.7 pluggy==1.5.0 prompt_toolkit==3.0.50 proto-plus==1.26.1 protobuf==6.30.2 psycopg2==2.9.10 ptyprocess==0.7.0 pure_eval==0.2.3 pyasn1==0.6.1 pyasn1_modules==0.4.2 pycodestyle==2.13.0 pycparser==2.22 pydoc-markdown==4.8.2 pydocstyle==6.3.0 pyflakes==3.3.2 Pygments==2.19.1 pylama==8.4.1 PyMySQL==1.1.1 pyparsing==3.2.3 pyquery==2.0.1 PySocks==1.7.1 pytest==8.3.5 pytest-asyncio==0.26.0 pytest-cov==6.0.0 pytest-mock==3.14.0 pytest-xdist==3.6.1 python-dateutil==2.9.0.post0 python-dotenv==1.1.0 python-slugify==8.0.4 pytz==2025.2 PyYAML==6.0.2 pyzmq==26.3.0 referencing==0.36.2 requests==2.32.3 responses==0.25.7 rfc3986==2.0.0 rpds-py==0.24.0 rsa==4.9 s3transfer==0.11.4 savReaderWriter==3.4.2 simpleeval==1.0.3 simplejson==3.20.1 six==1.17.0 snowballstemmer==2.2.0 soupsieve==2.6 SQLAlchemy==2.0.40 stack-data==0.6.3 stringcase==1.2.0 text-unidecode==1.3 tinycss2==1.4.0 tomli==2.2.1 tomli_w==1.2.0 tornado==6.4.2 tqdm==4.67.1 traitlets==5.14.3 typeapi==2.2.4 typing_extensions==4.13.0 tzdata==2025.2 unicodecsv==0.14.1 uritemplate==4.1.1 urllib3==1.26.20 watchdog==6.0.0 wcwidth==0.2.13 webencodings==0.5.1 Werkzeug==3.1.3 wrapt==1.17.2 xlrd==2.0.1 xlwt==1.3.0 xmltodict==0.14.2 yapf==0.43.0 zipp==3.21.0
name: frictionless-py channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - asttokens==3.0.0 - attrs==25.3.0 - beautifulsoup4==4.13.3 - black==23.12.1 - bleach==6.2.0 - blinker==1.9.0 - boto3==1.37.23 - botocore==1.37.23 - cachetools==5.5.2 - certifi==2025.1.31 - cffi==1.17.1 - chardet==5.2.0 - charset-normalizer==3.4.1 - ckanapi==4.8 - click==8.1.8 - coverage==7.8.0 - coveralls==4.0.1 - cryptography==44.0.2 - cssselect==1.3.0 - databind==4.5.2 - databind-core==4.5.2 - databind-json==4.5.2 - decorator==5.2.1 - defusedxml==0.7.1 - deprecated==1.2.18 - docopt==0.6.2 - docspec==2.2.1 - docspec-python==2.2.1 - docstring-parser==0.11 - et-xmlfile==2.0.0 - exceptiongroup==1.2.2 - execnet==2.1.1 - executing==2.2.0 - ezodf==0.3.2 - fastjsonschema==2.21.1 - filelock==3.18.0 - flask==3.1.0 - frictionless==3.27.2 - gdown==5.2.0 - google-api-core==2.24.2 - google-api-python-client==2.166.0 - google-auth==2.38.0 - google-auth-httplib2==0.2.0 - googleapis-common-protos==1.69.2 - greenlet==3.1.1 - gunicorn==23.0.0 - httplib2==0.22.0 - idna==3.10 - ijson==3.3.0 - importlib-metadata==8.6.1 - iniconfig==2.1.0 - ipython==8.18.1 - isodate==0.7.2 - itsdangerous==2.2.0 - jedi==0.19.2 - jinja2==3.1.6 - jmespath==1.0.1 - jsonlines==4.0.0 - jsonschema==4.23.0 - jsonschema-specifications==2024.10.1 - jupyter-client==8.6.3 - jupyter-core==5.7.2 - jupyterlab-pygments==0.3.0 - lxml==5.3.1 - markupsafe==3.0.2 - matplotlib-inline==0.1.7 - mccabe==0.7.0 - mistune==3.1.3 - moto==5.1.2 - mypy==1.15.0 - mypy-extensions==1.0.0 - nbclient==0.10.2 - nbconvert==7.16.6 - nbformat==5.10.4 - nr-date==2.1.0 - nr-stream==1.1.5 - nr-util==0.8.12 - numpy==2.0.2 - oauth2client==4.1.3 - openpyxl==3.1.5 - packaging==24.2 - pandas==2.2.3 - pandocfilters==1.5.1 - parso==0.8.4 - pathspec==0.12.1 - petl==1.7.15 - pexpect==4.9.0 - platformdirs==4.3.7 - pluggy==1.5.0 - prompt-toolkit==3.0.50 - proto-plus==1.26.1 - protobuf==6.30.2 - psycopg2==2.9.10 - ptyprocess==0.7.0 - pure-eval==0.2.3 - pyasn1==0.6.1 - pyasn1-modules==0.4.2 - pycodestyle==2.13.0 - pycparser==2.22 - pydoc-markdown==4.8.2 - pydocstyle==6.3.0 - pyflakes==3.3.2 - pygments==2.19.1 - pylama==8.4.1 - pymysql==1.1.1 - pyparsing==3.2.3 - pyquery==2.0.1 - pysocks==1.7.1 - pytest==8.3.5 - pytest-asyncio==0.26.0 - pytest-cov==6.0.0 - pytest-mock==3.14.0 - pytest-xdist==3.6.1 - python-dateutil==2.9.0.post0 - python-dotenv==1.1.0 - python-slugify==8.0.4 - pytz==2025.2 - pyyaml==6.0.2 - pyzmq==26.3.0 - referencing==0.36.2 - requests==2.32.3 - responses==0.25.7 - rfc3986==2.0.0 - rpds-py==0.24.0 - rsa==4.9 - s3transfer==0.11.4 - savreaderwriter==3.4.2 - simpleeval==1.0.3 - simplejson==3.20.1 - six==1.17.0 - snowballstemmer==2.2.0 - soupsieve==2.6 - sqlalchemy==2.0.40 - stack-data==0.6.3 - stringcase==1.2.0 - text-unidecode==1.3 - tinycss2==1.4.0 - tomli==2.2.1 - tomli-w==1.2.0 - tornado==6.4.2 - tqdm==4.67.1 - traitlets==5.14.3 - typeapi==2.2.4 - typing-extensions==4.13.0 - tzdata==2025.2 
- unicodecsv==0.14.1 - uritemplate==4.1.1 - urllib3==1.26.20 - watchdog==6.0.0 - wcwidth==0.2.13 - webencodings==0.5.1 - werkzeug==3.1.3 - wrapt==1.17.2 - xlrd==2.0.1 - xlwt==1.3.0 - xmltodict==0.14.2 - yapf==0.43.0 - zipp==3.21.0 prefix: /opt/conda/envs/frictionless-py
[ "tests/test_table.py::test_table_limit_fields_error_zero_issue_521", "tests/test_table.py::test_table_offset_fields_error_zero_issue_521", "tests/test_table.py::test_table_limit_rows_error_zero_issue_521", "tests/test_table.py::test_table_offset_rows_error_zero_issue_521" ]
[ "tests/test_table.py::test_table_format_tsv", "tests/test_table.py::test_table_header_json_keyed", "tests/test_table.py::test_table_wrong_encoding_detection_issue_265" ]
[ "tests/test_table.py::test_table", "tests/test_table.py::test_table_read_data", "tests/test_table.py::test_table_data_stream", "tests/test_table.py::test_table_data_stream_iterate", "tests/test_table.py::test_table_read_rows", "tests/test_table.py::test_table_row_stream", "tests/test_table.py::test_table_row_stream_iterate", "tests/test_table.py::test_table_row_stream_error_cells", "tests/test_table.py::test_table_row_stream_blank_cells", "tests/test_table.py::test_table_empty", "tests/test_table.py::test_table_without_rows", "tests/test_table.py::test_table_without_headers", "tests/test_table.py::test_table_error_read_closed", "tests/test_table.py::test_table_source_error_data", "tests/test_table.py::test_table_scheme_file", "tests/test_table.py::test_table_scheme_stream", "tests/test_table.py::test_table_scheme_text", "tests/test_table.py::test_table_scheme_error_bad_scheme", "tests/test_table.py::test_table_scheme_error_bad_scheme_and_format", "tests/test_table.py::test_table_scheme_error_file_not_found", "tests/test_table.py::test_table_scheme_error_file_not_found_bad_format", "tests/test_table.py::test_table_scheme_error_file_not_found_bad_compression", "tests/test_table.py::test_table_format_csv", "tests/test_table.py::test_table_format_ndjson", "tests/test_table.py::test_table_format_xls", "tests/test_table.py::test_table_format_xlsx", "tests/test_table.py::test_table_format_error_bad_format", "tests/test_table.py::test_table_format_error_non_matching_format", "tests/test_table.py::test_table_hashing", "tests/test_table.py::test_table_hashing_provided", "tests/test_table.py::test_table_hashing_error_bad_hashing", "tests/test_table.py::test_table_encoding", "tests/test_table.py::test_table_encoding_explicit_utf8", "tests/test_table.py::test_table_encoding_explicit_latin1", "tests/test_table.py::test_table_encoding_utf_16", "tests/test_table.py::test_table_encoding_error_bad_encoding", "tests/test_table.py::test_table_encoding_error_non_matching_encoding", "tests/test_table.py::test_table_compression_local_csv_zip", "tests/test_table.py::test_table_compression_local_csv_zip_multiple_files", "tests/test_table.py::test_table_compression_local_csv_zip_multiple_files_compression_path", "tests/test_table.py::test_table_compression_local_csv_zip_multiple_open", "tests/test_table.py::test_table_compression_local_csv_gz", "tests/test_table.py::test_table_compression_filelike_csv_zip", "tests/test_table.py::test_table_compression_filelike_csv_gz", "tests/test_table.py::test_table_compression_error_bad", "tests/test_table.py::test_table_compression_error_invalid_zip", "tests/test_table.py::test_table_compression_error_invalid_gz", "tests/test_table.py::test_table_control", "tests/test_table.py::test_table_control_bad_property", "tests/test_table.py::test_table_dialect", "tests/test_table.py::test_table_dialect_csv_delimiter", "tests/test_table.py::test_table_dialect_json_property", "tests/test_table.py::test_table_dialect_bad_property", "tests/test_table.py::test_table_dialect_header_case_default", "tests/test_table.py::test_table_dialect_header_case_is_false", "tests/test_table.py::test_table_pick_fields", "tests/test_table.py::test_table_pick_fields_position", "tests/test_table.py::test_table_pick_fields_regex", "tests/test_table.py::test_table_pick_fields_position_and_prefix", "tests/test_table.py::test_table_skip_fields", "tests/test_table.py::test_table_skip_fields_position", "tests/test_table.py::test_table_skip_fields_regex", 
"tests/test_table.py::test_table_skip_fields_position_and_prefix", "tests/test_table.py::test_table_skip_fields_blank_header", "tests/test_table.py::test_table_skip_fields_blank_header_notation", "tests/test_table.py::test_table_skip_fields_keyed_source", "tests/test_table.py::test_table_limit_fields", "tests/test_table.py::test_table_offset_fields", "tests/test_table.py::test_table_limit_offset_fields", "tests/test_table.py::test_table_pick_rows", "tests/test_table.py::test_table_pick_rows_number", "tests/test_table.py::test_table_pick_rows_regex", "tests/test_table.py::test_table_skip_rows", "tests/test_table.py::test_table_skip_rows_excel_empty_column", "tests/test_table.py::test_table_skip_rows_with_headers", "tests/test_table.py::test_table_skip_rows_with_headers_example_from_readme", "tests/test_table.py::test_table_skip_rows_regex", "tests/test_table.py::test_table_skip_rows_preset", "tests/test_table.py::test_table_limit_rows", "tests/test_table.py::test_table_offset_rows", "tests/test_table.py::test_table_limit_offset_rows", "tests/test_table.py::test_table_header", "tests/test_table.py::test_table_header_unicode", "tests/test_table.py::test_table_header_stream_context_manager", "tests/test_table.py::test_table_header_inline", "tests/test_table.py::test_table_header_inline_keyed", "tests/test_table.py::test_table_header_inline_keyed_headers_is_none", "tests/test_table.py::test_table_header_xlsx_multiline", "tests/test_table.py::test_table_header_csv_multiline_headers_join", "tests/test_table.py::test_table_header_csv_multiline_headers_duplicates", "tests/test_table.py::test_table_header_strip_and_non_strings", "tests/test_table.py::test_table_schema", "tests/test_table.py::test_table_schema_provided", "tests/test_table.py::test_table_sync_schema", "tests/test_table.py::test_table_schema_patch_schema", "tests/test_table.py::test_table_schema_patch_schema_missing_values", "tests/test_table.py::test_table_schema_infer_type", "tests/test_table.py::test_table_schema_infer_names", "tests/test_table.py::test_table_schema_lookup_foreign_keys", "tests/test_table.py::test_table_schema_lookup_foreign_keys_error", "tests/test_table.py::test_table_stats_hash", "tests/test_table.py::test_table_stats_hash_md5", "tests/test_table.py::test_table_stats_hash_sha1", "tests/test_table.py::test_table_stats_hash_sha256", "tests/test_table.py::test_table_stats_hash_sha512", "tests/test_table.py::test_table_stats_hash_compressed", "tests/test_table.py::test_table_stats_bytes", "tests/test_table.py::test_table_stats_bytes_compressed", "tests/test_table.py::test_table_stats_fields", "tests/test_table.py::test_table_stats_rows", "tests/test_table.py::test_table_stats_rows_significant", "tests/test_table.py::test_table_reopen", "tests/test_table.py::test_table_reopen_and_infer_volume", "tests/test_table.py::test_table_reopen_generator", "tests/test_table.py::test_table_write", "tests/test_table.py::test_table_write_format_error_bad_format", "tests/test_table.py::test_table_integrity_onerror", "tests/test_table.py::test_table_integrity_onerror_header_warn", "tests/test_table.py::test_table_integrity_onerror_header_raise", "tests/test_table.py::test_table_integrity_onerror_row_warn", "tests/test_table.py::test_table_integrity_onerror_row_raise", "tests/test_table.py::test_table_integrity_unique", "tests/test_table.py::test_table_integrity_unique_error", "tests/test_table.py::test_table_integrity_primary_key", "tests/test_table.py::test_table_integrity_primary_key_error", 
"tests/test_table.py::test_table_integrity_foreign_keys", "tests/test_table.py::test_table_integrity_foreign_keys_error", "tests/test_table.py::test_table_reset_on_close_issue_190", "tests/test_table.py::test_table_skip_blank_at_the_end_issue_bco_dmo_33", "tests/test_table.py::test_table_not_existent_local_file_with_no_format_issue_287", "tests/test_table.py::test_table_skip_rows_non_string_cell_issue_320", "tests/test_table.py::test_table_skip_rows_non_string_cell_issue_322" ]
[]
MIT License
8,927
253
[ "frictionless/query.py" ]
pgmpy__pgmpy-1349
84046ca382bff08944e05f5d3f30c262bbbd372e
2020-11-11 09:37:55
c3cfe6cc54f40202aceeea5a424282bd9fa3171b
codecov[bot]: # [Codecov](https://codecov.io/gh/pgmpy/pgmpy/pull/1349?src=pr&el=h1) Report > Merging [#1349](https://codecov.io/gh/pgmpy/pgmpy/pull/1349?src=pr&el=desc) (0eeecb3) into [dev](https://codecov.io/gh/pgmpy/pgmpy/commit/84046ca382bff08944e05f5d3f30c262bbbd372e?el=desc) (84046ca) will **decrease** coverage by `0.02%`. > The diff coverage is `100.00%`. [![Impacted file tree graph](https://codecov.io/gh/pgmpy/pgmpy/pull/1349/graphs/tree.svg?width=650&height=150&src=pr&token=UaJMCdHaEF)](https://codecov.io/gh/pgmpy/pgmpy/pull/1349?src=pr&el=tree) ```diff @@ Coverage Diff @@ ## dev #1349 +/- ## ========================================== - Coverage 93.18% 93.16% -0.03% ========================================== Files 132 132 Lines 13245 13199 -46 ========================================== - Hits 12343 12297 -46 Misses 902 902 ``` | [Impacted Files](https://codecov.io/gh/pgmpy/pgmpy/pull/1349?src=pr&el=tree) | Coverage Δ | | |---|---|---| | [pgmpy/estimators/HillClimbSearch.py](https://codecov.io/gh/pgmpy/pgmpy/pull/1349/diff?src=pr&el=tree#diff-cGdtcHkvZXN0aW1hdG9ycy9IaWxsQ2xpbWJTZWFyY2gucHk=) | `92.13% <ø> (ø)` | | | [...gmpy/tests/test\_estimators/test\_HillClimbSearch.py](https://codecov.io/gh/pgmpy/pgmpy/pull/1349/diff?src=pr&el=tree#diff-cGdtcHkvdGVzdHMvdGVzdF9lc3RpbWF0b3JzL3Rlc3RfSGlsbENsaW1iU2VhcmNoLnB5) | `98.66% <100.00%> (+0.09%)` | :arrow_up: | | [.../test\_models/test\_LinearGaussianBayesianNetwork.py](https://codecov.io/gh/pgmpy/pgmpy/pull/1349/diff?src=pr&el=tree#diff-cGdtcHkvdGVzdHMvdGVzdF9tb2RlbHMvdGVzdF9MaW5lYXJHYXVzc2lhbkJheWVzaWFuTmV0d29yay5weQ==) | `39.06% <0.00%> (-3.59%)` | :arrow_down: | | [...actors/test\_continuous/test\_Linear\_Gaussain\_CPD.py](https://codecov.io/gh/pgmpy/pgmpy/pull/1349/diff?src=pr&el=tree#diff-cGdtcHkvdGVzdHMvdGVzdF9mYWN0b3JzL3Rlc3RfY29udGludW91cy90ZXN0X0xpbmVhcl9HYXVzc2Fpbl9DUEQucHk=) | `40.00% <0.00%> (-2.86%)` | :arrow_down: | | [pgmpy/tests/test\_estimators/test\_MmhcEstimator.py](https://codecov.io/gh/pgmpy/pgmpy/pull/1349/diff?src=pr&el=tree#diff-cGdtcHkvdGVzdHMvdGVzdF9lc3RpbWF0b3JzL3Rlc3RfTW1oY0VzdGltYXRvci5weQ==) | `58.82% <0.00%> (-2.29%)` | :arrow_down: | | [pgmpy/factors/continuous/discretize.py](https://codecov.io/gh/pgmpy/pgmpy/pull/1349/diff?src=pr&el=tree#diff-cGdtcHkvZmFjdG9ycy9jb250aW51b3VzL2Rpc2NyZXRpemUucHk=) | `34.21% <0.00%> (-1.69%)` | :arrow_down: | | [pgmpy/tests/test\_estimators/test\_SEMEstimator.py](https://codecov.io/gh/pgmpy/pgmpy/pull/1349/diff?src=pr&el=tree#diff-cGdtcHkvdGVzdHMvdGVzdF9lc3RpbWF0b3JzL3Rlc3RfU0VNRXN0aW1hdG9yLnB5) | `79.66% <0.00%> (-1.59%)` | :arrow_down: | | [pgmpy/factors/base.py](https://codecov.io/gh/pgmpy/pgmpy/pull/1349/diff?src=pr&el=tree#diff-cGdtcHkvZmFjdG9ycy9iYXNlLnB5) | `84.21% <0.00%> (-0.79%)` | :arrow_down: | | [...gmpy/factors/distributions/GaussianDistribution.py](https://codecov.io/gh/pgmpy/pgmpy/pull/1349/diff?src=pr&el=tree#diff-cGdtcHkvZmFjdG9ycy9kaXN0cmlidXRpb25zL0dhdXNzaWFuRGlzdHJpYnV0aW9uLnB5) | `69.00% <0.00%> (-0.61%)` | :arrow_down: | | [pgmpy/factors/continuous/ContinuousFactor.py](https://codecov.io/gh/pgmpy/pgmpy/pull/1349/diff?src=pr&el=tree#diff-cGdtcHkvZmFjdG9ycy9jb250aW51b3VzL0NvbnRpbnVvdXNGYWN0b3IucHk=) | `86.11% <0.00%> (-0.38%)` | :arrow_down: | | ... and [12 more](https://codecov.io/gh/pgmpy/pgmpy/pull/1349/diff?src=pr&el=tree-more) | | ------ [Continue to review full report at Codecov](https://codecov.io/gh/pgmpy/pgmpy/pull/1349?src=pr&el=continue). 
> **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta) > `Δ = absolute <relative> (impact)`, `ø = not affected`, `? = missing data` > Powered by [Codecov](https://codecov.io/gh/pgmpy/pgmpy/pull/1349?src=pr&el=footer). Last update [84046ca...0eeecb3](https://codecov.io/gh/pgmpy/pgmpy/pull/1349?src=pr&el=lastupdated). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments).
diff --git a/pgmpy/estimators/HillClimbSearch.py b/pgmpy/estimators/HillClimbSearch.py
index 66b942d9..c19cd865 100644
--- a/pgmpy/estimators/HillClimbSearch.py
+++ b/pgmpy/estimators/HillClimbSearch.py
@@ -284,7 +284,8 @@ class HillClimbSearch(StructureEstimator):
         iteration = range(int(max_iter))
 
         # Step 2: For each iteration, find the best scoring operation and
-        #         do that to the current model.
+        #         do that to the current model. If no legal operation is
+        #         possible, sets best_operation=None.
         for _ in iteration:
             best_operation, best_score_delta = max(
                 self._legal_operations(
@@ -297,6 +298,7 @@ class HillClimbSearch(StructureEstimator):
                     fixed_edges,
                 ),
                 key=lambda t: t[1],
+                default=(None, None),
             )
 
             if best_operation is None or best_score_delta < epsilon:
ValueError in HillClimbSearch().estimate()

### Subject of the issue
It raises ValueError when estimating the graph with the HillClimbSearch model.

### Your environment
* pgmpy version : 0.1.12
* Python version : 3.7.7
* Operating System : Ubuntu 18.04.2 LTS

### Steps to reproduce
```python3
data = pd.DataFrame(
    [[1,0,0,1,0,0,1,1,0],
     [1,0,1,0,0,1,0,1,0],
     [1,0,0,0,0,1,0,1,1],
     [1,1,0,1,0,1,1,0,0],
     [0,0,1,0,0,1,1,0,0]],
    columns=list('ABCDEFGHI')
)
est = HillClimbSearch(data)
best_model = est.estimate(
    fixed_edges=[('A','B'), ('B', 'C')],
    white_list=[('F','C')]
)
```

### Expected behaviour
1. return model with fixed edges or
2. return model with fixed_edges and white_list edges

### Actual behaviour
![image](https://user-images.githubusercontent.com/23306402/98773788-01f69e80-242d-11eb-904f-9295b00fc147.png)
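The failure boils down to Python's built-in `max()` being called on an empty sequence: with these `fixed_edges`/`white_list` constraints no legal operation remains, so the generator handed to `max()` yields nothing. Below is a minimal sketch (plain Python, no pgmpy required) of that behaviour and of the `default=(None, None)` escape hatch applied in the patch above:

```python
# No legal (operation, score_delta) candidates remain after filtering.
candidates = []

try:
    max(candidates, key=lambda t: t[1])
except ValueError as err:
    print(err)  # "max() arg is an empty sequence" -- the error reported above

# With a default, as in the patch, the search loop can terminate cleanly instead:
best_operation, best_score_delta = max(
    candidates, key=lambda t: t[1], default=(None, None)
)
print(best_operation, best_score_delta)  # None None
```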
pgmpy/pgmpy
diff --git a/pgmpy/tests/test_estimators/test_HillClimbSearch.py b/pgmpy/tests/test_estimators/test_HillClimbSearch.py
index a1c147b9..3aa4a34e 100644
--- a/pgmpy/tests/test_estimators/test_HillClimbSearch.py
+++ b/pgmpy/tests/test_estimators/test_HillClimbSearch.py
@@ -206,6 +206,25 @@ class TestHillClimbEstimator(unittest.TestCase):
             in self.est_titanic2.estimate(fixed_edges=[("Pclass", "Survived")]).edges()
         )
 
+    def test_no_legal_operation(self):
+        data = pd.DataFrame(
+            [
+                [1, 0, 0, 1, 0, 0, 1, 1, 0],
+                [1, 0, 1, 0, 0, 1, 0, 1, 0],
+                [1, 0, 0, 0, 0, 1, 0, 1, 1],
+                [1, 1, 0, 1, 0, 1, 1, 0, 0],
+                [0, 0, 1, 0, 0, 1, 1, 0, 0],
+            ],
+            columns=list("ABCDEFGHI"),
+        )
+        est = HillClimbSearch(data)
+        best_model = est.estimate(
+            fixed_edges=[("A", "B"), ("B", "C")], white_list=[("F", "C")]
+        )
+        self.assertEqual(
+            set(best_model.edges()), set([("A", "B"), ("B", "C"), ("F", "C")])
+        )
+
     def tearDown(self):
         del self.rand_data
         del self.est_rand
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_hyperlinks", "has_media" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 2, "test_score": 3 }, "num_modified_files": 1 }
0.1
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[all]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov" ], "pre_install": null, "python": "3.6", "reqs_path": [ "requirements/runtime.txt", "requirements/tests.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 black==22.8.0 certifi==2021.5.30 charset-normalizer==2.0.12 click==8.0.4 codecov==2.1.13 coverage==6.2 dataclasses==0.8 decorator==4.4.2 idna==3.10 importlib-metadata==4.8.3 importlib-resources==5.4.0 iniconfig==1.1.1 joblib==1.1.1 mock==5.2.0 mypy-extensions==1.0.0 networkx==2.5.1 numpy==1.19.5 packaging==21.3 pandas==1.1.5 pathspec==0.9.0 patsy==1.0.1 -e git+https://github.com/pgmpy/pgmpy.git@84046ca382bff08944e05f5d3f30c262bbbd372e#egg=pgmpy platformdirs==2.4.0 pluggy==1.0.0 py==1.11.0 pyparsing==3.1.4 pytest==7.0.1 pytest-cov==4.0.0 python-dateutil==2.9.0.post0 pytz==2025.2 requests==2.27.1 scikit-learn==0.24.2 scipy==1.5.4 six==1.17.0 statsmodels==0.12.2 threadpoolctl==3.1.0 tomli==1.2.3 torch==1.10.2 tqdm==4.64.1 typed-ast==1.5.5 typing_extensions==4.1.1 urllib3==1.26.20 xdoctest==1.1.6 zipp==3.6.0
name: pgmpy channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - black==22.8.0 - charset-normalizer==2.0.12 - click==8.0.4 - codecov==2.1.13 - coverage==6.2 - dataclasses==0.8 - decorator==4.4.2 - idna==3.10 - importlib-metadata==4.8.3 - importlib-resources==5.4.0 - iniconfig==1.1.1 - joblib==1.1.1 - mock==5.2.0 - mypy-extensions==1.0.0 - networkx==2.5.1 - numpy==1.19.5 - packaging==21.3 - pandas==1.1.5 - pathspec==0.9.0 - patsy==1.0.1 - platformdirs==2.4.0 - pluggy==1.0.0 - py==1.11.0 - pyparsing==3.1.4 - pytest==7.0.1 - pytest-cov==4.0.0 - python-dateutil==2.9.0.post0 - pytz==2025.2 - requests==2.27.1 - scikit-learn==0.24.2 - scipy==1.5.4 - six==1.17.0 - statsmodels==0.12.2 - threadpoolctl==3.1.0 - tomli==1.2.3 - torch==1.10.2 - tqdm==4.64.1 - typed-ast==1.5.5 - typing-extensions==4.1.1 - urllib3==1.26.20 - xdoctest==1.1.6 - zipp==3.6.0 prefix: /opt/conda/envs/pgmpy
[ "pgmpy/tests/test_estimators/test_HillClimbSearch.py::TestHillClimbEstimator::test_no_legal_operation" ]
[]
[ "pgmpy/tests/test_estimators/test_HillClimbSearch.py::TestHillClimbEstimator::test_estimate_rand", "pgmpy/tests/test_estimators/test_HillClimbSearch.py::TestHillClimbEstimator::test_estimate_titanic", "pgmpy/tests/test_estimators/test_HillClimbSearch.py::TestHillClimbEstimator::test_legal_operations", "pgmpy/tests/test_estimators/test_HillClimbSearch.py::TestHillClimbEstimator::test_legal_operations_blacklist_whitelist", "pgmpy/tests/test_estimators/test_HillClimbSearch.py::TestHillClimbEstimator::test_legal_operations_titanic" ]
[]
MIT License
8,928
266
[ "pgmpy/estimators/HillClimbSearch.py" ]
CharJon__GeCO-11
d39ca20573da84b79d58315c7f17e125b24470ff
2020-11-11 10:47:11
d39ca20573da84b79d58315c7f17e125b24470ff
diff --git a/geco/mips/facility_location.py b/geco/mips/facility_location.py index 7139f3e..e02f2ab 100644 --- a/geco/mips/facility_location.py +++ b/geco/mips/facility_location.py @@ -6,7 +6,12 @@ from networkx.utils import py_random_state @py_random_state(3) -def capacitated_facility_location(n_customers, n_facilities, ratio, seed=0): +def cornuejols_instance(n_customers, n_facilities, ratio, seed=0): + return capacitated_facility_location(n_customers, n_facilities, + *cornuejols_instance_params(n_customers, n_facilities, ratio, seed)) + + +def capacitated_facility_location(n_customers, n_facilities, transportation_cost, demands, fixed_costs, capacities): """ Generate a Capacited Facility Location problem following Cornuejols G, Sridharan R, Thizy J-M (1991) @@ -19,37 +24,16 @@ def capacitated_facility_location(n_customers, n_facilities, ratio, seed=0): The desired number of customers. n_facilities: int The desired number of facilities. - ratio: float - The desired capacity / demand ratio. - seed: integer, random_state, or None - Indicator of random number generation state. + transportation_cost: numpy array [float] + Matrix of transportation costs from customer i to facility j [i,j] + demands: numpy array [int] + Demands of each customer. + fixed_costs: numpy array [int] + Fixed costs of operating each facility. + capacities: numpy array [int] + Capacities of each facility. """ - # locations for customers - c_x = np.array([seed.random() for _ in range(n_customers)]) - c_y = np.array([seed.random() for _ in range(n_customers)]) - - # locations for facilities - f_x = np.array([seed.random() for _ in range(n_facilities)]) - f_y = np.array([seed.random() for _ in range(n_facilities)]) - - demands = np.array(seed.sample(range(5, 35 + 1), k=n_customers)) - capacities = np.array(seed.sample(range(10, 160 + 1), k=n_facilities)) - fixed_costs = np.array(seed.sample(range(100, 110 + 1), k=n_facilities) * np.sqrt(capacities)) \ - + np.array(seed.sample(range(90 + 1), k=n_facilities)) - fixed_costs = fixed_costs.astype(int) - total_demand = demands.sum() - total_capacity = capacities.sum() - - # adjust capacities according to ratio - capacities = capacities * ratio * total_demand / total_capacity - capacities = capacities.astype(int) - total_capacity = capacities.sum() - - # transportation costs - trans_costs = np.sqrt( - (c_x.reshape((-1, 1)) - f_x.reshape((1, -1))) ** 2 \ - + (c_y.reshape((-1, 1)) - f_y.reshape((1, -1))) ** 2) * 10 * demands.reshape((-1, 1)) model = scip.Model("Capacitated Facility Location") @@ -59,7 +43,7 @@ def capacitated_facility_location(n_customers, n_facilities, ratio, seed=0): facility_vars = [] # add customer-facility vars for i, j in itertools.product(range(n_customers), range(n_facilities)): - var = model.addVar(lb=0, ub=1, obj=trans_costs[i, j], name=f"x_{i}_{j}", vtype="B") + var = model.addVar(lb=0, ub=1, obj=transportation_cost[i, j], name=f"x_{i}_{j}", vtype="B") customer_facility_vars[i, j] = var # add facility vars for j in range(n_facilities): @@ -84,3 +68,32 @@ def capacitated_facility_location(n_customers, n_facilities, ratio, seed=0): model.addCons(customer_facility_vars[i, j] <= facility_vars[j]) return model + + +@py_random_state(3) +def cornuejols_instance_params(n_customers, n_facilities, ratio, seed): + # locations for customers + c_x = np.array([seed.random() for _ in range(n_customers)]) + c_y = np.array([seed.random() for _ in range(n_customers)]) + + # locations for facilities + f_x = np.array([seed.random() for _ in range(n_facilities)]) + f_y = 
np.array([seed.random() for _ in range(n_facilities)]) + + demands = np.array(seed.sample(range(5, 35 + 1), k=n_customers)) + capacities = np.array(seed.sample(range(10, 160 + 1), k=n_facilities)) + fixed_costs = np.array(seed.sample(range(100, 110 + 1), k=n_facilities) * np.sqrt(capacities)) + np.array( + seed.sample(range(90 + 1), k=n_facilities)) + fixed_costs = fixed_costs.astype(int) + + # adjust capacities according to ratio + total_demand = demands.sum() + total_capacity = capacities.sum() + capacities = capacities * ratio * total_demand / total_capacity + capacities = capacities.astype(int) + + # transportation cost + trans_costs = np.sqrt( + (c_x.reshape((-1, 1)) - f_x.reshape((1, -1))) ** 2 \ + + (c_y.reshape((-1, 1)) - f_y.reshape((1, -1))) ** 2) * 10 * demands.reshape((-1, 1)) + return trans_costs, demands, fixed_costs, capacities
Make facility location compliant with develop.md
For this, the parameter generator needs to be split from the MIP generator.
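A rough usage sketch of the intended split, following the new signatures from the patch and test diff in this record — `cornuejols_instance_params` samples the instance data, `capacitated_facility_location` builds the model from it, and `cornuejols_instance` keeps the old one-call behaviour. The sizes below are arbitrary example values:

```python
from geco.mips.facility_location import (
    capacitated_facility_location,
    cornuejols_instance_params,
)

# Step 1: sample transportation costs, demands, fixed costs and capacities
# for an instance with 10 customers and 3 facilities.
params = cornuejols_instance_params(10, 3, 2, seed=0)

# Step 2: build the SCIP model from the sampled parameters.
model = capacitated_facility_location(10, 3, *params)
```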
CharJon/GeCO
diff --git a/geco/mips/tests/test_facility_location.py b/geco/mips/tests/test_facility_location.py index d21b2b4..22cc821 100644 --- a/geco/mips/tests/test_facility_location.py +++ b/geco/mips/tests/test_facility_location.py @@ -2,8 +2,9 @@ from geco.mips.facility_location import * def test_capacitated_facility_location(): - n_customers, n_facilities, ratio, seed = 25, 10, 2, 0 - model = capacitated_facility_location(n_customers, n_facilities, ratio, seed) + n_customers, n_facilities, ratio = 25, 10, 2 + instance_params = cornuejols_instance_params(n_customers, n_facilities, ratio, seed=0) + model = capacitated_facility_location(n_customers, n_facilities, *instance_params) assert model.getNVars() == n_customers * n_facilities + n_facilities assert model.getNConss() == n_customers + n_facilities + 1 + n_customers * n_facilities assert model.getObjectiveSense() == "minimize" diff --git a/geco/tests/test_generator.py b/geco/tests/test_generator.py index ffa959f..fe33add 100644 --- a/geco/tests/test_generator.py +++ b/geco/tests/test_generator.py @@ -5,8 +5,8 @@ from geco.generator import * def test_generator(): - from geco.mips.facility_location import capacitated_facility_location - gen = Generator(capacitated_facility_location, n_customers=10, n_facilities=3, ratio=2) + from geco.mips.facility_location import cornuejols_instance + gen = Generator(cornuejols_instance, n_customers=10, n_facilities=3, ratio=2) gen.seed(0) for model in itertools.islice(gen, 10): assert type(model) == scip.Model
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 3, "test_score": 3 }, "num_modified_files": 1 }
unknown
{ "env_vars": null, "env_yml_path": [ "conda-dev-env.yml" ], "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": true, "packages": "environment.yml", "pip_packages": null, "pre_install": null, "python": "3.8", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
anyio @ file:///home/conda/feedstock_root/build_artifacts/anyio_1726753373685/work argon2-cffi @ file:///home/conda/feedstock_root/build_artifacts/argon2-cffi_1692818318753/work argon2-cffi-bindings @ file:///home/conda/feedstock_root/build_artifacts/argon2-cffi-bindings_1649500309442/work arrow @ file:///home/conda/feedstock_root/build_artifacts/arrow_1696128962909/work asttokens @ file:///home/conda/feedstock_root/build_artifacts/asttokens_1733175639022/work async-lru @ file:///home/conda/feedstock_root/build_artifacts/async-lru_1690563019058/work attrs @ file:///home/conda/feedstock_root/build_artifacts/attrs_1722977137225/work babel @ file:///home/conda/feedstock_root/build_artifacts/babel_1730878832677/work backcall @ file:///home/conda/feedstock_root/build_artifacts/backcall_1592338393461/work beautifulsoup4 @ file:///home/conda/feedstock_root/build_artifacts/beautifulsoup4_1705564648255/work bleach @ file:///home/conda/feedstock_root/build_artifacts/bleach_1696630167146/work Brotli @ file:///home/conda/feedstock_root/build_artifacts/brotli-split_1648883617327/work cached-property @ file:///home/conda/feedstock_root/build_artifacts/cached_property_1615209429212/work certifi @ file:///home/conda/feedstock_root/build_artifacts/certifi_1725278078093/work/certifi cffi @ file:///home/conda/feedstock_root/build_artifacts/cffi_1636046063618/work charset-normalizer @ file:///home/conda/feedstock_root/build_artifacts/charset-normalizer_1728479282467/work click==8.1.8 colorama @ file:///home/conda/feedstock_root/build_artifacts/colorama_1666700638685/work comm @ file:///home/conda/feedstock_root/build_artifacts/comm_1710320294760/work coverage @ file:///home/conda/feedstock_root/build_artifacts/coverage_1652409050186/work debugpy @ file:///croot/debugpy_1690905042057/work decorator @ file:///home/conda/feedstock_root/build_artifacts/decorator_1641555617451/work defusedxml @ file:///home/conda/feedstock_root/build_artifacts/defusedxml_1615232257335/work Deprecated==1.2.18 dimod==0.12.17 dwave_networkx==0.8.15 entrypoints @ file:///home/conda/feedstock_root/build_artifacts/entrypoints_1643888246732/work exceptiongroup @ file:///home/conda/feedstock_root/build_artifacts/exceptiongroup_1720869315914/work executing @ file:///home/conda/feedstock_root/build_artifacts/executing_1725214404607/work fastjsonschema @ file:///home/conda/feedstock_root/build_artifacts/python-fastjsonschema_1718477020893/work/dist fqdn @ file:///home/conda/feedstock_root/build_artifacts/fqdn_1638810296540/work/dist -e git+https://github.com/CharJon/GeCO.git@d39ca20573da84b79d58315c7f17e125b24470ff#egg=GeCO h11 @ file:///home/conda/feedstock_root/build_artifacts/h11_1664132893548/work h2 @ file:///home/conda/feedstock_root/build_artifacts/h2_1634280454336/work hpack==4.0.0 httpcore @ file:///home/conda/feedstock_root/build_artifacts/bld/rattler-build_httpcore_1731707562/work httpx @ file:///home/conda/feedstock_root/build_artifacts/httpx_1724778349782/work hyperframe @ file:///home/conda/feedstock_root/build_artifacts/hyperframe_1619110129307/work idna @ file:///home/conda/feedstock_root/build_artifacts/idna_1726459485162/work importlib_metadata @ file:///home/conda/feedstock_root/build_artifacts/importlib-metadata_1726082825846/work importlib_resources @ file:///home/conda/feedstock_root/build_artifacts/importlib_resources_1725921340658/work iniconfig @ file:///home/conda/feedstock_root/build_artifacts/iniconfig_1673103042956/work ipykernel @ file:///home/conda/feedstock_root/build_artifacts/ipykernel_1719845459717/work 
ipython @ file:///home/conda/feedstock_root/build_artifacts/ipython_1683289033986/work ipython_genutils @ file:///home/conda/feedstock_root/build_artifacts/ipython_genutils_1716278396992/work ipywidgets @ file:///home/conda/feedstock_root/build_artifacts/ipywidgets_1724334859652/work isoduration @ file:///home/conda/feedstock_root/build_artifacts/isoduration_1638811571363/work/dist jedi @ file:///home/conda/feedstock_root/build_artifacts/jedi_1696326070614/work Jinja2 @ file:///home/conda/feedstock_root/build_artifacts/jinja2_1715127149914/work json5 @ file:///home/conda/feedstock_root/build_artifacts/json5_1712986206667/work jsonpointer @ file:///home/conda/feedstock_root/build_artifacts/jsonpointer_1718283368615/work jsonschema @ file:///home/conda/feedstock_root/build_artifacts/jsonschema_1720529478715/work jsonschema-specifications @ file:///tmp/tmpvslgxhz5/src jupyter @ file:///home/conda/feedstock_root/build_artifacts/jupyter_1725037521377/work jupyter-console @ file:///home/conda/feedstock_root/build_artifacts/jupyter_console_1678118109161/work jupyter-events @ file:///home/conda/feedstock_root/build_artifacts/jupyter_events_1710805637316/work jupyter-lsp @ file:///home/conda/feedstock_root/build_artifacts/jupyter-lsp-meta_1712707420468/work/jupyter-lsp jupyter_client @ file:///home/conda/feedstock_root/build_artifacts/jupyter_client_1673615989977/work jupyter_core @ file:///home/conda/feedstock_root/build_artifacts/jupyter_core_1727163409502/work jupyter_server @ file:///home/conda/feedstock_root/build_artifacts/jupyter_server_1720816649297/work jupyter_server_terminals @ file:///home/conda/feedstock_root/build_artifacts/jupyter_server_terminals_1710262634903/work jupyterlab @ file:///home/conda/feedstock_root/build_artifacts/jupyterlab_1730308726474/work jupyterlab_pygments @ file:///home/conda/feedstock_root/build_artifacts/jupyterlab_pygments_1707149102966/work jupyterlab_server @ file:///home/conda/feedstock_root/build_artifacts/jupyterlab_server-split_1721163288448/work jupyterlab_widgets @ file:///home/conda/feedstock_root/build_artifacts/jupyterlab_widgets_1724331334887/work MarkupSafe @ file:///home/conda/feedstock_root/build_artifacts/markupsafe_1648737563195/work matplotlib-inline @ file:///home/conda/feedstock_root/build_artifacts/matplotlib-inline_1713250518406/work mistune @ file:///home/conda/feedstock_root/build_artifacts/mistune_1698947099619/work nbclassic @ file:///home/conda/feedstock_root/build_artifacts/nbclassic_1716838762700/work nbclient @ file:///home/conda/feedstock_root/build_artifacts/nbclient_1734628800805/work nbconvert @ file:///home/conda/feedstock_root/build_artifacts/nbconvert-meta_1733405477194/work nbformat @ file:///home/conda/feedstock_root/build_artifacts/nbformat_1712238998817/work nest_asyncio @ file:///home/conda/feedstock_root/build_artifacts/nest-asyncio_1705850609492/work networkx==2.8.8 notebook @ file:///home/conda/feedstock_root/build_artifacts/notebook_1715848908871/work notebook_shim @ file:///home/conda/feedstock_root/build_artifacts/notebook-shim_1707957777232/work numpy @ file:///home/conda/feedstock_root/build_artifacts/numpy_1651020413938/work overrides @ file:///home/conda/feedstock_root/build_artifacts/overrides_1706394519472/work packaging @ file:///home/conda/feedstock_root/build_artifacts/packaging_1733203243479/work pandas==1.4.2 pandocfilters @ file:///home/conda/feedstock_root/build_artifacts/pandocfilters_1631603243851/work parso @ file:///home/conda/feedstock_root/build_artifacts/parso_1712320355065/work pexpect @ 
file:///home/conda/feedstock_root/build_artifacts/pexpect_1706113125309/work pickleshare @ file:///home/conda/feedstock_root/build_artifacts/pickleshare_1602536217715/work pkgutil_resolve_name @ file:///home/conda/feedstock_root/build_artifacts/pkgutil-resolve-name_1694617248815/work platformdirs @ file:///home/conda/feedstock_root/build_artifacts/platformdirs_1726613481435/work pluggy @ file:///home/conda/feedstock_root/build_artifacts/pluggy_1713667077545/work prometheus_client @ file:///home/conda/feedstock_root/build_artifacts/prometheus_client_1726901976720/work prompt_toolkit @ file:///home/conda/feedstock_root/build_artifacts/prompt-toolkit_1727341649933/work psutil @ file:///home/conda/feedstock_root/build_artifacts/psutil_1653089172347/work ptyprocess @ file:///home/conda/feedstock_root/build_artifacts/ptyprocess_1609419310487/work/dist/ptyprocess-0.7.0-py2.py3-none-any.whl pure_eval @ file:///home/conda/feedstock_root/build_artifacts/pure_eval_1721585709575/work pycparser @ file:///home/conda/feedstock_root/build_artifacts/pycparser_1711811537435/work Pygments @ file:///home/conda/feedstock_root/build_artifacts/pygments_1714846767233/work PySCIPOpt @ file:///home/conda/feedstock_root/build_artifacts/pyscipopt_1638955097361/work PySocks @ file:///home/conda/feedstock_root/build_artifacts/pysocks_1661604839144/work pytest @ file:///home/conda/feedstock_root/build_artifacts/pytest_1733087655016/work python-dateutil @ file:///home/conda/feedstock_root/build_artifacts/python-dateutil_1709299778482/work python-json-logger @ file:///home/conda/feedstock_root/build_artifacts/python-json-logger_1677079630776/work pytz @ file:///home/conda/feedstock_root/build_artifacts/pytz_1726055524169/work PyYAML @ file:///home/conda/feedstock_root/build_artifacts/pyyaml_1648757091578/work pyzmq @ file:///croot/pyzmq_1705605076900/work referencing @ file:///home/conda/feedstock_root/build_artifacts/referencing_1714619483868/work requests @ file:///home/conda/feedstock_root/build_artifacts/requests_1717057054362/work rfc3339-validator @ file:///home/conda/feedstock_root/build_artifacts/rfc3339-validator_1638811747357/work rfc3986-validator @ file:///home/conda/feedstock_root/build_artifacts/rfc3986-validator_1598024191506/work rpds-py @ file:///croot/rpds-py_1698945930462/work scipy @ file:///home/conda/feedstock_root/build_artifacts/scipy_1653073867187/work Send2Trash @ file:///home/conda/feedstock_root/build_artifacts/send2trash_1712584999685/work six @ file:///home/conda/feedstock_root/build_artifacts/six_1620240208055/work sniffio @ file:///home/conda/feedstock_root/build_artifacts/sniffio_1708952932303/work soupsieve @ file:///home/conda/feedstock_root/build_artifacts/soupsieve_1693929250441/work stack-data @ file:///home/conda/feedstock_root/build_artifacts/stack_data_1669632077133/work tabulate==0.8.10 terminado @ file:///home/conda/feedstock_root/build_artifacts/terminado_1710262609923/work tinycss2 @ file:///home/conda/feedstock_root/build_artifacts/tinycss2_1729802851396/work tomli @ file:///home/conda/feedstock_root/build_artifacts/tomli_1727974628237/work tornado @ file:///croot/tornado_1718740109488/work tqdm @ file:///home/conda/feedstock_root/build_artifacts/tqdm_1732497199771/work traitlets @ file:///home/conda/feedstock_root/build_artifacts/traitlets_1713535121073/work tsplib95 @ git+https://github.com/rhgrant10/tsplib95.git@57e73472ac2bdf64562b0c1cafa058395591da0a types-python-dateutil @ file:///home/conda/feedstock_root/build_artifacts/types-python-dateutil_1727940235703/work 
typing-utils @ file:///home/conda/feedstock_root/build_artifacts/typing_utils_1622899189314/work typing_extensions @ file:///home/conda/feedstock_root/build_artifacts/typing_extensions_1717802530399/work uri-template @ file:///home/conda/feedstock_root/build_artifacts/uri-template_1688655812972/work/dist urllib3 @ file:///home/conda/feedstock_root/build_artifacts/urllib3_1726496430923/work wcwidth @ file:///home/conda/feedstock_root/build_artifacts/wcwidth_1704731205417/work webcolors @ file:///home/conda/feedstock_root/build_artifacts/webcolors_1723294704277/work webencodings @ file:///home/conda/feedstock_root/build_artifacts/webencodings_1694681268211/work websocket-client @ file:///home/conda/feedstock_root/build_artifacts/websocket-client_1713923384721/work widgetsnbextension @ file:///home/conda/feedstock_root/build_artifacts/widgetsnbextension_1724331337528/work wrapt==1.17.2 zipp @ file:///home/conda/feedstock_root/build_artifacts/zipp_1731262100163/work zstandard @ file:///croot/zstandard_1728569189425/work
name: GeCO channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ampl-mp=3.1.0=h2cc385e_1006 - anyio=4.5.0=pyhd8ed1ab_0 - argon2-cffi=23.1.0=pyhd8ed1ab_0 - argon2-cffi-bindings=21.2.0=py38h0a891b7_2 - arrow=1.3.0=pyhd8ed1ab_0 - asttokens=3.0.0=pyhd8ed1ab_0 - async-lru=2.0.4=pyhd8ed1ab_0 - attrs=24.2.0=pyh71513ae_0 - babel=2.16.0=pyhd8ed1ab_0 - backcall=0.2.0=pyh9f0ad1d_0 - beautifulsoup4=4.12.3=pyha770c72_0 - bleach=6.1.0=pyhd8ed1ab_0 - brotli-python=1.0.9=py38hfa26641_7 - ca-certificates=2025.2.25=h06a4308_0 - cached-property=1.5.2=hd8ed1ab_1 - cached_property=1.5.2=pyha770c72_1 - certifi=2024.8.30=pyhd8ed1ab_0 - cffi=1.15.0=py38h3931269_0 - charset-normalizer=3.4.0=pyhd8ed1ab_0 - colorama=0.4.6=pyhd8ed1ab_0 - comm=0.2.2=pyhd8ed1ab_0 - coverage=6.3.3=py38h0a891b7_0 - cppad=20210000.6=h9c3ff4c_0 - debugpy=1.6.7=py38h6a678d5_0 - decorator=5.1.1=pyhd8ed1ab_0 - defusedxml=0.7.1=pyhd8ed1ab_0 - entrypoints=0.4=pyhd8ed1ab_0 - exceptiongroup=1.2.2=pyhd8ed1ab_0 - executing=2.1.0=pyhd8ed1ab_0 - fqdn=1.5.1=pyhd8ed1ab_0 - gmp=6.2.1=h58526e2_0 - h11=0.14.0=pyhd8ed1ab_0 - h2=4.1.0=pyhd8ed1ab_0 - hpack=4.0.0=pyh9f0ad1d_0 - httpcore=1.0.7=pyh29332c3_1 - httpx=0.27.2=pyhd8ed1ab_0 - hyperframe=6.0.1=pyhd8ed1ab_0 - idna=3.10=pyhd8ed1ab_0 - importlib-metadata=8.5.0=pyha770c72_0 - importlib_resources=6.4.5=pyhd8ed1ab_0 - iniconfig=2.0.0=pyhd8ed1ab_0 - ipopt=3.14.1=h7ede334_0 - ipykernel=6.29.5=pyh3099207_0 - ipython=8.12.2=pyh41d4057_0 - ipython_genutils=0.2.0=pyhd8ed1ab_1 - ipywidgets=8.1.5=pyhd8ed1ab_0 - isoduration=20.11.0=pyhd8ed1ab_0 - jedi=0.19.1=pyhd8ed1ab_0 - jinja2=3.1.4=pyhd8ed1ab_0 - json5=0.9.25=pyhd8ed1ab_0 - jsonpointer=3.0.0=py38h578d9bd_0 - jsonschema=4.23.0=pyhd8ed1ab_0 - jsonschema-specifications=2024.10.1=pyhd8ed1ab_0 - jsonschema-with-format-nongpl=4.23.0=hd8ed1ab_1 - jupyter=1.1.1=pyhd8ed1ab_0 - jupyter-lsp=2.2.5=pyhd8ed1ab_0 - jupyter_client=7.4.9=pyhd8ed1ab_0 - jupyter_console=6.6.3=pyhd8ed1ab_0 - jupyter_core=5.7.2=pyh31011fe_1 - jupyter_events=0.10.0=pyhd8ed1ab_0 - jupyter_server=2.14.2=pyhd8ed1ab_0 - jupyter_server_terminals=0.5.3=pyhd8ed1ab_0 - jupyterlab=4.3.0=pyhd8ed1ab_0 - jupyterlab_pygments=0.3.0=pyhd8ed1ab_1 - jupyterlab_server=2.27.3=pyhd8ed1ab_0 - jupyterlab_widgets=3.0.13=pyhd8ed1ab_0 - ld_impl_linux-64=2.40=h12ee557_0 - libblas=3.9.0=16_linux64_openblas - libcblas=3.9.0=16_linux64_openblas - libedit=3.1.20191231=he28a2e2_2 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgfortran-ng=13.2.0=h69a702a_0 - libgfortran5=13.2.0=ha4646dd_0 - libgomp=11.2.0=h1234567_1 - libiconv=1.17=h166bdaf_0 - liblapack=3.9.0=16_linux64_openblas - libopenblas=0.3.21=h043d6bf_0 - libsodium=1.0.18=h36c2ea0_1 - libstdcxx-ng=11.2.0=h1234567_1 - lz4-c=1.9.4=h6a678d5_1 - markupsafe=2.1.1=py38h0a891b7_1 - matplotlib-inline=0.1.7=pyhd8ed1ab_0 - metis=5.1.0=h58526e2_1006 - mistune=3.0.2=pyhd8ed1ab_0 - mumps-include=5.2.1=ha770c72_14 - mumps-seq=5.2.1=h2104b81_11 - nbclassic=1.1.0=pyhd8ed1ab_0 - nbclient=0.10.2=pyhd8ed1ab_0 - nbconvert-core=7.16.4=pyhff2d567_2 - nbformat=5.10.4=pyhd8ed1ab_0 - ncurses=6.4=h6a678d5_0 - nest-asyncio=1.6.0=pyhd8ed1ab_0 - notebook=6.5.7=pyha770c72_0 - notebook-shim=0.2.4=pyhd8ed1ab_0 - numpy=1.22.3=py38h99721a1_2 - openssl=3.0.16=h5eee18b_0 - overrides=7.7.0=pyhd8ed1ab_0 - packaging=24.2=pyhd8ed1ab_2 - pandas=1.4.2=py38h47df419_1 - pandocfilters=1.5.0=pyhd8ed1ab_0 - parso=0.8.4=pyhd8ed1ab_0 - pexpect=4.9.0=pyhd8ed1ab_0 - 
pickleshare=0.7.5=py_1003 - pip=24.3.1=pyh8b19718_0 - pkgutil-resolve-name=1.3.10=pyhd8ed1ab_1 - platformdirs=4.3.6=pyhd8ed1ab_0 - pluggy=1.5.0=pyhd8ed1ab_0 - prometheus_client=0.21.0=pyhd8ed1ab_0 - prompt-toolkit=3.0.48=pyha770c72_0 - prompt_toolkit=3.0.48=hd8ed1ab_1 - psutil=5.9.1=py38h0a891b7_0 - ptyprocess=0.7.0=pyhd3deb0d_0 - pure_eval=0.2.3=pyhd8ed1ab_0 - pycparser=2.22=pyhd8ed1ab_0 - pygments=2.18.0=pyhd8ed1ab_0 - pyscipopt=3.5.0=py38h709712a_0 - pysocks=1.7.1=pyha2e5f31_6 - pytest=8.3.4=pyhd8ed1ab_0 - python=3.8.20=he870216_0 - python-dateutil=2.9.0=pyhd8ed1ab_0 - python-fastjsonschema=2.20.0=pyhd8ed1ab_0 - python-json-logger=2.0.7=pyhd8ed1ab_0 - python_abi=3.8=2_cp38 - pytz=2024.2=pyhd8ed1ab_0 - pyyaml=6.0=py38h0a891b7_4 - pyzmq=25.1.2=py38h6a678d5_0 - readline=8.2=h5eee18b_0 - referencing=0.35.1=pyhd8ed1ab_0 - requests=2.32.3=pyhd8ed1ab_0 - rfc3339-validator=0.1.4=pyhd8ed1ab_0 - rfc3986-validator=0.1.1=pyh9f0ad1d_0 - rpds-py=0.10.6=py38hb02cf49_0 - scip=7.0.3=hf5bcbcd_1 - scipy=1.8.1=py38h1ee437e_0 - scotch=6.0.9=h3858553_1 - send2trash=1.8.3=pyh0d859eb_0 - setuptools=75.1.0=py38h06a4308_0 - six=1.16.0=pyh6c4a22f_0 - sniffio=1.3.1=pyhd8ed1ab_0 - soupsieve=2.5=pyhd8ed1ab_1 - sqlite=3.45.3=h5eee18b_0 - stack_data=0.6.2=pyhd8ed1ab_0 - tbb=2020.2=h4bd325d_4 - terminado=0.18.1=pyh0d859eb_0 - tinycss2=1.4.0=pyhd8ed1ab_0 - tk=8.6.14=h39e8969_0 - tomli=2.0.2=pyhd8ed1ab_0 - tornado=6.4.1=py38h5eee18b_0 - tqdm=4.67.1=pyhd8ed1ab_0 - traitlets=5.14.3=pyhd8ed1ab_0 - types-python-dateutil=2.9.0.20241003=pyhff2d567_0 - typing-extensions=4.12.2=hd8ed1ab_0 - typing_extensions=4.12.2=pyha770c72_0 - typing_utils=0.1.0=pyhd8ed1ab_0 - unixodbc=2.3.10=h583eb01_0 - uri-template=1.3.0=pyhd8ed1ab_0 - urllib3=2.2.3=pyhd8ed1ab_0 - wcwidth=0.2.13=pyhd8ed1ab_0 - webcolors=24.8.0=pyhd8ed1ab_0 - webencodings=0.5.1=pyhd8ed1ab_2 - websocket-client=1.8.0=pyhd8ed1ab_0 - wheel=0.44.0=py38h06a4308_0 - widgetsnbextension=4.0.13=pyhd8ed1ab_0 - xz=5.6.4=h5eee18b_1 - yaml=0.2.5=h7f98852_2 - zeromq=4.3.5=h6a678d5_0 - zipp=3.21.0=pyhd8ed1ab_0 - zlib=1.2.13=h5eee18b_1 - zstandard=0.23.0=py38h2c38b39_0 - zstd=1.5.6=hc292b87_0 - pip: - click==8.1.8 - deprecated==1.2.18 - dimod==0.12.17 - dwave-networkx==0.8.15 - networkx==2.8.8 - tabulate==0.8.10 - tsplib95==0.7.1 - wrapt==1.17.2 prefix: /opt/conda/envs/GeCO
[ "geco/mips/tests/test_facility_location.py::test_capacitated_facility_location", "geco/tests/test_generator.py::test_generator" ]
[]
[]
[]
MIT License
8,930
1,374
[ "geco/mips/facility_location.py" ]
geopandas__geopandas-1698
aa687662791863d55d3e27e8d468d9eb2eed82bd
2020-11-11 20:46:21
6e183ff6283e16b4f86dc83d788cce5db804e118
adriangb: Tests are passing now. CCing @martinfleis @brendan-ward @jorisvandenbossche for some review.

adriangb: > Shapely does not seem to list supported binary predicates. At least I have not found it, so we'll have to hard-code that one. But that is fine since we'll soon switch to shapely 2.0 anyway.

Yeah it's not very clean as well (at least the current implementation). Do you want to switch from `ValueError` to `NotImplementedError` for invalid predicates? Otherwise I'll just revert all of the error code to its previous state.
diff --git a/geopandas/sindex.py b/geopandas/sindex.py index e762782d..0716bc98 100644 --- a/geopandas/sindex.py +++ b/geopandas/sindex.py @@ -5,17 +5,6 @@ import numpy as np from . import _compat as compat -VALID_QUERY_PREDICATES = { - None, - "intersects", - "within", - "contains", - "overlaps", - "crosses", - "touches", -} - - def _get_sindex_class(): """Dynamically chooses a spatial indexing backend. @@ -63,10 +52,6 @@ if compat.HAS_RTREE: Geometries from which to build the spatial index. """ - # set of valid predicates for this spatial index - # by default, the global set - valid_query_predicates = VALID_QUERY_PREDICATES - def __init__(self, geometry): stream = ( (i, item.bounds, None) @@ -89,6 +74,27 @@ if compat.HAS_RTREE: [None] * self.geometries.size, dtype=object ) + @property + def valid_query_predicates(self): + """Returns valid predicates for this spatial index. + + Returns + ------- + set + Set of valid predicates for this spatial index. + """ + return { + None, + "intersects", + "within", + "contains", + "overlaps", + "crosses", + "touches", + "covers", + "contains_properly", + } + def query(self, geometry, predicate=None, sort=False): """Compatibility layer for pygeos.query. @@ -173,7 +179,12 @@ if compat.HAS_RTREE: elif predicate is not None: # For the remaining predicates, # we compare input_geom.predicate(tree_geom) - if predicate in ("contains", "intersects"): + if predicate in ( + "contains", + "intersects", + "covers", + "contains_properly", + ): # prepare this input geometry geometry = prep(geometry) tree_idx = [ @@ -277,10 +288,6 @@ if compat.HAS_PYGEOS: Geometries from which to build the spatial index. """ - # set of valid predicates for this spatial index - # by default, the global set - valid_query_predicates = VALID_QUERY_PREDICATES - def __init__(self, geometry): # set empty geometries to None to avoid segfault on GEOS <= 3.6 # see: @@ -293,6 +300,17 @@ if compat.HAS_PYGEOS: # store geometries, including empty geometries for user access self.geometries = geometry.copy() + @property + def valid_query_predicates(self): + """Returns valid predicates for this spatial index. + + Returns + ------- + set + Set of valid predicates for this spatial index. + """ + return pygeos.strtree.VALID_PREDICATES | set([None]) + def query(self, geometry, predicate=None, sort=False): """Wrapper for pygeos.query.
ENH: allow all predicates in sindex.query

We have a hard-coded list of valid predicates for `sindex.query` and `query_bulk`, but pygeos now supports more of them (`contains_properly`, and maybe more I did not check). It would be ideal to let pygeos/rtree raise an error if an unsupported predicate is passed, so we don't have to keep track of what is supported where.

I vaguely remember some discussion on this, but I could not find it, so I am not sure whether there is a good reason for the current solution (maybe @adriangb remembers?).

https://github.com/geopandas/geopandas/blob/aa687662791863d55d3e27e8d468d9eb2eed82bd/geopandas/sindex.py#L8-L16
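The direction the linked change takes is to ask the backend which predicates it supports instead of maintaining a module-level constant. A minimal sketch of that idea for the pygeos backend, assuming pygeos is installed (`None` is kept because `predicate=None` means a plain bounding-box query):

```python
import pygeos

# Valid predicates come straight from the backend rather than a hard-coded set.
valid_query_predicates = pygeos.strtree.VALID_PREDICATES | {None}

predicate = "contains_properly"
if predicate not in valid_query_predicates:
    raise ValueError(
        f"Got predicate='{predicate}'; expected one of {valid_query_predicates}"
    )
```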
geopandas/geopandas
diff --git a/geopandas/tests/test_sindex.py b/geopandas/tests/test_sindex.py index a498ae44..a7391967 100644 --- a/geopandas/tests/test_sindex.py +++ b/geopandas/tests/test_sindex.py @@ -297,6 +297,56 @@ class TestPygeosInterface: box(-0.5, -0.5, 1.5, 1.5), [], ), # bbox intersects but geom does not touch + ( + "contains", + box(10, 10, 20, 20), + [5], + ), # contains but does not contains_properly + ( + "covers", + box(-0.5, -0.5, 1, 1), + [0, 1], + ), # covers (0, 0) and (1, 1) + ( + "covers", + box(0.001, 0.001, 0.99, 0.99), + [], + ), # does not cover any + ( + "covers", + box(0, 0, 1, 1), + [0, 1], + ), # covers but does not contain + ( + "contains_properly", + box(0, 0, 1, 1), + [], + ), # intersects but does not contain + ( + "contains_properly", + box(0, 0, 1.001, 1.001), + [1], + ), # intersects 2 and contains 1 + ( + "contains_properly", + box(0.5, 0.5, 1.001, 1.001), + [1], + ), # intersects 1 and contains 1 + ( + "contains_properly", + box(0.5, 0.5, 1.5, 1.5), + [1], + ), # intersects and contains + ( + "contains_properly", + box(-1, -1, 2, 2), + [0, 1], + ), # intersects and contains multiple + ( + "contains_properly", + box(10, 10, 20, 20), + [], + ), # contains but does not contains_properly ), ) def test_query(self, predicate, test_geom, expected): @@ -395,7 +445,11 @@ class TestPygeosInterface: ("within", [(0.25, 0.28, 0.75, 0.75)], [[], []]), # does not intersect ("within", [(0, 0, 10, 10)], [[], []]), # intersects but is not within ("within", [(11, 11, 12, 12)], [[0], [5]]), # intersects and is within - ("contains", [(0, 0, 1, 1)], [[], []]), # intersects but does not contain + ( + "contains", + [(0, 0, 1, 1)], + [[], []], + ), # intersects and covers, but does not contain ( "contains", [(0, 0, 1.001, 1.001)], @@ -412,6 +466,62 @@ class TestPygeosInterface: [(-1, -1, 2, 2)], [[0, 0], [0, 1]], ), # intersects and contains multiple + ( + "contains", + [(10, 10, 20, 20)], + [[0], [5]], + ), # contains but does not contains_properly + ("touches", [(-1, -1, 0, 0)], [[0], [0]]), # bbox intersects and touches + ( + "touches", + [(-0.5, -0.5, 1.5, 1.5)], + [[], []], + ), # bbox intersects but geom does not touch + ( + "covers", + [(-0.5, -0.5, 1, 1)], + [[0, 0], [0, 1]], + ), # covers (0, 0) and (1, 1) + ( + "covers", + [(0.001, 0.001, 0.99, 0.99)], + [[], []], + ), # does not cover any + ( + "covers", + [(0, 0, 1, 1)], + [[0, 0], [0, 1]], + ), # covers but does not contain + ( + "contains_properly", + [(0, 0, 1, 1)], + [[], []], + ), # intersects but does not contain + ( + "contains_properly", + [(0, 0, 1.001, 1.001)], + [[0], [1]], + ), # intersects 2 and contains 1 + ( + "contains_properly", + [(0.5, 0.5, 1.001, 1.001)], + [[0], [1]], + ), # intersects 1 and contains 1 + ( + "contains_properly", + [(0.5, 0.5, 1.5, 1.5)], + [[0], [1]], + ), # intersects and contains + ( + "contains_properly", + [(-1, -1, 2, 2)], + [[0, 0], [0, 1]], + ), # intersects and contains multiple + ( + "contains_properly", + [(10, 10, 20, 20)], + [[], []], + ), # contains but does not contains_properly ), ) def test_query_bulk(self, predicate, test_geom, expected):
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 2, "test_score": 3 }, "num_modified_files": 1 }
0.8
{ "env_vars": null, "env_yml_path": [ "environment.yml" ], "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": true, "packages": "environment.yml", "pip_packages": [ "pytest pytest-cov pytest-xdist", "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.7", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///home/conda/feedstock_root/build_artifacts/attrs_1722977137225/work black @ file:///home/conda/feedstock_root/build_artifacts/black-recipe_1666773063432/work Bottleneck @ file:///home/conda/feedstock_root/build_artifacts/bottleneck_1649629660965/work Brotli @ file:///home/conda/feedstock_root/build_artifacts/brotli-split_1648883617327/work certifi @ file:///home/conda/feedstock_root/build_artifacts/certifi_1725278078093/work/certifi cffi @ file:///home/conda/feedstock_root/build_artifacts/cffi_1636046052501/work cfgv @ file:///home/conda/feedstock_root/build_artifacts/cfgv_1629909281805/work charset-normalizer @ file:///home/conda/feedstock_root/build_artifacts/charset-normalizer_1728479282467/work click @ file:///home/conda/feedstock_root/build_artifacts/click_1651215140632/work click-plugins==1.1.1 cligj @ file:///home/conda/feedstock_root/build_artifacts/cligj_1633637764473/work codecov @ file:///home/conda/feedstock_root/build_artifacts/codecov_1681778020913/work colorama @ file:///home/conda/feedstock_root/build_artifacts/colorama_1666700638685/work coverage @ file:///home/conda/feedstock_root/build_artifacts/coverage_1652409068862/work cycler @ file:///home/conda/feedstock_root/build_artifacts/cycler_1635519461629/work descartes==1.1.0 distlib @ file:///home/conda/feedstock_root/build_artifacts/distlib_1728557174656/work exceptiongroup @ file:///home/conda/feedstock_root/build_artifacts/exceptiongroup_1720869315914/work execnet==2.0.2 filelock @ file:///home/conda/feedstock_root/build_artifacts/filelock_1726613473834/work Fiona==1.8.20 fonttools @ file:///home/conda/feedstock_root/build_artifacts/fonttools_1651017735934/work GDAL==3.3.2 geographiclib @ file:///home/conda/feedstock_root/build_artifacts/geographiclib_1650904727084/work -e git+https://github.com/geopandas/geopandas.git@aa687662791863d55d3e27e8d468d9eb2eed82bd#egg=geopandas geopy @ file:///home/conda/feedstock_root/build_artifacts/geopy_1709140178986/work greenlet @ file:///home/conda/feedstock_root/build_artifacts/greenlet_1648882385539/work identify @ file:///home/conda/feedstock_root/build_artifacts/identify_1732589372185/work idna @ file:///home/conda/feedstock_root/build_artifacts/idna_1726459485162/work importlib-metadata @ file:///home/conda/feedstock_root/build_artifacts/importlib-metadata_1653252814274/work iniconfig @ file:///home/conda/feedstock_root/build_artifacts/iniconfig_1673103042956/work kiwisolver @ file:///home/conda/feedstock_root/build_artifacts/kiwisolver_1648854392523/work matplotlib @ file:///croot/matplotlib-suite_1667356714455/work munch==2.5.0 munkres==1.1.4 mypy-extensions @ file:///home/conda/feedstock_root/build_artifacts/mypy_extensions_1675543315189/work nodeenv @ file:///home/conda/feedstock_root/build_artifacts/nodeenv_1717585263558/work numexpr @ file:///home/conda/feedstock_root/build_artifacts/numexpr_1649636747170/work numpy @ file:///home/conda/feedstock_root/build_artifacts/numpy_1649806299270/work olefile @ file:///home/conda/feedstock_root/build_artifacts/olefile_1701735466804/work packaging @ file:///home/conda/feedstock_root/build_artifacts/packaging_1696202382185/work pandas==1.3.5 pathspec @ file:///home/conda/feedstock_root/build_artifacts/pathspec_1702249949303/work Pillow @ file:///home/conda/feedstock_root/build_artifacts/pillow_1630696604087/work platformdirs @ file:///home/conda/feedstock_root/build_artifacts/platformdirs_1696272223550/work pluggy @ file:///home/conda/feedstock_root/build_artifacts/pluggy_1648772594554/work pre-commit @ 
file:///home/conda/feedstock_root/build_artifacts/pre-commit_1657541073402/work psycopg2 @ file:///home/conda/feedstock_root/build_artifacts/psycopg2-split_1636701483782/work pycparser @ file:///home/conda/feedstock_root/build_artifacts/pycparser_1636257122734/work pyparsing @ file:///home/conda/feedstock_root/build_artifacts/pyparsing_1724616129934/work pyproj @ file:///home/conda/feedstock_root/build_artifacts/pyproj_1650061576675/work PyQt5==5.12.3 PyQt5_sip==4.19.18 PyQtChart==5.12 PyQtWebEngine==5.12.1 PySocks @ file:///home/conda/feedstock_root/build_artifacts/pysocks_1648857264451/work pytest @ file:///home/conda/feedstock_root/build_artifacts/pytest_1704035161844/work pytest-cov @ file:///home/conda/feedstock_root/build_artifacts/pytest-cov_1684964868191/work pytest-xdist==3.5.0 python-dateutil @ file:///home/conda/feedstock_root/build_artifacts/python-dateutil_1709299778482/work pytz @ file:///home/conda/feedstock_root/build_artifacts/pytz_1726055524169/work PyYAML @ file:///home/conda/feedstock_root/build_artifacts/pyyaml_1648757092905/work requests @ file:///home/conda/feedstock_root/build_artifacts/requests_1716354486713/work Rtree @ file:///home/conda/feedstock_root/build_artifacts/rtree_1665582585745/work Shapely @ file:///home/conda/feedstock_root/build_artifacts/shapely_1635194349843/work six @ file:///home/conda/feedstock_root/build_artifacts/six_1620240208055/work SQLAlchemy @ file:///home/conda/feedstock_root/build_artifacts/sqlalchemy_1651017966921/work toml @ file:///home/conda/feedstock_root/build_artifacts/toml_1604308577558/work tomli @ file:///home/conda/feedstock_root/build_artifacts/tomli_1727974628237/work tornado @ file:///home/conda/feedstock_root/build_artifacts/tornado_1648827244717/work typed-ast @ file:///home/conda/feedstock_root/build_artifacts/typed-ast_1650218777101/work typing_extensions @ file:///home/conda/feedstock_root/build_artifacts/typing_extensions_1688315532570/work ukkonen @ file:///home/conda/feedstock_root/build_artifacts/ukkonen_1649407025308/work unicodedata2 @ file:///home/conda/feedstock_root/build_artifacts/unicodedata2_1649111917568/work urllib3 @ file:///home/conda/feedstock_root/build_artifacts/urllib3_1708239446578/work virtualenv @ file:///home/conda/feedstock_root/build_artifacts/virtualenv_1681949329741/work zipp @ file:///home/conda/feedstock_root/build_artifacts/zipp_1677313463193/work
name: geopandas channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - alsa-lib=1.2.3.2=h166bdaf_0 - attrs=24.2.0=pyh71513ae_0 - black=22.10.0=py37h89c1867_1 - boost-cpp=1.74.0=h312852a_4 - bottleneck=1.3.4=py37hda87dfa_1 - brotli=1.0.9=h166bdaf_7 - brotli-bin=1.0.9=h166bdaf_7 - brotli-python=1.0.9=py37hd23a5d3_7 - bzip2=1.0.8=h7f98852_4 - c-ares=1.18.1=h7f98852_0 - ca-certificates=2025.2.25=h06a4308_0 - cairo=1.16.0=h6cf1ce9_1008 - certifi=2024.8.30=pyhd8ed1ab_0 - cffi=1.15.0=py37h036bc23_0 - cfgv=3.3.1=pyhd8ed1ab_0 - cfitsio=3.470=hb418390_7 - charset-normalizer=3.4.0=pyhd8ed1ab_0 - click=8.1.3=py37h89c1867_0 - click-plugins=1.1.1=py_0 - cligj=0.7.2=pyhd8ed1ab_1 - codecov=2.1.13=pyhd8ed1ab_0 - colorama=0.4.6=pyhd8ed1ab_0 - coverage=6.3.3=py37h540881e_0 - curl=7.79.1=h2574ce0_1 - cycler=0.11.0=pyhd8ed1ab_0 - dbus=1.13.6=h48d8840_2 - descartes=1.1.0=py_4 - distlib=0.3.9=pyhd8ed1ab_0 - exceptiongroup=1.2.2=pyhd8ed1ab_0 - expat=2.4.8=h27087fc_0 - filelock=3.16.1=pyhd8ed1ab_0 - fiona=1.8.20=py37hb7e2723_2 - font-ttf-dejavu-sans-mono=2.37=hab24e00_0 - font-ttf-inconsolata=3.000=h77eed37_0 - font-ttf-source-code-pro=2.038=h77eed37_0 - font-ttf-ubuntu=0.83=h77eed37_3 - fontconfig=2.14.0=h8e229c2_0 - fonts-conda-ecosystem=1=0 - fonts-conda-forge=1=0 - fonttools=4.33.3=py37h540881e_0 - freetype=2.12.1=h4a9f257_0 - freexl=1.0.6=h7f98852_0 - gdal=3.3.2=py37hd5a0ba4_2 - geographiclib=2.0=pyhd8ed1ab_0 - geopy=2.4.1=pyhd8ed1ab_1 - geos=3.9.1=h9c3ff4c_2 - geotiff=1.7.0=h4f31c25_0 - gettext=0.19.8.1=h73d1719_1008 - giflib=5.2.1=h36c2ea0_2 - glib=2.68.4=h9c3ff4c_1 - glib-tools=2.68.4=h9c3ff4c_1 - greenlet=1.1.2=py37hd23a5d3_2 - gst-plugins-base=1.18.5=hf529b03_0 - gstreamer=1.18.5=h76c114f_0 - hdf4=4.2.15=h10796ff_3 - hdf5=1.12.1=nompi_h2750804_100 - icu=68.2=h9c3ff4c_0 - identify=2.6.3=pyhd8ed1ab_0 - idna=3.10=pyhd8ed1ab_0 - importlib-metadata=4.11.4=py37h89c1867_0 - importlib_metadata=4.11.4=hd8ed1ab_0 - iniconfig=2.0.0=pyhd8ed1ab_0 - jbig=2.1=h7f98852_2003 - jpeg=9e=h166bdaf_1 - json-c=0.15=h98cffda_0 - kealib=1.4.14=h87e4c3c_3 - keyutils=1.6.1=h166bdaf_0 - kiwisolver=1.4.2=py37h7cecad7_1 - krb5=1.19.3=h3790be6_0 - lcms2=2.12=hddcbb42_0 - ld_impl_linux-64=2.40=h12ee557_0 - lerc=2.2.1=h9c3ff4c_0 - libblas=3.9.0=16_linux64_openblas - libbrotlicommon=1.0.9=h166bdaf_7 - libbrotlidec=1.0.9=h166bdaf_7 - libbrotlienc=1.0.9=h166bdaf_7 - libcblas=3.9.0=16_linux64_openblas - libclang=11.1.0=default_ha53f305_1 - libcurl=7.79.1=h2574ce0_1 - libdap4=3.20.6=hd7c4107_2 - libdeflate=1.7=h7f98852_5 - libedit=3.1.20191231=he28a2e2_2 - libev=4.33=h516909a_1 - libevent=2.1.10=h9b69904_4 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgdal=3.3.2=hd2dd7ad_2 - libgfortran-ng=13.2.0=h69a702a_0 - libgfortran5=13.2.0=ha4646dd_0 - libglib=2.68.4=h174f98d_1 - libgomp=11.2.0=h1234567_1 - libiconv=1.17=h166bdaf_0 - libkml=1.3.0=h238a007_1014 - liblapack=3.9.0=16_linux64_openblas - libllvm11=11.1.0=hf817b99_2 - libnetcdf=4.8.1=nompi_hb3fd0d9_101 - libnghttp2=1.43.0=h812cca2_1 - libnsl=2.0.0=h7f98852_0 - libogg=1.3.4=h7f98852_1 - libopenblas=0.3.21=h043d6bf_0 - libopus=1.3.1=h7f98852_1 - libpng=1.6.37=h21135ba_2 - libpq=13.3=hd57d9b9_0 - librttopo=1.1.0=h1185371_6 - libspatialindex=1.9.3=h9c3ff4c_4 - libspatialite=5.0.1=h8694cbe_6 - libssh2=1.10.0=ha56f1ee_2 - libstdcxx-ng=11.2.0=h1234567_1 - libtiff=4.3.0=hf544144_1 - libuuid=2.32.1=h7f98852_1000 - libvorbis=1.3.7=h9c3ff4c_0 - 
libwebp-base=1.2.2=h7f98852_1 - libxcb=1.13=h7f98852_1004 - libxkbcommon=1.0.3=he3ba5ed_0 - libxml2=2.9.12=h72842e0_0 - libzip=1.8.0=h4de3113_1 - lz4-c=1.9.3=h9c3ff4c_1 - matplotlib=3.5.3=py37h89c1867_2 - matplotlib-base=3.5.3=py37hf590b9c_0 - munch=2.5.0=py_0 - munkres=1.1.4=pyh9f0ad1d_0 - mypy_extensions=1.0.0=pyha770c72_0 - mysql-common=8.0.25=ha770c72_2 - mysql-libs=8.0.25=hfa10184_2 - ncurses=6.4=h6a678d5_0 - nodeenv=1.9.1=pyhd8ed1ab_0 - nomkl=1.0=h5ca1d4c_0 - nspr=4.32=h9c3ff4c_1 - nss=3.69=hb5efdd6_1 - numexpr=2.8.0=py37h85a3170_102 - numpy=1.21.6=py37h976b520_0 - olefile=0.47=pyhd8ed1ab_0 - openjpeg=2.4.0=hb52868f_1 - openssl=1.1.1w=h7f8727e_0 - packaging=23.2=pyhd8ed1ab_0 - pandas=1.3.5=py37h8c16a72_0 - pathspec=0.12.1=pyhd8ed1ab_0 - pcre=8.45=h9c3ff4c_0 - pillow=8.3.2=py37h0f21c89_0 - pip=22.3.1=py37h06a4308_0 - pixman=0.40.0=h36c2ea0_0 - platformdirs=3.11.0=pyhd8ed1ab_0 - pluggy=1.0.0=py37h89c1867_3 - poppler=21.09.0=ha39eefc_3 - poppler-data=0.4.12=hd8ed1ab_0 - postgresql=13.3=h2510834_0 - pre-commit=2.20.0=py37h89c1867_0 - proj=8.0.1=h277dcde_0 - psycopg2=2.9.2=py37h5e8e339_0 - pthread-stubs=0.4=h36c2ea0_1001 - pycparser=2.21=pyhd8ed1ab_0 - pyparsing=3.1.4=pyhd8ed1ab_0 - pyproj=3.2.1=py37hcc46e62_6 - pyqt=5.12.3=py37h89c1867_8 - pyqt-impl=5.12.3=py37hac37412_8 - pyqt5-sip=4.19.18=py37hcd2ae1e_8 - pyqtchart=5.12=py37he336c9b_8 - pyqtwebengine=5.12.1=py37he336c9b_8 - pysocks=1.7.1=py37h89c1867_5 - pytest=7.4.4=pyhd8ed1ab_0 - pytest-cov=4.1.0=pyhd8ed1ab_0 - python=3.7.16=h7a1cb2a_0 - python-dateutil=2.9.0=pyhd8ed1ab_0 - python_abi=3.7=2_cp37m - pytz=2024.2=pyhd8ed1ab_0 - pyyaml=6.0=py37h540881e_4 - qt=5.12.9=hda022c4_4 - readline=8.2=h5eee18b_0 - requests=2.32.2=pyhd8ed1ab_0 - rtree=1.0.1=py37h0b55af0_0 - setuptools=65.6.3=py37h06a4308_0 - shapely=1.8.0=py37h48c49eb_0 - six=1.16.0=pyh6c4a22f_0 - sqlalchemy=1.4.36=py37h540881e_0 - sqlite=3.45.3=h5eee18b_0 - tiledb=2.3.4=he87e0bf_0 - tk=8.6.14=h39e8969_0 - toml=0.10.2=pyhd8ed1ab_0 - tomli=2.0.2=pyhd8ed1ab_0 - tornado=6.1=py37h540881e_3 - typed-ast=1.5.3=py37h540881e_0 - typing-extensions=4.7.1=hd8ed1ab_0 - typing_extensions=4.7.1=pyha770c72_0 - tzcode=2022a=h166bdaf_0 - tzdata=2025b=h78e105d_0 - ukkonen=1.0.1=py37h7cecad7_2 - unicodedata2=14.0.0=py37h540881e_1 - urllib3=2.2.1=pyhd8ed1ab_0 - virtualenv=20.21.1=pyhd8ed1ab_0 - wheel=0.38.4=py37h06a4308_0 - xerces-c=3.2.3=h9d8b166_3 - xorg-kbproto=1.0.7=h7f98852_1002 - xorg-libice=1.0.10=h7f98852_0 - xorg-libsm=1.2.3=hd9c2040_1000 - xorg-libx11=1.7.2=h7f98852_0 - xorg-libxau=1.0.9=h7f98852_0 - xorg-libxdmcp=1.1.3=h7f98852_0 - xorg-libxext=1.3.4=h7f98852_1 - xorg-libxrender=0.9.10=h7f98852_1003 - xorg-renderproto=0.11.1=h7f98852_1002 - xorg-xextproto=7.3.0=h7f98852_1002 - xorg-xproto=7.0.31=h7f98852_1007 - xz=5.6.4=h5eee18b_1 - yaml=0.2.5=h7f98852_2 - zipp=3.15.0=pyhd8ed1ab_0 - zlib=1.2.13=h5eee18b_1 - zstd=1.5.0=ha95c52a_0 - pip: - execnet==2.0.2 - pytest-xdist==3.5.0 prefix: /opt/conda/envs/geopandas
[ "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query[covers-test_geom20-expected20]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query[covers-test_geom21-expected21]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query[covers-test_geom22-expected22]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query[contains_properly-test_geom23-expected23]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query[contains_properly-test_geom24-expected24]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query[contains_properly-test_geom25-expected25]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query[contains_properly-test_geom26-expected26]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query[contains_properly-test_geom27-expected27]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query[contains_properly-test_geom28-expected28]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query_bulk[covers-test_geom19-expected19]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query_bulk[covers-test_geom20-expected20]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query_bulk[covers-test_geom21-expected21]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query_bulk[contains_properly-test_geom22-expected22]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query_bulk[contains_properly-test_geom23-expected23]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query_bulk[contains_properly-test_geom24-expected24]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query_bulk[contains_properly-test_geom25-expected25]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query_bulk[contains_properly-test_geom26-expected26]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query_bulk[contains_properly-test_geom27-expected27]" ]
[]
[ "geopandas/tests/test_sindex.py::TestSeriesSindex::test_has_sindex", "geopandas/tests/test_sindex.py::TestSeriesSindex::test_empty_geoseries", "geopandas/tests/test_sindex.py::TestSeriesSindex::test_point", "geopandas/tests/test_sindex.py::TestSeriesSindex::test_empty_point", "geopandas/tests/test_sindex.py::TestSeriesSindex::test_polygons", "geopandas/tests/test_sindex.py::TestSeriesSindex::test_polygons_append", "geopandas/tests/test_sindex.py::TestSeriesSindex::test_lazy_build", "geopandas/tests/test_sindex.py::TestSeriesSindex::test_rebuild_on_item_change", "geopandas/tests/test_sindex.py::TestSeriesSindex::test_rebuild_on_slice", "geopandas/tests/test_sindex.py::TestFrameSindex::test_sindex", "geopandas/tests/test_sindex.py::TestFrameSindex::test_lazy_build", "geopandas/tests/test_sindex.py::TestFrameSindex::test_sindex_rebuild_on_set_geometry", "geopandas/tests/test_sindex.py::TestFrameSindex::test_rebuild_on_row_slice", "geopandas/tests/test_sindex.py::TestFrameSindex::test_rebuild_on_single_col_selection", "geopandas/tests/test_sindex.py::TestFrameSindex::test_rebuild_on_multiple_col_selection", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_intersection_bounds_tuple[test_geom0-expected0]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_intersection_bounds_tuple[test_geom1-expected1]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_intersection_bounds_tuple[test_geom2-expected2]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_intersection_bounds_tuple[test_geom3-expected3]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_intersection_invalid_bounds_tuple[test_geom0]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_intersection_invalid_bounds_tuple[-0.5]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_intersection_invalid_bounds_tuple[None]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_intersection_invalid_bounds_tuple[test_geom3]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query[None-test_geom0-expected0]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query[None-test_geom1-expected1]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query[None-test_geom2-expected2]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query[None-test_geom3-expected3]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query[intersects-test_geom4-expected4]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query[intersects-test_geom5-expected5]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query[intersects-test_geom6-expected6]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query[intersects-test_geom7-expected7]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query[within-test_geom8-expected8]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query[within-test_geom9-expected9]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query[within-test_geom10-expected10]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query[within-test_geom11-expected11]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query[contains-test_geom12-expected12]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query[contains-test_geom13-expected13]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query[contains-test_geom14-expected14]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query[contains-test_geom15-expected15]", 
"geopandas/tests/test_sindex.py::TestPygeosInterface::test_query[contains-test_geom16-expected16]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query[touches-test_geom17-expected17]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query[touches-test_geom18-expected18]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query[contains-test_geom19-expected19]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query_invalid_geometry", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query_empty_geometry[None-expected_value0]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query_empty_geometry[test_geom1-expected_value1]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query_empty_geometry[test_geom2-expected_value2]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query_empty_geometry[test_geom3-expected_value3]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query_empty_geometry[test_geom4-expected_value4]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query_invalid_predicate", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query_sorting[True-expected0]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query_bulk[None-test_geom0-expected0]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query_bulk[None-test_geom1-expected1]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query_bulk[None-test_geom2-expected2]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query_bulk[intersects-test_geom3-expected3]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query_bulk[intersects-test_geom4-expected4]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query_bulk[intersects-test_geom5-expected5]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query_bulk[intersects-test_geom6-expected6]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query_bulk[intersects-test_geom7-expected7]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query_bulk[within-test_geom8-expected8]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query_bulk[within-test_geom9-expected9]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query_bulk[within-test_geom10-expected10]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query_bulk[contains-test_geom11-expected11]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query_bulk[contains-test_geom12-expected12]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query_bulk[contains-test_geom13-expected13]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query_bulk[contains-test_geom14-expected14]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query_bulk[contains-test_geom15-expected15]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query_bulk[contains-test_geom16-expected16]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query_bulk[touches-test_geom17-expected17]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query_bulk[touches-test_geom18-expected18]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query_bulk_empty_geometry[test_geoms0-expected_value0]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query_bulk_empty_geometry[test_geoms1-expected_value1]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query_bulk_empty_geometry[test_geoms2-expected_value2]", 
"geopandas/tests/test_sindex.py::TestPygeosInterface::test_query_bulk_empty_geometry[test_geoms3-expected_value3]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query_bulk_empty_input_array", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query_bulk_invalid_input_geometry", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query_bulk_invalid_predicate", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query_bulk_input_type[None-test_geom0-expected0]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query_bulk_input_type[intersects-test_geom1-expected1]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query_bulk_input_type[contains-test_geom2-expected2]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_query_bulk_sorting[True-expected0]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_empty_tree_geometries", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_size", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_len", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_is_empty", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_integration_natural_earth[None-expected_shape0]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_integration_natural_earth[intersects-expected_shape1]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_integration_natural_earth[within-expected_shape2]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_integration_natural_earth[contains-expected_shape3]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_integration_natural_earth[overlaps-expected_shape4]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_integration_natural_earth[crosses-expected_shape5]", "geopandas/tests/test_sindex.py::TestPygeosInterface::test_integration_natural_earth[touches-expected_shape6]" ]
[]
BSD 3-Clause "New" or "Revised" License
8,933
775
[ "geopandas/sindex.py" ]
avilum__linqit-8
34f7891d1621a6463d4a30271abe54822aef8f97
2020-11-11 21:08:06
34f7891d1621a6463d4a30271abe54822aef8f97
diff --git a/linqit/__init__.py b/linqit/__init__.py index 05260a1..af20d43 100644 --- a/linqit/__init__.py +++ b/linqit/__init__.py @@ -3,7 +3,7 @@ from itertools import chain DEFAULT_LAZY = True -# A default variable for the fucntion, so None as an argument will be valid, but not default. +# A default variable for the function, so None as an argument will be valid, but not default. _NONE = type('_NONE', (object,), {}) @@ -202,6 +202,13 @@ class List(list): return default raise + def order_by(self, expression=None): + """ + Returns a List of data, sorted according to expression. If no expression is given, the default sort is used. + """ + sorted_data = sorted(self, key=expression) + return List(sorted_data) + def select(self, expression): """ Returns a list of the values of the expression, from all the wrapped elements.
Add support for order_by? Hey, great library. I would love to do something like this, but it seems to just use the substandard built-in list implementation, which returns None: ```python people = List() people.append(Person("Michael", 47)) people.append(Person("Sarah", 50)) people.append(Person("Jake", 42)) older = people.where(lambda p: p.age > 43).sort(lambda p: -p.age) ``` But sadly, sort returns None and I didn't see an `order_by`; what am I missing?
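For reference, a sketch of how the request above reads once the `order_by` added in the patch is available. It assumes, as the library's other chained methods suggest, that `where()` returns a `List`; the `Person` class here is a minimal stand-in defined only for this example.

```python
from linqit import List


class Person:
    """Minimal stand-in with only the fields this sketch needs."""

    def __init__(self, name, age):
        self.name = name
        self.age = age


people = List()
people.append(Person("Michael", 47))
people.append(Person("Sarah", 50))
people.append(Person("Jake", 42))

# order_by() returns a new List sorted by the key expression,
# so it chains where the built-in sort() (which returns None) could not.
older = people.where(lambda p: p.age > 43).order_by(lambda p: -p.age)
print([p.name for p in older])  # ['Sarah', 'Michael']
```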
avilum/linqit
diff --git a/tests/test_list.py b/tests/test_list.py index ddafe69..ff3e49a 100644 --- a/tests/test_list.py +++ b/tests/test_list.py @@ -1,5 +1,6 @@ from datetime import datetime from unittest import TestCase + from linqit import List @@ -213,3 +214,22 @@ class ListTests(TestCase): self.assertEqual(self.list[1].name, self.list.concat(self.list).where(lambda e: e.age > 18).skip(1).except_for( lambda e: e.name == 'bob').select( lambda x: x.name).last()) + + def test_order_by_bare(self): + data = [1, -1, 7, 200, 4, 3] + sorted_data = sorted(data) + + linq_data = List(data).order_by() + self.assertEqual(sorted_data, linq_data) + + def test_order_by_complex(self): + data = [ + Person('jake', 'samson', None, 32), + Person('sam', 'thompson', None, 44), + Person('sarah', 'smith', None, 41), + Person('zoe', 'lee', None, 27), + ] + sorted_data = sorted(data, key=lambda p: p.age, reverse=True) + + linq_data = List(data).order_by(lambda p: -p.age) + self.assertEqual(sorted_data, linq_data)
{ "commit_name": "head_commit", "failed_lite_validators": [], "has_test_patch": true, "is_lite": true, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 0 }, "num_modified_files": 1 }
unknown
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.9", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
exceptiongroup==1.2.2 iniconfig==2.1.0 -e git+https://github.com/avilum/linqit.git@34f7891d1621a6463d4a30271abe54822aef8f97#egg=linqit packaging==24.2 pluggy==1.5.0 pytest==8.3.5 tomli==2.2.1
name: linqit channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - exceptiongroup==1.2.2 - iniconfig==2.1.0 - packaging==24.2 - pluggy==1.5.0 - pytest==8.3.5 - tomli==2.2.1 prefix: /opt/conda/envs/linqit
[ "tests/test_list.py::ListTests::test_order_by_bare", "tests/test_list.py::ListTests::test_order_by_complex" ]
[]
[ "tests/test_list.py::ListTests::test_add_method", "tests/test_list.py::ListTests::test_all_method", "tests/test_list.py::ListTests::test_all_method_with_false_arguments", "tests/test_list.py::ListTests::test_any_method", "tests/test_list.py::ListTests::test_any_method_with_all_false", "tests/test_list.py::ListTests::test_any_method_without_expression", "tests/test_list.py::ListTests::test_any_method_without_expression_and_empty_list", "tests/test_list.py::ListTests::test_avg_method", "tests/test_list.py::ListTests::test_concat_method", "tests/test_list.py::ListTests::test_contains_method_with_existing_item", "tests/test_list.py::ListTests::test_contains_method_with_non_existing_item", "tests/test_list.py::ListTests::test_different_types_sanity", "tests/test_list.py::ListTests::test_except_for_method", "tests/test_list.py::ListTests::test_first_method", "tests/test_list.py::ListTests::test_first_method_with_nonexisisting_value_raises_indexerror", "tests/test_list.py::ListTests::test_first_method_with_nonexisisting_value_returns_none", "tests/test_list.py::ListTests::test_get_list_properties_should_combine_values", "tests/test_list.py::ListTests::test_init_with_several_items", "tests/test_list.py::ListTests::test_intersect_method", "tests/test_list.py::ListTests::test_list_peoperties_with_unique_object_peoperty", "tests/test_list.py::ListTests::test_max_method", "tests/test_list.py::ListTests::test_min_method", "tests/test_list.py::ListTests::test_non_existing_attribute_raises_attribute_error", "tests/test_list.py::ListTests::test_non_list_peoperties_with_unique_object_peoperty", "tests/test_list.py::ListTests::test_non_list_properties_should_combine_values", "tests/test_list.py::ListTests::test_one_liner", "tests/test_list.py::ListTests::test_runtime_changes_apply", "tests/test_list.py::ListTests::test_select_method", "tests/test_list.py::ListTests::test_skip_method", "tests/test_list.py::ListTests::test_slice_method", "tests/test_list.py::ListTests::test_sum_method", "tests/test_list.py::ListTests::test_take_method", "tests/test_list.py::ListTests::test_times_method", "tests/test_list.py::ListTests::test_where_method" ]
[]
Apache License 2.0
8,934
260
[ "linqit/__init__.py" ]
frictionlessdata__frictionless-py-527
38002600c959bbe2863d533177d56a9df89c9fbd
2020-11-12 08:16:27
38002600c959bbe2863d533177d56a9df89c9fbd
diff --git a/frictionless/validate/inquiry.py b/frictionless/validate/inquiry.py index 4136259b..8e570d92 100644 --- a/frictionless/validate/inquiry.py +++ b/frictionless/validate/inquiry.py @@ -9,7 +9,7 @@ from .. import exceptions @Report.from_validate -def validate_inquiry(source): +def validate_inquiry(source, *, nopool=False): """Validate inquiry API | Usage @@ -18,6 +18,7 @@ def validate_inquiry(source): Parameters: source (dict|str): an inquiry descriptor + nopool? (bool): disable multiprocessing Returns: Report: validation report @@ -44,13 +45,14 @@ def validate_inquiry(source): continue tasks.append(task) - # Validate task - if len(tasks) == 1: - report = validate(**helpers.create_options(tasks[0])) - reports.append(report) + # Validate sequentially + if len(tasks) == 1 or nopool: + for task in tasks: + report = validate(**helpers.create_options(task)) + reports.append(report) - # Validate tasks - if len(tasks) > 1: + # Validate in-parallel + else: with Pool() as pool: reports.extend(pool.map(partial(helpers.apply_function, validate), tasks)) diff --git a/frictionless/validate/package.py b/frictionless/validate/package.py index ebea2872..0fd8c9a9 100644 --- a/frictionless/validate/package.py +++ b/frictionless/validate/package.py @@ -8,7 +8,13 @@ from .. import exceptions @Report.from_validate def validate_package( - source, basepath=None, trusted=False, noinfer=False, nolookup=False, **options + source, + basepath=None, + trusted=False, + noinfer=False, + nolookup=False, + nopool=False, + **options ): """Validate package @@ -22,6 +28,7 @@ def validate_package( trusted? (bool): don't raise an exception on unsafe paths noinfer? (bool): don't call `package.infer` nolookup? (bool): don't read lookup tables skipping integrity checks + nopool? (bool): disable multiprocessing **options (dict): options for every extracted table Returns: @@ -62,7 +69,7 @@ def validate_package( # Validate inquiry inquiry = Inquiry(descriptor) - report = validate_inquiry(inquiry) + report = validate_inquiry(inquiry, nopool=nopool) # Return report return Report(time=timer.time, errors=report["errors"], tables=report["tables"])
Multiprocessing introduces high overhead for small validation jobs # Overview Validation can now be done on just metadata and table headers, so migrating `goodtables-pandas` should now be very straightforward. Thank you. (see https://github.com/frictionlessdata/frictionless-py/issues/503, closed by #514 and #515) ```python from frictionless import validate, Query validate(..., query=Query(limit_rows=1), nolookup=True, noinfer=True) ``` *NOTE: `limit_rows=1` because `Query(limit_rows=0)` queries all rows, rather than no rows.* However, I wanted to point out that with the validation process so reduced, the use of `multiprocessing.Pool` introduces a significant overhead. My test package with three resources takes 1.2 s, instead of just 0.2 s if I replace: https://github.com/frictionlessdata/frictionless-py/blob/09cc98e1966d6f97f4eecb47757f45f8a946c5e7/frictionless/validate/inquiry.py#L54-L55 with: ```python for task in tasks: reports.append(validate(**task)) ``` So it may be worth considering a global or local configuration to set the max number of threads (threads > 1) or disable parallel processing altogether (threads == 1). --- Please preserve this line to notify @roll (lead of this repository)
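The patch above takes the second suggestion in the form of a `nopool` flag rather than a configurable thread count. A minimal sketch of a metadata-plus-headers validation that opts out of multiprocessing (the descriptor path is illustrative):

```python
from frictionless import Query, validate

# For small jobs the Pool start-up cost dominates, so validate sequentially.
report = validate(
    "datapackage.json",         # illustrative path to a package descriptor
    query=Query(limit_rows=1),  # headers plus one row, as in the issue
    noinfer=True,
    nolookup=True,
    nopool=True,                # added by the patch: skip multiprocessing.Pool
)
print(report.valid)
```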
frictionlessdata/frictionless-py
diff --git a/tests/validate/test_inquiry.py b/tests/validate/test_inquiry.py index 38a4a976..2340deb3 100644 --- a/tests/validate/test_inquiry.py +++ b/tests/validate/test_inquiry.py @@ -101,3 +101,20 @@ def test_validate_with_multiple_packages(): [3, 3, None, "primary-key-error"], [4, 4, None, "blank-row"], ] + + +def test_validate_with_multiple_packages_with_nopool(): + report = validate( + { + "tasks": [ + {"source": "data/package/datapackage.json"}, + {"source": "data/invalid/datapackage.json"}, + ] + }, + nopool=True, + ) + assert report.flatten(["tablePosition", "rowPosition", "fieldPosition", "code"]) == [ + [3, 3, None, "blank-row"], + [3, 3, None, "primary-key-error"], + [4, 4, None, "blank-row"], + ] diff --git a/tests/validate/test_package.py b/tests/validate/test_package.py index 5dd51144..299497a8 100644 --- a/tests/validate/test_package.py +++ b/tests/validate/test_package.py @@ -146,6 +146,15 @@ def test_validate_package_dialect_header_false(): assert report.valid +def test_validate_with_nopool(): + report = validate("data/invalid/datapackage.json", nopool=True) + assert report.flatten(["tablePosition", "rowPosition", "fieldPosition", "code"]) == [ + [1, 3, None, "blank-row"], + [1, 3, None, "primary-key-error"], + [2, 4, None, "blank-row"], + ] + + # Checksum DESCRIPTOR_SH = {
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 0 }, "num_modified_files": 2 }
3.27
{ "env_vars": null, "env_yml_path": null, "install": "pip install --upgrade -e .[bigquery,ckan,excel,gsheets,html,json,ods,pandas,s3,server,spss,sql,dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "pytest-xdist", "pytest-mock", "pytest-asyncio" ], "pre_install": [ "apt-get update", "apt-get install -y gcc", "apt-get install -y postgresql libpq-dev" ], "python": "3.9", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
asttokens==3.0.0 attrs==25.3.0 beautifulsoup4==4.13.3 black==23.12.1 bleach==6.2.0 blinker==1.9.0 boto3==1.37.23 botocore==1.37.23 cachetools==5.5.2 certifi==2025.1.31 cffi==1.17.1 chardet==5.2.0 charset-normalizer==3.4.1 ckanapi==4.8 click==8.1.8 coverage==7.8.0 coveralls==4.0.1 cryptography==44.0.2 cssselect==1.3.0 databind==4.5.2 databind.core==4.5.2 databind.json==4.5.2 decorator==5.2.1 defusedxml==0.7.1 Deprecated==1.2.18 docopt==0.6.2 docspec==2.2.1 docspec-python==2.2.1 docstring_parser==0.11 et_xmlfile==2.0.0 exceptiongroup==1.2.2 execnet==2.1.1 executing==2.2.0 ezodf==0.3.2 fastjsonschema==2.21.1 filelock==3.18.0 Flask==3.1.0 -e git+https://github.com/frictionlessdata/frictionless-py.git@38002600c959bbe2863d533177d56a9df89c9fbd#egg=frictionless gdown==5.2.0 google-api-core==2.24.2 google-api-python-client==2.166.0 google-auth==2.38.0 google-auth-httplib2==0.2.0 googleapis-common-protos==1.69.2 greenlet==3.1.1 gunicorn==23.0.0 httplib2==0.22.0 idna==3.10 ijson==3.3.0 importlib_metadata==8.6.1 iniconfig==2.1.0 ipython==8.18.1 isodate==0.7.2 itsdangerous==2.2.0 jedi==0.19.2 Jinja2==3.1.6 jmespath==1.0.1 jsonlines==4.0.0 jsonschema==4.23.0 jsonschema-specifications==2024.10.1 jupyter_client==8.6.3 jupyter_core==5.7.2 jupyterlab_pygments==0.3.0 lxml==5.3.1 MarkupSafe==3.0.2 matplotlib-inline==0.1.7 mccabe==0.7.0 mistune==3.1.3 moto==5.1.2 mypy==1.15.0 mypy-extensions==1.0.0 nbclient==0.10.2 nbconvert==7.16.6 nbformat==5.10.4 nr-date==2.1.0 nr-stream==1.1.5 nr.util==0.8.12 numpy==2.0.2 oauth2client==4.1.3 openpyxl==3.1.5 packaging==24.2 pandas==2.2.3 pandocfilters==1.5.1 parso==0.8.4 pathspec==0.12.1 petl==1.7.15 pexpect==4.9.0 platformdirs==4.3.7 pluggy==1.5.0 prompt_toolkit==3.0.50 proto-plus==1.26.1 protobuf==6.30.2 psycopg2==2.9.10 ptyprocess==0.7.0 pure_eval==0.2.3 pyasn1==0.6.1 pyasn1_modules==0.4.2 pycodestyle==2.13.0 pycparser==2.22 pydoc-markdown==4.8.2 pydocstyle==6.3.0 pyflakes==3.3.2 Pygments==2.19.1 pylama==8.4.1 PyMySQL==1.1.1 pyparsing==3.2.3 pyquery==2.0.1 PySocks==1.7.1 pytest==8.3.5 pytest-asyncio==0.26.0 pytest-cov==6.0.0 pytest-mock==3.14.0 pytest-xdist==3.6.1 python-dateutil==2.9.0.post0 python-dotenv==1.1.0 python-slugify==8.0.4 pytz==2025.2 PyYAML==6.0.2 pyzmq==26.3.0 referencing==0.36.2 requests==2.32.3 responses==0.25.7 rfc3986==2.0.0 rpds-py==0.24.0 rsa==4.9 s3transfer==0.11.4 savReaderWriter==3.4.2 simpleeval==1.0.3 simplejson==3.20.1 six==1.17.0 snowballstemmer==2.2.0 soupsieve==2.6 SQLAlchemy==2.0.40 stack-data==0.6.3 stringcase==1.2.0 text-unidecode==1.3 tinycss2==1.4.0 tomli==2.2.1 tomli_w==1.2.0 tornado==6.4.2 tqdm==4.67.1 traitlets==5.14.3 typeapi==2.2.4 typing_extensions==4.13.0 tzdata==2025.2 unicodecsv==0.14.1 uritemplate==4.1.1 urllib3==1.26.20 watchdog==6.0.0 wcwidth==0.2.13 webencodings==0.5.1 Werkzeug==3.1.3 wrapt==1.17.2 xlrd==2.0.1 xlwt==1.3.0 xmltodict==0.14.2 yapf==0.43.0 zipp==3.21.0
name: frictionless-py channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - asttokens==3.0.0 - attrs==25.3.0 - beautifulsoup4==4.13.3 - black==23.12.1 - bleach==6.2.0 - blinker==1.9.0 - boto3==1.37.23 - botocore==1.37.23 - cachetools==5.5.2 - certifi==2025.1.31 - cffi==1.17.1 - chardet==5.2.0 - charset-normalizer==3.4.1 - ckanapi==4.8 - click==8.1.8 - coverage==7.8.0 - coveralls==4.0.1 - cryptography==44.0.2 - cssselect==1.3.0 - databind==4.5.2 - databind-core==4.5.2 - databind-json==4.5.2 - decorator==5.2.1 - defusedxml==0.7.1 - deprecated==1.2.18 - docopt==0.6.2 - docspec==2.2.1 - docspec-python==2.2.1 - docstring-parser==0.11 - et-xmlfile==2.0.0 - exceptiongroup==1.2.2 - execnet==2.1.1 - executing==2.2.0 - ezodf==0.3.2 - fastjsonschema==2.21.1 - filelock==3.18.0 - flask==3.1.0 - frictionless==3.27.3 - gdown==5.2.0 - google-api-core==2.24.2 - google-api-python-client==2.166.0 - google-auth==2.38.0 - google-auth-httplib2==0.2.0 - googleapis-common-protos==1.69.2 - greenlet==3.1.1 - gunicorn==23.0.0 - httplib2==0.22.0 - idna==3.10 - ijson==3.3.0 - importlib-metadata==8.6.1 - iniconfig==2.1.0 - ipython==8.18.1 - isodate==0.7.2 - itsdangerous==2.2.0 - jedi==0.19.2 - jinja2==3.1.6 - jmespath==1.0.1 - jsonlines==4.0.0 - jsonschema==4.23.0 - jsonschema-specifications==2024.10.1 - jupyter-client==8.6.3 - jupyter-core==5.7.2 - jupyterlab-pygments==0.3.0 - lxml==5.3.1 - markupsafe==3.0.2 - matplotlib-inline==0.1.7 - mccabe==0.7.0 - mistune==3.1.3 - moto==5.1.2 - mypy==1.15.0 - mypy-extensions==1.0.0 - nbclient==0.10.2 - nbconvert==7.16.6 - nbformat==5.10.4 - nr-date==2.1.0 - nr-stream==1.1.5 - nr-util==0.8.12 - numpy==2.0.2 - oauth2client==4.1.3 - openpyxl==3.1.5 - packaging==24.2 - pandas==2.2.3 - pandocfilters==1.5.1 - parso==0.8.4 - pathspec==0.12.1 - petl==1.7.15 - pexpect==4.9.0 - platformdirs==4.3.7 - pluggy==1.5.0 - prompt-toolkit==3.0.50 - proto-plus==1.26.1 - protobuf==6.30.2 - psycopg2==2.9.10 - ptyprocess==0.7.0 - pure-eval==0.2.3 - pyasn1==0.6.1 - pyasn1-modules==0.4.2 - pycodestyle==2.13.0 - pycparser==2.22 - pydoc-markdown==4.8.2 - pydocstyle==6.3.0 - pyflakes==3.3.2 - pygments==2.19.1 - pylama==8.4.1 - pymysql==1.1.1 - pyparsing==3.2.3 - pyquery==2.0.1 - pysocks==1.7.1 - pytest==8.3.5 - pytest-asyncio==0.26.0 - pytest-cov==6.0.0 - pytest-mock==3.14.0 - pytest-xdist==3.6.1 - python-dateutil==2.9.0.post0 - python-dotenv==1.1.0 - python-slugify==8.0.4 - pytz==2025.2 - pyyaml==6.0.2 - pyzmq==26.3.0 - referencing==0.36.2 - requests==2.32.3 - responses==0.25.7 - rfc3986==2.0.0 - rpds-py==0.24.0 - rsa==4.9 - s3transfer==0.11.4 - savreaderwriter==3.4.2 - simpleeval==1.0.3 - simplejson==3.20.1 - six==1.17.0 - snowballstemmer==2.2.0 - soupsieve==2.6 - sqlalchemy==2.0.40 - stack-data==0.6.3 - stringcase==1.2.0 - text-unidecode==1.3 - tinycss2==1.4.0 - tomli==2.2.1 - tomli-w==1.2.0 - tornado==6.4.2 - tqdm==4.67.1 - traitlets==5.14.3 - typeapi==2.2.4 - typing-extensions==4.13.0 - tzdata==2025.2 
- unicodecsv==0.14.1 - uritemplate==4.1.1 - urllib3==1.26.20 - watchdog==6.0.0 - wcwidth==0.2.13 - webencodings==0.5.1 - werkzeug==3.1.3 - wrapt==1.17.2 - xlrd==2.0.1 - xlwt==1.3.0 - xmltodict==0.14.2 - yapf==0.43.0 - zipp==3.21.0 prefix: /opt/conda/envs/frictionless-py
[ "tests/validate/test_inquiry.py::test_validate_with_multiple_packages_with_nopool", "tests/validate/test_package.py::test_validate_with_nopool" ]
[]
[ "tests/validate/test_inquiry.py::test_validate", "tests/validate/test_package.py::test_validate", "tests/validate/test_package.py::test_validate_invalid_descriptor_path", "tests/validate/test_package.py::test_validate_invalid_package", "tests/validate/test_package.py::test_validate_invalid_package_noinfer", "tests/validate/test_package.py::test_validate_invalid_table", "tests/validate/test_package.py::test_validate_package_dialect_header_false", "tests/validate/test_package.py::test_validate_checksum", "tests/validate/test_package.py::test_validate_checksum_invalid", "tests/validate/test_package.py::test_validate_checksum_size", "tests/validate/test_package.py::test_validate_checksum_size_invalid", "tests/validate/test_package.py::test_validate_checksum_hash", "tests/validate/test_package.py::test_check_file_checksum_hash_invalid", "tests/validate/test_package.py::test_check_file_checksum_hash_not_supported_algorithm", "tests/validate/test_package.py::test_validate_package_invalid_json_issue_192", "tests/validate/test_package.py::test_composite_primary_key_unique_issue_215", "tests/validate/test_package.py::test_composite_primary_key_not_unique_issue_215", "tests/validate/test_package.py::test_validate_geopoint_required_constraint_issue_231", "tests/validate/test_package.py::test_validate_package_number_test_issue_232", "tests/validate/test_package.py::test_validate_package_with_schema_issue_348" ]
[]
MIT License
8,938
663
[ "frictionless/validate/inquiry.py", "frictionless/validate/package.py" ]
skypyproject__skypy-365
07a5b9a9751abe1c2d7ac215525acf4c6431554f
2020-11-12 10:16:59
44ed1353bce66f13de8f1f983294f86efebbbff9
JonathanDHarris: Thanks @rrjbca. I moved the import and the methods off the `SkyPyLoader` class. I was trying to neaten it up, but to be honest, I don't entirely know what the preferred style is for Python and I was partly following PyCharm's suggestions when I did this. I can move them back to how you wrote them if that's correct.
diff --git a/skypy/pipeline/_config.py b/skypy/pipeline/_config.py index f2aa6a0..1d33eea 100644 --- a/skypy/pipeline/_config.py +++ b/skypy/pipeline/_config.py @@ -3,6 +3,7 @@ from importlib import import_module import yaml import re from astropy.units import Quantity +from collections.abc import Mapping __all__ = [ 'load_skypy_yaml', @@ -22,6 +23,26 @@ def import_function(qualname): return function +def validate_keys(config): + for k in config.keys(): + if not isinstance(k, str): + raise ValueError(f"Invalid key found in config. {k} is not a string. " + f"Either rename this value or wrap it in quotes.") + + +def validate_config(config): + # Check each key at the current depth is a string + validate_keys(config) + for v in config.values(): + # If any values are dictionaries, recurse + if isinstance(v, Mapping): + validate_config(v) + # If any values are tuples (i.e. function calls) validate kwargs + if isinstance(v, tuple) and len(v) > 1 and isinstance(v[1], Mapping): + validate_keys(v[1]) + return config + + class SkyPyLoader(yaml.SafeLoader): '''custom YAML loader class with SkyPy extensions''' @@ -30,7 +51,8 @@ class SkyPyLoader(yaml.SafeLoader): '''load the first YAML document from stream''' loader = cls(stream) try: - return loader.get_single_data() + single_data = loader.get_single_data() + return validate_config(single_data if single_data else {}) finally: loader.dispose()
SkyPyLoader parses names as non-string variables The SkyPyLoader parses variable names using the same logic that it uses to parse their values. As a result, variable names can take non-string types, including int, float and astropy Quantity. Consider the config file: ```yml # config.yml 42_km: 42 km 43 km: 43 km ``` ``` python >>> from skypy.pipeline import load_skypy_yaml >>> config = load_skypy_yaml("./config.yml") >>> print(config) {'42_km': <Quantity 42. km>, <Quantity 43. km>: <Quantity 43. km>} ``` For the first entry, the key "42_km" is correctly interpreted as a string. However, for the second entry, the key "43 km" is interpreted as an astropy Quantity by the same logic that applies to its corresponding value. This is problematic because `Pipeline.__getitem__` assumes all variable names in `state` are strings. @ntessore pointed out that pyyaml will not let you apply different logic to keys and values when parsing files and therefore suggested that instead we enforce that all variable names are strings within the Pipeline class.
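With the validation added in the patch above, the misparsed key now fails fast instead of silently becoming a `Quantity`. A sketch of the post-patch behaviour for the same `config.yml` (the printed message is indicative only):

```python
from skypy.pipeline import load_skypy_yaml

# config.yml as above:
#   42_km: 42 km
#   43 km: 43 km
try:
    load_skypy_yaml("./config.yml")
except ValueError as err:
    # e.g. "Invalid key found in config. 43.0 km is not a string.
    #       Either rename this value or wrap it in quotes."
    print(err)
```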
skypyproject/skypy
diff --git a/skypy/pipeline/tests/data/numeric_key.yml b/skypy/pipeline/tests/data/numeric_key.yml new file mode 100644 index 0000000..9527312 --- /dev/null +++ b/skypy/pipeline/tests/data/numeric_key.yml @@ -0,0 +1,1 @@ +43 km: 43 km diff --git a/skypy/pipeline/tests/data/numeric_kwarg.yml b/skypy/pipeline/tests/data/numeric_kwarg.yml new file mode 100644 index 0000000..7c8de18 --- /dev/null +++ b/skypy/pipeline/tests/data/numeric_kwarg.yml @@ -0,0 +1,4 @@ +tables: + test_table_1: + test_column_1: !numpy.random.uniform + 10: 10 diff --git a/skypy/pipeline/tests/data/numeric_nested_key.yml b/skypy/pipeline/tests/data/numeric_nested_key.yml new file mode 100644 index 0000000..fafa53d --- /dev/null +++ b/skypy/pipeline/tests/data/numeric_nested_key.yml @@ -0,0 +1,4 @@ +tables: + test_table_1: + 10 km: !numpy.random.uniform + size: 10 diff --git a/skypy/pipeline/tests/test_config.py b/skypy/pipeline/tests/test_config.py index 525c616..b789b15 100644 --- a/skypy/pipeline/tests/test_config.py +++ b/skypy/pipeline/tests/test_config.py @@ -41,3 +41,27 @@ def test_yaml_quantities(): assert config['42_km'] == units.Quantity('42 km') assert config['1_deg2'] == units.Quantity('1 deg2') + + +def test_keys_must_be_strings(): + # config with key that doesn't parse as String. + filename = get_pkg_data_filename('data/numeric_key.yml') + with pytest.raises(ValueError) as e: + load_skypy_yaml(filename) + assert("Invalid key found in config" in e.value.args[0]) + + +def test_nested_keys_must_be_strings(): + # config with nested key that doesn't parse as String. + filename = get_pkg_data_filename('data/numeric_nested_key.yml') + with pytest.raises(ValueError) as e: + load_skypy_yaml(filename) + assert("Invalid key found in config" in e.value.args[0]) + + +def test_kwarg_must_be_strings(): + # config with function kwarg name that doesn't parse as String. + filename = get_pkg_data_filename('data/numeric_kwarg.yml') + with pytest.raises(ValueError) as e: + load_skypy_yaml(filename) + assert("Invalid key found in config" in e.value.args[0])
{ "commit_name": "merge_commit", "failed_lite_validators": [], "has_test_patch": true, "is_lite": true, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 3 }, "num_modified_files": 1 }
0.4
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[test]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.7", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
asdf==2.11.2 asdf-astropy==0.2.0 asdf-coordinates-schemas==0.1.0 asdf-standard==1.0.1 asdf-transform-schemas==0.2.2 asdf-wcs-schemas==0.1.1 astropy==4.3.1 attrs==24.2.0 certifi @ file:///croot/certifi_1671487769961/work/certifi coverage==7.2.7 exceptiongroup==1.2.2 gwcs==0.18.1 hypothesis==6.79.4 importlib-metadata==6.7.0 importlib-resources==5.12.0 iniconfig==2.0.0 jmespath==1.0.1 jsonschema==4.9.1 ndcube==2.0.3 networkx==2.6.3 numpy==1.21.6 packaging==24.0 pkgutil_resolve_name==1.3.10 pluggy==1.2.0 pyerfa==2.0.0.3 pyrsistent==0.19.3 pytest==7.4.4 pytest-arraydiff==0.6.1 pytest-astropy==0.11.0 pytest-astropy-header==0.2.2 pytest-cov==4.1.0 pytest-doctestplus==1.0.0 pytest-filter-subpackage==0.1.2 pytest-mock==3.11.1 pytest-remotedata==0.4.1 pytest-rerunfailures==13.0 PyYAML==6.0.1 scipy==1.7.3 semantic-version==2.10.0 six==1.17.0 -e git+https://github.com/skypyproject/skypy.git@07a5b9a9751abe1c2d7ac215525acf4c6431554f#egg=skypy skypy-data @ https://github.com/skypyproject/skypy-data/archive/master.tar.gz sortedcontainers==2.4.0 specutils==1.8.1 tomli==2.0.1 typing_extensions==4.7.1 zipp==3.15.0
name: skypy channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2022.12.7=py37h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=22.3.1=py37h06a4308_0 - python=3.7.16=h7a1cb2a_0 - readline=8.2=h5eee18b_0 - setuptools=65.6.3=py37h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.38.4=py37h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - asdf==2.11.2 - asdf-astropy==0.2.0 - asdf-coordinates-schemas==0.1.0 - asdf-standard==1.0.1 - asdf-transform-schemas==0.2.2 - asdf-wcs-schemas==0.1.1 - astropy==4.3.1 - attrs==24.2.0 - coverage==7.2.7 - exceptiongroup==1.2.2 - gwcs==0.18.1 - hypothesis==6.79.4 - importlib-metadata==6.7.0 - importlib-resources==5.12.0 - iniconfig==2.0.0 - jmespath==1.0.1 - jsonschema==4.9.1 - ndcube==2.0.3 - networkx==2.6.3 - numpy==1.21.6 - packaging==24.0 - pkgutil-resolve-name==1.3.10 - pluggy==1.2.0 - pyerfa==2.0.0.3 - pyrsistent==0.19.3 - pytest==7.4.4 - pytest-arraydiff==0.6.1 - pytest-astropy==0.11.0 - pytest-astropy-header==0.2.2 - pytest-cov==4.1.0 - pytest-doctestplus==1.0.0 - pytest-filter-subpackage==0.1.2 - pytest-mock==3.11.1 - pytest-remotedata==0.4.1 - pytest-rerunfailures==13.0 - pyyaml==6.0.1 - scipy==1.7.3 - semantic-version==2.10.0 - six==1.17.0 - skypy==0.4.dev12+g07a5b9a - skypy-data==0.1.0 - sortedcontainers==2.4.0 - specutils==1.8.1 - tomli==2.0.1 - typing-extensions==4.7.1 - zipp==3.15.0 prefix: /opt/conda/envs/skypy
[ "skypy/pipeline/tests/test_config.py::test_keys_must_be_strings", "skypy/pipeline/tests/test_config.py::test_nested_keys_must_be_strings", "skypy/pipeline/tests/test_config.py::test_kwarg_must_be_strings" ]
[]
[ "skypy/pipeline/tests/test_config.py::test_load_skypy_yaml", "skypy/pipeline/tests/test_config.py::test_yaml_quantities" ]
[]
BSD 3-Clause "New" or "Revised" License
8,939
414
[ "skypy/pipeline/_config.py" ]
googleapis__python-firestore-254
75d0a4821b09c3bed710353cf86082e41c28191f
2020-11-12 16:21:43
ca7cc5bd64f3eec6d153ad179ad81330186713d2
diff --git a/google/cloud/firestore_v1/base_client.py b/google/cloud/firestore_v1/base_client.py index f532ec1..7b9b228 100644 --- a/google/cloud/firestore_v1/base_client.py +++ b/google/cloud/firestore_v1/base_client.py @@ -25,6 +25,7 @@ In the hierarchy of API concepts """ import os +import grpc # type: ignore import google.api_core.client_options # type: ignore import google.api_core.path_template # type: ignore @@ -147,9 +148,7 @@ class BaseClient(ClientWithProject): # We need this in order to set appropriate keepalive options. if self._emulator_host is not None: - # TODO(microgen): this likely needs to be adapted to use insecure_channel - # on new generated surface. - channel = transport.create_channel(host=self._emulator_host) + channel = grpc.insecure_channel(self._emulator_host) else: channel = transport.create_channel( self._target, diff --git a/noxfile.py b/noxfile.py index 0f79223..567f6bd 100644 --- a/noxfile.py +++ b/noxfile.py @@ -27,10 +27,11 @@ PYTYPE_VERSION = "pytype==2020.7.24" BLACK_VERSION = "black==19.10b0" BLACK_PATHS = ["docs", "google", "tests", "noxfile.py", "setup.py"] -DEFAULT_PYTHON_VERSION = "3.9" -SYSTEM_TEST_PYTHON_VERSIONS = ["3.9"] +DEFAULT_PYTHON_VERSION = "3.8" +SYSTEM_TEST_PYTHON_VERSIONS = ["3.8"] UNIT_TEST_PYTHON_VERSIONS = ["3.6", "3.7", "3.8", "3.9"] + @nox.session(python=DEFAULT_PYTHON_VERSION) def lint(session): """Run linters. @@ -98,6 +99,7 @@ def default(session): *session.posargs, ) + @nox.session(python=UNIT_TEST_PYTHON_VERSIONS) def unit(session): """Run the unit test suite."""
Emulator with Anonymous Credentials is No Longer Possible Previously on version `1.9.0`, setting the environment variable `FIRESTORE_EMULATOR_HOST` and providing `AnonymousCredentials` allowed the transport to be established successfully. After upgrading to `2.0.0`, we are receiving a `DefaultCredentialsError` instead. When `FIRESTORE_EMULATOR_HOST` is set, the credentials provided to the client are no longer respected. #### Environment details Running an instance of the Firestore emulator inside of a Docker container - OS type and version: CentOS 7.5 - Python version: 3.8 - pip version: 20.2 - `google-cloud-firestore` version: 2.0.0 - `google-auth` version: 1.23.0 #### Steps to reproduce 1. Set `FIRESTORE_EMULATOR_HOST` environment variable `e.g. FIRESTORE_EMULATOR_HOST=emulator_host:8200` 2. Instantiate a new Firestore client by providing `AnonymousCredentials` 3. Execute an example query/request using this client #### Code example ```python # e.g. export FIRESTORE_EMULATOR_HOST=emulator_host:8200 from google.auth.credentials import AnonymousCredentials from google.cloud.firestore import Client credentials = AnonymousCredentials() client = Client(project="my-project", credentials=credentials) product = db.collection("products").document("product_abcd").get() print(product) ``` #### Stack trace ``` File "/pyenv/versions/test-app/lib/python3.8/site-packages/google/cloud/firestore_v1/document.py", line 359, in get firestore_api = self._client._firestore_api File "/pyenv/versions/test-app/lib/python3.8/site-packages/google/cloud/firestore_v1/client.py", line 104, in _firestore_api return self._firestore_api_helper( File "/pyenv/versions/test-app/lib/python3.8/site-packages/google/cloud/firestore_v1/base_client.py", line 152, in _firestore_api_helper channel = transport.create_channel(host=self._emulator_host) File "/pyenv/versions/test-app/lib/python3.8/site-packages/google/cloud/firestore_v1/services/firestore/transports/grpc.py", line 223, in create_channel return grpc_helpers.create_channel( File "/pyenv/versions/test-app/lib/python3.8/site-packages/google/api_core/grpc_helpers.py", line 275, in create_channel composite_credentials = _create_composite_credentials( File "/pyenv/versions/test-app/lib/python3.8/site-packages/google/api_core/grpc_helpers.py", line 217, in _create_composite_credentials credentials, _ = google.auth.default(scopes=scopes) File "/pyenv/versions/test-app/lib/python3.8/site-packages/google/auth/_default.py", line 356, in default raise exceptions.DefaultCredentialsError(_HELP_MESSAGE) google.auth.exceptions.DefaultCredentialsError: Could not automatically determine credentials. Please set GOOGLE_APPLICATION_CREDENTIALS or explicitly create credentials and re-run the application. For more information, please see https://cloud.google.com/docs/authentication/getting-started ```
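The patch above resolves this by creating the emulator channel with `grpc.insecure_channel`, which never consults application-default credentials; the standalone equivalent of the new call looks roughly like this (the host value is illustrative):

```python
import grpc

# Plaintext channel to the emulator: no credentials are resolved or attached.
channel = grpc.insecure_channel("emulator_host:8200")
```

With this in place, the client snippet from the issue can talk to the emulator without triggering the default-credentials lookup.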
googleapis/python-firestore
diff --git a/tests/unit/v1/test_base_client.py b/tests/unit/v1/test_base_client.py index 631733e..163ea33 100644 --- a/tests/unit/v1/test_base_client.py +++ b/tests/unit/v1/test_base_client.py @@ -67,8 +67,7 @@ class TestBaseClient(unittest.TestCase): return_value=mock.sentinel.firestore_api, ) @mock.patch( - "google.cloud.firestore_v1.services.firestore.transports.grpc.FirestoreGrpcTransport.create_channel", - autospec=True, + "grpc.insecure_channel", autospec=True, ) def test__firestore_api_property_with_emulator( self, mock_insecure_channel, mock_client @@ -83,7 +82,7 @@ class TestBaseClient(unittest.TestCase): self.assertIs(firestore_api, mock_client.return_value) self.assertIs(firestore_api, client._firestore_api_internal) - mock_insecure_channel.assert_called_once_with(host=emulator_host) + mock_insecure_channel.assert_called_once_with(emulator_host) # Call again to show that it is cached, but call count is still 1. self.assertIs(client._firestore_api, mock_client.return_value)
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 2 }, "num_modified_files": 2 }
2.0
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest", "pytest-asyncio", "aiounittest", "mock", "pytest-cov" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
aiounittest==1.5.0 cachetools==4.2.4 certifi==2025.1.31 charset-normalizer==3.4.1 coverage==7.8.0 exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work google-api-core==1.34.1 google-auth==1.35.0 google-cloud-core==1.7.3 -e git+https://github.com/googleapis/python-firestore.git@75d0a4821b09c3bed710353cf86082e41c28191f#egg=google_cloud_firestore googleapis-common-protos==1.69.2 grpcio==1.71.0 grpcio-status==1.49.0rc1 idna==3.10 iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work mock==5.2.0 packaging @ file:///croot/packaging_1734472117206/work pluggy @ file:///croot/pluggy_1733169602837/work proto-plus==1.26.1 protobuf==3.20.3 pyasn1==0.6.1 pyasn1_modules==0.4.2 pytest @ file:///croot/pytest_1738938843180/work pytest-asyncio==0.26.0 pytest-cov==6.0.0 pytz==2025.2 requests==2.32.3 rsa==4.9 six==1.17.0 tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work typing_extensions==4.13.0 urllib3==2.3.0 wrapt==1.17.2
name: python-firestore channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - exceptiongroup=1.2.0=py39h06a4308_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - packaging=24.2=py39h06a4308_0 - pip=25.0=py39h06a4308_0 - pluggy=1.5.0=py39h06a4308_0 - pytest=8.3.4=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tomli=2.0.1=py39h06a4308_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - aiounittest==1.5.0 - cachetools==4.2.4 - certifi==2025.1.31 - charset-normalizer==3.4.1 - coverage==7.8.0 - google-api-core==1.34.1 - google-auth==1.35.0 - google-cloud-core==1.7.3 - googleapis-common-protos==1.69.2 - grpcio==1.71.0 - grpcio-status==1.49.0rc1 - idna==3.10 - mock==5.2.0 - proto-plus==1.26.1 - protobuf==3.20.3 - pyasn1==0.6.1 - pyasn1-modules==0.4.2 - pytest-asyncio==0.26.0 - pytest-cov==6.0.0 - pytz==2025.2 - requests==2.32.3 - rsa==4.9 - six==1.17.0 - typing-extensions==4.13.0 - urllib3==2.3.0 - wrapt==1.17.2 prefix: /opt/conda/envs/python-firestore
[ "tests/unit/v1/test_base_client.py::TestBaseClient::test__firestore_api_property_with_emulator" ]
[]
[ "tests/unit/v1/test_base_client.py::TestBaseClient::test___database_string_property", "tests/unit/v1/test_base_client.py::TestBaseClient::test___rpc_metadata_property", "tests/unit/v1/test_base_client.py::TestBaseClient::test__firestore_api_property", "tests/unit/v1/test_base_client.py::TestBaseClient::test__rpc_metadata_property_with_emulator", "tests/unit/v1/test_base_client.py::TestBaseClient::test_field_path", "tests/unit/v1/test_base_client.py::TestBaseClient::test_write_bad_arg", "tests/unit/v1/test_base_client.py::TestBaseClient::test_write_multiple_args", "tests/unit/v1/test_base_client.py::TestBaseClient::test_write_open_neither_arg", "tests/unit/v1/test_base_client.py::TestBaseClient::test_write_option_exists", "tests/unit/v1/test_base_client.py::TestBaseClient::test_write_option_last_update", "tests/unit/v1/test_base_client.py::Test__reference_info::test_it", "tests/unit/v1/test_base_client.py::Test__get_reference::test_failure", "tests/unit/v1/test_base_client.py::Test__get_reference::test_success", "tests/unit/v1/test_base_client.py::Test__parse_batch_get::test_found", "tests/unit/v1/test_base_client.py::Test__parse_batch_get::test_missing", "tests/unit/v1/test_base_client.py::Test__parse_batch_get::test_unknown_result_type", "tests/unit/v1/test_base_client.py::Test__parse_batch_get::test_unset_result_type", "tests/unit/v1/test_base_client.py::Test__get_doc_mask::test_none", "tests/unit/v1/test_base_client.py::Test__get_doc_mask::test_paths" ]
[]
Apache License 2.0
8,942
515
[ "google/cloud/firestore_v1/base_client.py", "noxfile.py" ]
pvlib__pvanalytics-97
6eefb0af84ec5959204aa082a32628fa374b9af7
2020-11-13 20:10:44
6eefb0af84ec5959204aa082a32628fa374b9af7
wfvining: Coveralls is reporting a coverage change; however, no change is shown in the coverage reports. I'm going to chalk that up to a mistake on the coveralls/CI side.
diff --git a/pvanalytics/features/clipping.py b/pvanalytics/features/clipping.py index 45f4bd7..91af4ea 100644 --- a/pvanalytics/features/clipping.py +++ b/pvanalytics/features/clipping.py @@ -79,16 +79,18 @@ def levels(ac_power, window=4, fraction_in_window=0.75, for more information. """ + power = ac_power.copy() + power.dropna(inplace=True) num_bins = np.ceil(1.0 / rtol).astype(int) - flags = pd.Series(index=ac_power.index, data=False) - power_plateaus, bins = _detect_levels(ac_power, count=levels, + flags = pd.Series(index=power.index, data=False) + power_plateaus, bins = _detect_levels(power, count=levels, num_bins=num_bins) for lower, upper in power_plateaus: - temp = pd.Series(index=ac_power.index, data=0.0) - temp.loc[(ac_power >= lower) & (ac_power <= upper)] = 1.0 + temp = pd.Series(index=power.index, data=0.0) + temp.loc[(power >= lower) & (power <= upper)] = 1.0 flags = flags | _label_clipping(temp, window=window, frac=fraction_in_window) - return flags + return flags.reindex_like(ac_power).fillna(False) def _daytime_powercurve(ac_power, power_quantile):
Handle missing data gracefully in clipping.levels() `features.clipping.levels()` raises some very difficult-to-decipher value errors from numpy when there are NAs in the data. We should either raise a more meaningful and useful exception or simply deal with NAs by dropping them, applying the clipping filter, reindexing, and filling missing values with `False`.
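The merged patch follows the second option described above. A sketch of that drop, apply, reindex, and fill pattern in isolation; `compute_flags` is a placeholder for the existing clipping logic, not part of pvanalytics:

```python
import pandas as pd


def levels_na_safe(ac_power, compute_flags):
    """Run a clipping filter on the non-NA values, then restore the original index."""
    power = ac_power.dropna()
    flags = compute_flags(power)  # boolean Series indexed like `power`
    # Rows that were NA never get flagged as clipped.
    return flags.reindex_like(ac_power).fillna(False)


# Tiny illustration with a stand-in filter.
power = pd.Series([1.0, None, 3.0])
print(levels_na_safe(power, lambda s: s > 2.0))
```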
pvlib/pvanalytics
diff --git a/pvanalytics/tests/features/test_clipping.py b/pvanalytics/tests/features/test_clipping.py index 347ff40..d2da7cc 100644 --- a/pvanalytics/tests/features/test_clipping.py +++ b/pvanalytics/tests/features/test_clipping.py @@ -71,6 +71,16 @@ def test_levels_two_periods(quadratic, quadratic_clipped): assert not clipped[50:].any() +def test_levels_missing_data(quadratic, quadratic_clipped): + quadratic[10:20] = np.nan + quadratic_clipped[10:20] = np.nan + assert_series_equal( + pd.Series(False, quadratic.index), + clipping.levels(quadratic, window=10) + ) + assert not clipping.levels(quadratic_clipped, window=10)[10:20].any() + + def test_threshold_no_clipping(quadratic): """In a data set with a single quadratic there is no clipping.""" quadratic.index = pd.date_range(
{ "commit_name": "merge_commit", "failed_lite_validators": [], "has_test_patch": true, "is_lite": true, "llm_score": { "difficulty_score": 1, "issue_text_score": 2, "test_score": 2 }, "num_modified_files": 1 }
unknown
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[all]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov" ], "pre_install": null, "python": "3.7", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
alabaster==0.7.13 Babel==2.14.0 certifi @ file:///croot/certifi_1671487769961/work/certifi charset-normalizer==3.4.1 coverage==7.2.7 docutils==0.20.1 exceptiongroup==1.2.2 h5py==3.8.0 idna==3.10 imagesize==1.4.1 importlib-metadata==6.7.0 iniconfig==2.0.0 Jinja2==3.1.6 MarkupSafe==2.1.5 numpy==1.21.6 packaging==24.0 pandas==1.0.5 patsy==1.0.1 pluggy==1.2.0 -e git+https://github.com/pvlib/pvanalytics.git@6eefb0af84ec5959204aa082a32628fa374b9af7#egg=pvanalytics pvlib==0.10.3 Pygments==2.17.2 pytest==7.4.4 pytest-cov==4.1.0 python-dateutil==2.9.0.post0 pytz==2025.2 requests==2.31.0 ruptures==1.1.9 scipy==1.7.3 six==1.17.0 snowballstemmer==2.2.0 Sphinx==2.2.0 sphinxcontrib-applehelp==1.0.2 sphinxcontrib-devhelp==1.0.2 sphinxcontrib-htmlhelp==2.0.0 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==1.0.3 sphinxcontrib-serializinghtml==1.1.5 statsmodels==0.13.5 tomli==2.0.1 typing_extensions==4.7.1 urllib3==2.0.7 zipp==3.15.0
name: pvanalytics channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2022.12.7=py37h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=22.3.1=py37h06a4308_0 - python=3.7.16=h7a1cb2a_0 - readline=8.2=h5eee18b_0 - setuptools=65.6.3=py37h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.38.4=py37h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - alabaster==0.7.13 - babel==2.14.0 - charset-normalizer==3.4.1 - coverage==7.2.7 - docutils==0.20.1 - exceptiongroup==1.2.2 - h5py==3.8.0 - idna==3.10 - imagesize==1.4.1 - importlib-metadata==6.7.0 - iniconfig==2.0.0 - jinja2==3.1.6 - markupsafe==2.1.5 - numpy==1.21.6 - packaging==24.0 - pandas==1.0.5 - patsy==1.0.1 - pluggy==1.2.0 - pvlib==0.10.3 - pygments==2.17.2 - pytest==7.4.4 - pytest-cov==4.1.0 - python-dateutil==2.9.0.post0 - pytz==2025.2 - requests==2.31.0 - ruptures==1.1.9 - scipy==1.7.3 - six==1.17.0 - snowballstemmer==2.2.0 - sphinx==2.2.0 - sphinxcontrib-applehelp==1.0.2 - sphinxcontrib-devhelp==1.0.2 - sphinxcontrib-htmlhelp==2.0.0 - sphinxcontrib-jsmath==1.0.1 - sphinxcontrib-qthelp==1.0.3 - sphinxcontrib-serializinghtml==1.1.5 - statsmodels==0.13.5 - tomli==2.0.1 - typing-extensions==4.7.1 - urllib3==2.0.7 - zipp==3.15.0 prefix: /opt/conda/envs/pvanalytics
[ "pvanalytics/tests/features/test_clipping.py::test_levels_missing_data" ]
[]
[ "pvanalytics/tests/features/test_clipping.py::test_levels", "pvanalytics/tests/features/test_clipping.py::test_levels_no_clipping", "pvanalytics/tests/features/test_clipping.py::test_levels_compound", "pvanalytics/tests/features/test_clipping.py::test_levels_compound_clipped", "pvanalytics/tests/features/test_clipping.py::test_levels_two_periods", "pvanalytics/tests/features/test_clipping.py::test_threshold_no_clipping", "pvanalytics/tests/features/test_clipping.py::test_threshold_no_clipping_with_night", "pvanalytics/tests/features/test_clipping.py::test_threshold_clipping", "pvanalytics/tests/features/test_clipping.py::test_threshold_clipping_with_night", "pvanalytics/tests/features/test_clipping.py::test_threshold_clipping_with_freq", "pvanalytics/tests/features/test_clipping.py::test_threshold_clipping_with_interruption", "pvanalytics/tests/features/test_clipping.py::test_threshold_clipping_four_days", "pvanalytics/tests/features/test_clipping.py::test_threshold_no_clipping_four_days" ]
[]
MIT License
8,950
348
[ "pvanalytics/features/clipping.py" ]
mmngreco__IneqPy-17
0223d5fb125a6561633c9849817c1b299da84a4e
2020-11-15 19:09:49
0223d5fb125a6561633c9849817c1b299da84a4e
diff --git a/ineqpy/api.py b/ineqpy/api.py index 07167e1..fab2179 100644 --- a/ineqpy/api.py +++ b/ineqpy/api.py @@ -1,22 +1,18 @@ -"""This module extend pandas.DataFrames with the main functions from statistics and -inequality modules. +"""This module extend pandas.DataFrames with the main functions from statistics +and inequality modules. """ -from . import _statistics from . import inequality from . import statistics -from . import utils -from .statistics import mean from functools import partial from types import MethodType import inspect -import numpy as np import pandas as pd -class Convey(pd.DataFrame): +class Convey: def __init__( self, data=None, @@ -26,9 +22,7 @@ class Convey(pd.DataFrame): group=None, **kw ): - super(Convey, self).__init__( - data=data, index=index, columns=columns, **kw - ) + self.df = pd.DataFrame(data=data, index=index, columns=columns, **kw) self.weights = weights self.group = group self._attach_method(statistics, self) @@ -38,12 +32,13 @@ class Convey(pd.DataFrame): def _constructor(self): return Survey - @staticmethod + @classmethod def _attach_method(module, instance): # get methods names contained in module res_names = list() res_methods = list() method_name_list = inspect.getmembers(module, inspect.isfunction) + for method_name, func in method_name_list: # if method_name.startswith('_'): continue # avoid private methods func = getattr(module, method_name) # get function @@ -57,10 +52,8 @@ class Convey(pd.DataFrame): res_names.append(method_name) setattr(instance, method_name, func) - _constructor_sliced = pd.Series - -class Survey(pd.DataFrame): +class Survey: def __init__( self, data=None, @@ -70,18 +63,10 @@ class Survey(pd.DataFrame): group=None, **kw ): - super(Survey, self).__init__( - data=data, index=index, columns=columns, **kw - ) + self.df = pd.DataFrame(data=data, index=index, columns=columns, **kw) self.weights = weights self.group = group - @property - def _constructor(self): - return Survey - - _constructor_sliced = pd.Series - def c_moment( self, variable=None, weights=None, order=2, param=None, ddof=0 ): @@ -120,9 +105,12 @@ class Survey(pd.DataFrame): Implement: https://en.wikipedia.org/wiki/L-moment#cite_note-wang:96-6 """ + data = self.df + if weights is None: weights = self.weights - return statistics.c_moment(variable, weights, self, order, param, ddof) + + return statistics.c_moment(variable, weights, data, order, param, ddof) def percentile( self, variable=None, weights=None, p=50, interpolate="lower" @@ -144,9 +132,10 @@ class Survey(pd.DataFrame): percentile : float or pd.Series """ + data = self.df if weights is None: weights = self.weights - return statistics.percentile(variable, weights, self, p, interpolate) + return statistics.percentile(variable, weights, data, p, interpolate) def std_moment( self, variable=None, weights=None, param=None, order=3, ddof=0 @@ -186,10 +175,11 @@ class Survey(pd.DataFrame): implementation. """ + data = self.df if weights is None: weights = self.weights return statistics.std_moment( - variable, weights, self, param, order, ddof + variable, weights, data, param, order, ddof ) def mean(self, variable=None, weights=None): @@ -211,9 +201,10 @@ class Survey(pd.DataFrame): mean : array-like or float """ # if pass a DataFrame separate variables. + data = self.df if weights is None: weights = self.weights - return statistics.mean(variable, weights, self) + return statistics.mean(variable, weights, data) def density(self, variable=None, weights=None, groups=None): """Calculates density in percentage. 
This make division of variable @@ -237,9 +228,10 @@ class Survey(pd.DataFrame): Retrieved: https://en.wikipedia.org/w/index.php?title=Histogram&oldid=779516918 """ + data = self.df if weights is None: weights = self.weights - return statistics.density(variable, weights, groups, self) + return statistics.density(variable, weights, groups, data) def var(self, variable=None, weights=None, ddof=0): """Calculate the population variance of `variable` given `weights`. @@ -271,9 +263,10 @@ class Survey(pd.DataFrame): ----- If stratificated sample must pass with groupby each strata. """ + data = self.df if weights is None: weights = self.weights - return statistics.var(variable, weights, self, ddof) + return statistics.var(variable, weights, data, ddof) def coef_variation(self, variable=None, weights=None): """Calculate the coefficient of variation of a `variable` given weights. @@ -301,9 +294,10 @@ class Survey(pd.DataFrame): oldid=778842331 """ # TODO complete docstring + data = self.df if weights is None: weights = self.weights - return statistics.coef_variation(variable, weights, self) + return statistics.coef_variation(variable, weights, data) def kurt(self, variable=None, weights=None): """Calculate the asymmetry coefficient @@ -330,9 +324,10 @@ class Survey(pd.DataFrame): ----- It is an alias of the standardized fourth-order moment. """ + data = self.df if weights is None: weights = self.weights - return statistics.kurt(variable, weights, self) + return statistics.kurt(variable, weights, data) def skew(self, variable=None, weights=None): """Returns the asymmetry coefficient of a sample. @@ -361,9 +356,10 @@ class Survey(pd.DataFrame): It is an alias of the standardized third-order moment. """ + data = self.df if weights is None: weights = self.weights - return statistics.skew(variable, weights, self) + return statistics.skew(variable, weights, data) # INEQUALITY #  ---------- @@ -394,9 +390,10 @@ class Survey(pd.DataFrame): from micro-data. National Tax Journal. http://doi.org/10.2307/41788716 """ # TODO complete docstring + data = self.df if weights is None: weights = self.weights - return inequality.concentration(income, weights, self, sort) + return inequality.concentration(income, weights, data, sort) def lorenz(self, income=None, weights=None): """In economics, the Lorenz curve is a graphical representation of the @@ -430,11 +427,12 @@ class Survey(pd.DataFrame): Retrieved 14:34, May 15, 2017, from https://en.wikipedia.org/w/index.php?title=Lorenz_curve&oldid=764853675 """ + data = self.df if weights is None: weights = self.weights if income is None: income = self.income - return inequality.lorenz(income, weights, self) + return inequality.lorenz(income, weights, data) def gini(self, income=None, weights=None, sort=True): """The Gini coefficient (sometimes expressed as a Gini ratio or a @@ -490,9 +488,10 @@ class Survey(pd.DataFrame): - Implement statistical deviation calculation, VAR (GINI) """ + data = self.df if weights is None: weights = self.weights - return inequality.gini(income, weights, self, sort) + return inequality.gini(income, weights, data, sort) def atkinson(self, income=None, weights=None, e=0.5): """More precisely labelled a family of income grouped measures, the @@ -538,9 +537,10 @@ class Survey(pd.DataFrame): http://www.jstor.org/stable/41788716 - The results has difference with stata, maybe have a bug. 
""" + data = self.df if weights is None: weights = self.weights - return inequality.atkinson(income, weights, self, e) + return inequality.atkinson(income, weights, data, e) def kakwani(self, tax=None, income_pre_tax=None, weights=None): """The Kakwani (1977) index of tax progressivity is defined as twice the @@ -576,9 +576,10 @@ class Survey(pd.DataFrame): micro-data. National Tax Journal. http://doi.org/10.2307/41788716 """ # main calc + data = self.df if weights is None: weights = self.weights - return inequality.kakwani(tax, income_pre_tax, weights, self) + return inequality.kakwani(tax, income_pre_tax, weights, data) def reynolds_smolensky( self, income_pre_tax=None, income_post_tax=None, weights=None @@ -614,10 +615,11 @@ class Survey(pd.DataFrame): Jenkins, S. (1988). Calculating income distribution indices from micro-data. National Tax Journal. http://doi.org/10.2307/41788716 """ + data = self.df if weights is None: weights = self.weights return inequality.reynolds_smolensky( - income_pre_tax, income_post_tax, weights, self + income_pre_tax, income_post_tax, weights, data ) def theil(self, income=None, weights=None): @@ -650,9 +652,11 @@ class Survey(pd.DataFrame): https://en.wikipedia.org/w/index.php?title=Theil_index&oldid=755407818 """ + data = self.df if weights is None: weights = self.weights - return inequality.theil(income, weights, self) + + return inequality.theil(income, weights, data) def avg_tax_rate(self, total_tax=None, total_base=None, weights=None): """This function compute the average tax rate given a base income and a @@ -675,6 +679,8 @@ class Survey(pd.DataFrame): (2011). Panel de declarantes de IRPF 1999-2007: Metodología, estructura y variables. Documentos. """ + data = self.df if weights is None: weights = self.weights - return inequality.avg_tax_rate(total_tax, total_base, weights, self) + + return inequality.avg_tax_rate(total_tax, total_base, weights, data) diff --git a/ineqpy/inequality.py b/ineqpy/inequality.py index de8ffdf..1bd07ab 100644 --- a/ineqpy/inequality.py +++ b/ineqpy/inequality.py @@ -96,16 +96,20 @@ def lorenz(income, weights=None, data=None): total_income = income * weights idx_sort = np.argsort(income) + weights = weights[idx_sort].cumsum() / weights.sum() weights = weights.reshape(len(weights), 1) + total_income = total_income[idx_sort].cumsum() / total_income.sum() total_income = total_income.reshape(len(total_income), 1) - res = pd.DataFrame( - np.c_[weights, total_income], - columns=["Equality", "Income"], - index=weights, - ) + + # to pandas + data = np.hstack([weights, total_income]) + columns = ["Equality", "Income"] + index = pd.Index(weights.round(3).squeeze()) + res = pd.DataFrame(data=data, columns=columns, index=index) res.index.name = "x" + return res
Cannot retrieve DataFrame Hello, this works: svy.lorenz('Reduced.Lunch').plot(legend=False,colors=['silver', 'black']), but I cannot get the DataFrame itself: svy.lorenz('Reduced.Lunch') gives me AttributeError: 'numpy.ndarray' object has no attribute 'endswith'. Can you help me?
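For context, here is a rough, standalone sketch of the Lorenz-curve construction the patch above moves to: cumulative population share against cumulative income share, returned as a pandas DataFrame so it can be inspected as well as plotted. This is not the IneqPy API; names and the index rounding are illustrative assumptions.

```python
import numpy as np
import pandas as pd


def lorenz_frame(income, weights):
    income = np.asarray(income, dtype=float)
    weights = np.asarray(weights, dtype=float)
    order = np.argsort(income)
    # Cumulative shares, sorted from poorest to richest.
    pop_share = np.cumsum(weights[order]) / weights.sum()
    inc_share = np.cumsum((income * weights)[order]) / (income * weights).sum()
    return pd.DataFrame(
        {"Equality": pop_share, "Income": inc_share},
        index=pd.Index(pop_share.round(3), name="x"),
    )


print(lorenz_frame([1, 2, 3, 10], [1, 1, 1, 1]))
```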
mmngreco/IneqPy
diff --git a/tests/test_api.py b/tests/test_api.py index 47c37ee..cdfbf2a 100644 --- a/tests/test_api.py +++ b/tests/test_api.py @@ -1,5 +1,6 @@ -import numpy as np import ineqpy +import numpy as np +import pandas as pd def test_api(): @@ -7,17 +8,34 @@ def test_api(): # only checks that all methods works. svy = ineqpy.api.Survey data = np.random.randint(0, 100, (int(1e3), 3)) - w = np.random.randint(1, 10, int(1e3)) - data = np.c_[data, w] + w = np.random.randint(1, 10, int(1e3)).reshape(-1, 1) + data = np.hstack([data, w]) columns = list("abcw") + try: + df = svy(data=data, columns=columns, weights="w") + df.weights + df.mean("a") + df.var("a") + df.skew("a") + df.kurt("a") + df.gini("a") + df.atkinson("a") + df.theil("a") + df.percentile("a") + assert True + except Exception as e: + assert False, e + + +def test_df(): + # GH #15 + LEN = 10 + values = [np.arange(LEN), np.random.randint(1, 10, LEN)] + df = pd.DataFrame(values, index=["x", "n"]).T - df = svy(data=data, columns=columns, weights="w") - df.weights - df.mean("a") - df.var("a") - df.skew("a") - df.kurt("a") - df.gini("a") - df.atkinson("a") - df.theil("a") - df.percentile("a") + try: + svy = ineqpy.api.Survey(df, df.index, df.columns, weights="n") + svy.lorenz("x") + assert True + except Exception as e: + assert False, e
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 2 }, "num_modified_files": 2 }
0.2
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[full]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "pytest-sugar", "check-manifest", "pydocstyle", "flake8", "black" ], "pre_install": null, "python": "3.9", "reqs_path": [ "test_requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
black==25.1.0 build==1.2.2.post1 check-manifest==0.50 click==8.1.8 coverage==7.8.0 exceptiongroup==1.2.2 flake8==7.2.0 importlib_metadata==8.6.1 -e git+https://github.com/mmngreco/IneqPy.git@0223d5fb125a6561633c9849817c1b299da84a4e#egg=IneqPy iniconfig==2.1.0 llvmlite==0.43.0 mccabe==0.7.0 mypy-extensions==1.0.0 numba==0.60.0 numpy==2.0.2 packaging==24.2 pandas==2.2.3 pathspec==0.12.1 platformdirs==4.3.7 pluggy==1.5.0 pycodestyle==2.13.0 pydocstyle==6.3.0 pyflakes==3.3.1 pyproject_hooks==1.2.0 pytest==8.3.5 pytest-cov==6.0.0 pytest-sugar==1.0.0 python-dateutil==2.9.0.post0 pytz==2025.2 six==1.17.0 snowballstemmer==2.2.0 termcolor==2.5.0 tomli==2.2.1 typing_extensions==4.13.0 tzdata==2025.2 zipp==3.21.0
name: IneqPy channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - black==25.1.0 - build==1.2.2.post1 - check-manifest==0.50 - click==8.1.8 - coverage==7.8.0 - exceptiongroup==1.2.2 - flake8==7.2.0 - importlib-metadata==8.6.1 - iniconfig==2.1.0 - llvmlite==0.43.0 - mccabe==0.7.0 - mypy-extensions==1.0.0 - numba==0.60.0 - numpy==2.0.2 - packaging==24.2 - pandas==2.2.3 - pathspec==0.12.1 - platformdirs==4.3.7 - pluggy==1.5.0 - pycodestyle==2.13.0 - pydocstyle==6.3.0 - pyflakes==3.3.1 - pyproject-hooks==1.2.0 - pytest==8.3.5 - pytest-cov==6.0.0 - pytest-sugar==1.0.0 - python-dateutil==2.9.0.post0 - pytz==2025.2 - six==1.17.0 - snowballstemmer==2.2.0 - termcolor==2.5.0 - tomli==2.2.1 - typing-extensions==4.13.0 - tzdata==2025.2 - zipp==3.21.0 prefix: /opt/conda/envs/IneqPy
[ "tests/test_api.py::test_df" ]
[]
[ "tests/test_api.py::test_api" ]
[]
MIT License
8,961
2,989
[ "ineqpy/api.py", "ineqpy/inequality.py" ]
mmngreco__IneqPy-19
5b4d40b9b77304da1e7a883701b21b655d8bb4d4
2020-11-15 20:05:28
5b4d40b9b77304da1e7a883701b21b655d8bb4d4
mmngreco: Needs test
diff --git a/src/ineqpy/inequality.py b/src/ineqpy/inequality.py index edffbaf..b11348c 100644 --- a/src/ineqpy/inequality.py +++ b/src/ineqpy/inequality.py @@ -28,6 +28,7 @@ __all__ = [ "reynolds_smolensky", "theil", "ratio_top_rest", + "hoover", ] @@ -516,3 +517,55 @@ def ratio_top_rest(income, weights=None, data=None, top_percentage=10.0): r += error return t / r + + +def hoover(income, weights=None, data=None): + """Calculate Hoover index. + + The Hoover index, also known as the Robin Hood index or the Schutz index, + is a measure of income metrics. It is equal to the portion of the total + community income that would have to be redistributed (taken from the richer + half of the population and given to the poorer half) for there to be income + uniformity. + + Formula: + + H = 1/2 sum_i( |xi - mu| ) / sum_i(xi) + + Parameters + ---------- + income : array-like or str + This variable represent tax payment of person, if pass array-like + then data must be None, else you pass str-name column in `data`. + weights : array-like or str + This variable represent weights of each person, if pass array-like + then data must be None, else you pass str-name column in `data`. + data : pandas.DataFrame + This variable is a DataFrame that contains all data required in it's + columns. + + Returns + ------- + hoover : float + + References + ---------- + Hoover index : https://en.wikipedia.org/wiki/Hoover_index + """ + if data is not None: + income, weights = utils.extract_values(data, income, weights) + else: + income = income.copy() + weights = weights.copy() + + income, weights = utils.not_null_condition(income, weights) + + # variables needed + mu = mean(variable=income, weights=weights) + f_i = utils.normalize(weights) + xi = f_i * income + + # main calc + h = np.sum(abs(xi - mu)) * 0.5 / sum(xi) + + return h
Hoover index Hi, do you plan to add this index to your nice project? The project is very useful, thanks!
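As a point of reference, the Hoover (Robin Hood) index requested here has a simple textbook form: the share of total income that would have to be redistributed to reach perfect equality. The sketch below is the unweighted version only; the weighted implementation added in the patch above uses a different normalization convention, so this is an illustrative formula, not the library's code.

```python
import numpy as np


def hoover(income):
    """Unweighted Hoover index: H = 0.5 * sum|x_i - mu| / sum(x_i)."""
    income = np.asarray(income, dtype=float)
    mu = income.mean()
    return 0.5 * np.abs(income - mu).sum() / income.sum()


print(hoover(np.arange(1, 11)))  # ~0.227 for incomes 1..10
```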
mmngreco/IneqPy
diff --git a/tests/test_inequality.py b/tests/test_inequality.py index 88b7e79..6e527c3 100644 --- a/tests/test_inequality.py +++ b/tests/test_inequality.py @@ -120,7 +120,7 @@ def test_ratio_weighted_eq_unweighted(n): xw = [] for xi, wi in zip(x,w): xw += [xi]*wi # Create a list that contains - + xw = np.array(xw) assert len(xw) == np.sum(w) @@ -140,3 +140,11 @@ def test_ratio_unweighted(): obtained = inequality.ratio_top_rest(x) expected = 0.22203712517848642 assert pytest.approx(obtained) == expected + + +def test_hoover_index(): + x = np.arange(10) + w = np.ones(10) + obtained = inequality.hoover(x, w) + expected = 4 + np.testing.assert_almost_equal(obtained, expected)
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 2, "test_score": 2 }, "num_modified_files": 1 }
0.3
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.8", "reqs_path": [ "docs/requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
alabaster==0.7.13 astroid==3.2.4 babel==2.17.0 certifi==2025.1.31 charset-normalizer==3.4.1 docutils==0.20.1 exceptiongroup==1.2.2 idna==3.10 imagesize==1.4.1 importlib_metadata==8.5.0 -e git+https://github.com/mmngreco/IneqPy.git@5b4d40b9b77304da1e7a883701b21b655d8bb4d4#egg=IneqPy iniconfig==2.1.0 Jinja2==3.1.6 llvmlite==0.41.1 markdown-it-py==3.0.0 MarkupSafe==2.1.5 mdit-py-plugins==0.4.2 mdurl==0.1.2 myst-parser==3.0.1 numba==0.58.1 numpy==1.24.4 numpydoc==1.7.0 packaging==24.2 pandas==2.0.3 pluggy==1.5.0 Pygments==2.19.1 pytest==8.3.5 python-dateutil==2.9.0.post0 pytz==2025.2 PyYAML==6.0.2 requests==2.32.3 six==1.17.0 snowballstemmer==2.2.0 Sphinx==7.1.2 sphinx-autoapi==3.5.0 sphinx-rtd-theme==3.0.2 sphinxcontrib-applehelp==1.0.4 sphinxcontrib-devhelp==1.0.2 sphinxcontrib-htmlhelp==2.0.1 sphinxcontrib-jquery==4.1 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==1.0.3 sphinxcontrib-serializinghtml==1.1.5 stdlib-list==0.10.0 tabulate==0.9.0 tomli==2.2.1 typing_extensions==4.13.0 tzdata==2025.2 urllib3==2.2.3 zipp==3.20.2
name: IneqPy channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=24.2=py38h06a4308_0 - python=3.8.20=he870216_0 - readline=8.2=h5eee18b_0 - setuptools=75.1.0=py38h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.44.0=py38h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - alabaster==0.7.13 - astroid==3.2.4 - babel==2.17.0 - certifi==2025.1.31 - charset-normalizer==3.4.1 - docutils==0.20.1 - exceptiongroup==1.2.2 - idna==3.10 - imagesize==1.4.1 - importlib-metadata==8.5.0 - ineqpy==0.3.0+7.g5b4d40b - iniconfig==2.1.0 - jinja2==3.1.6 - llvmlite==0.41.1 - markdown-it-py==3.0.0 - markupsafe==2.1.5 - mdit-py-plugins==0.4.2 - mdurl==0.1.2 - myst-parser==3.0.1 - numba==0.58.1 - numpy==1.24.4 - numpydoc==1.7.0 - packaging==24.2 - pandas==2.0.3 - pluggy==1.5.0 - pygments==2.19.1 - pytest==8.3.5 - python-dateutil==2.9.0.post0 - pytz==2025.2 - pyyaml==6.0.2 - requests==2.32.3 - six==1.17.0 - snowballstemmer==2.2.0 - sphinx==7.1.2 - sphinx-autoapi==3.5.0 - sphinx-rtd-theme==3.0.2 - sphinxcontrib-applehelp==1.0.4 - sphinxcontrib-devhelp==1.0.2 - sphinxcontrib-htmlhelp==2.0.1 - sphinxcontrib-jquery==4.1 - sphinxcontrib-jsmath==1.0.1 - sphinxcontrib-qthelp==1.0.3 - sphinxcontrib-serializinghtml==1.1.5 - stdlib-list==0.10.0 - tabulate==0.9.0 - tomli==2.2.1 - typing-extensions==4.13.0 - tzdata==2025.2 - urllib3==2.2.3 - zipp==3.20.2 prefix: /opt/conda/envs/IneqPy
[ "tests/test_inequality.py::test_hoover_index" ]
[]
[ "tests/test_inequality.py::test_gini_2d", "tests/test_inequality.py::test_gini_1d", "tests/test_inequality.py::test_gini_1d_0_w", "tests/test_inequality.py::test_gini_1d_0_series", "tests/test_inequality.py::test_gini_1d_1_series", "tests/test_inequality.py::test_gini_1d_1_w", "tests/test_inequality.py::test_atkinson_2d", "tests/test_inequality.py::test_atkinson_1d", "tests/test_inequality.py::test_atkinson_1d_1_w", "tests/test_inequality.py::test_theil_1d_1_w", "tests/test_inequality.py::test_ratio_eqaulity", "tests/test_inequality.py::test_ratio_equality_fracc", "tests/test_inequality.py::test_ratio_1d", "tests/test_inequality.py::test_ratio_2d", "tests/test_inequality.py::test_ratio_weighted_eq_unweighted[15]", "tests/test_inequality.py::test_ratio_weighted_eq_unweighted[16]", "tests/test_inequality.py::test_ratio_weighted_eq_unweighted[17]", "tests/test_inequality.py::test_ratio_weighted_eq_unweighted[18]", "tests/test_inequality.py::test_ratio_weighted_eq_unweighted[19]", "tests/test_inequality.py::test_ratio_unweighted" ]
[]
MIT License
8,962
579
[ "src/ineqpy/inequality.py" ]
SirAnthony__slpp-32
b9474965a8fd5759847c8cbdfa873069a43a0bb8
2020-11-17 13:03:09
b9474965a8fd5759847c8cbdfa873069a43a0bb8
diff --git a/slpp.py b/slpp.py index 19a6405..9f35cc0 100644 --- a/slpp.py +++ b/slpp.py @@ -43,9 +43,6 @@ class SLPP(object): def decode(self, text): if not text or not isinstance(text, six.string_types): return - # FIXME: only short comments removed - reg = re.compile('--.*$', re.M) - text = reg.sub('', text, 0) self.text = text self.at, self.ch, self.depth = 0, '', 0 self.len = len(text) @@ -101,6 +98,18 @@ class SLPP(object): else: break + self.skip_comments() + + def skip_comments(self): + if self.ch == '-' and self.text[self.at] == '-': + # `--` is a comment, skip to next new line + while self.ch: + if re.match('\n', self.ch): + self.white() + break + else: + self.next_chr() + def next_chr(self): if self.at >= self.len: self.ch = None
lua.decode('"--3"') fails. This looks like a bug in the string parsing to me, as if the parser tries to detect comments inside literal strings. `'--3'` is a perfectly valid Lua string, of course.
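The issue boils down to stripping comments with a global regex before parsing: that also mangles `--` sequences inside string literals. The patch above instead skips comments inside the lexer, where string state is known. The snippet below is only an illustration of the difference; it is not SLPP code, and the string-aware stripper is a simplified sketch (escape handling is minimal).

```python
import re


def strip_comments_naive(text):
    # What the old approach effectively did: remove `--` to end of line everywhere.
    return re.sub(r'--.*$', '', text, flags=re.M)


def strip_comments_string_aware(text):
    out, i, in_str, quote = [], 0, False, ''
    while i < len(text):
        ch = text[i]
        if in_str:
            out.append(ch)
            if ch == quote and text[i - 1] != '\\':
                in_str = False
        elif ch in ('"', "'"):
            in_str, quote = True, ch
            out.append(ch)
        elif text.startswith('--', i):
            # Skip to the end of the line; keep the newline itself.
            while i < len(text) and text[i] != '\n':
                i += 1
            continue
        else:
            out.append(ch)
        i += 1
    return ''.join(out)


print(strip_comments_naive('"--3"'))         # broken: '"'
print(strip_comments_string_aware('"--3"'))  # intact: '"--3"'
```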
SirAnthony/slpp
diff --git a/tests.py b/tests.py index 6f03ee8..9fe8353 100755 --- a/tests.py +++ b/tests.py @@ -157,5 +157,14 @@ class TestSLPP(unittest.TestCase): t('{ [5] = 111, [4] = 52.1, 43, [3] = 54.3, false, 9 }') t('{ [1] = 1, [2] = "2", 3, 4, [5] = 5 }') + def test_comments(self): + lua = '-- starting comment\n{\n["multiline_string"] = "A multiline string where one of the lines starts with\n-- two dashes",\n-- middle comment\n["another_multiline_string"] = "A multiline string where one of the lines starts with\n-- two dashes\nfollowed by another line",\n["trailing_comment"] = "A string with" -- a trailing comment\n}\n-- ending comment' + dict = { + "multiline_string": "A multiline string where one of the lines starts with\n-- two dashes", + "another_multiline_string": "A multiline string where one of the lines starts with\n-- two dashes\nfollowed by another line", + "trailing_comment": "A string with" + } + self.assertEqual(slpp.decode(lua), dict) + if __name__ == '__main__': unittest.main()
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 0 }, "num_modified_files": 1 }
unknown
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "", "pip_packages": [ "pytest", "pytest-cov" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
coverage==7.8.0 exceptiongroup==1.2.2 iniconfig==2.1.0 packaging==24.2 pluggy==1.5.0 pytest==8.3.5 pytest-cov==6.0.0 six==1.17.0 -e git+https://github.com/SirAnthony/slpp.git@b9474965a8fd5759847c8cbdfa873069a43a0bb8#egg=SLPP tomli==2.2.1
name: slpp channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - coverage==7.8.0 - exceptiongroup==1.2.2 - iniconfig==2.1.0 - packaging==24.2 - pluggy==1.5.0 - pytest==8.3.5 - pytest-cov==6.0.0 - six==1.17.0 - tomli==2.2.1 prefix: /opt/conda/envs/slpp
[ "tests.py::TestSLPP::test_comments" ]
[]
[ "tests.py::TestUtilityFunctions::test_differ", "tests.py::TestUtilityFunctions::test_is_iterator", "tests.py::TestSLPP::test_basic", "tests.py::TestSLPP::test_bool", "tests.py::TestSLPP::test_consistency", "tests.py::TestSLPP::test_nil", "tests.py::TestSLPP::test_numbers", "tests.py::TestSLPP::test_string", "tests.py::TestSLPP::test_table", "tests.py::TestSLPP::test_unicode" ]
[]
MIT License
8,983
280
[ "slpp.py" ]
ucfopen__canvasapi-443
e74ca803309df744b0619e35ae2df3473b828a75
2020-11-18 17:41:08
94d735ad6c691821e08966641fbeac6bb900ecf0
coveralls: [![Coverage Status](https://coveralls.io/builds/35068988/badge)](https://coveralls.io/builds/35068988) Coverage remained the same at 100.0% when pulling **8b3300b9b0acd5f674bdd2fc4d544f4029858653 on blepabyte:issue/386-encoding-fixes** into **463174ed5339acf069830e848b08ed29d8797e56 on ucfopen:develop**. Thetwam: Great find and solution. Thanks for the help @blepabyte ! Travis is being super slow, but it passed in py 3.6 there and on 3.8 on my local. Am going to override merge.
diff --git a/canvasapi/file.py b/canvasapi/file.py index ea68a52..81439f6 100644 --- a/canvasapi/file.py +++ b/canvasapi/file.py @@ -32,11 +32,15 @@ class File(CanvasObject): with open(location, "wb") as file_out: file_out.write(response.content) - def get_contents(self): + def get_contents(self, binary=False): """ Download the contents of this file. + Pass binary=True to return a bytes object instead of a str. - :rtype: str + :rtype: str or bytes """ response = self._requester.request("GET", _url=self.url) - return response.text + if binary: + return response.content + else: + return response.text diff --git a/canvasapi/requester.py b/canvasapi/requester.py index fbdfee3..2d236ce 100644 --- a/canvasapi/requester.py +++ b/canvasapi/requester.py @@ -156,7 +156,7 @@ class Requester(object): currently only the POST request of GraphQL is using this parameter. For all other methods it's just passed and ignored. :type json: `bool` - :rtype: str + :rtype: :class:`requests.Response` """ full_url = _url if _url else "{}{}".format(self.base_url, endpoint) @@ -217,9 +217,14 @@ class Requester(object): ) try: - logger.debug("Data: {data}".format(data=pformat(response.json()))) - except ValueError: - logger.debug("Data: {data}".format(data=pformat(response.text))) + logger.debug( + "Data: {data}".format(data=pformat(response.content.decode("utf-8"))) + ) + except UnicodeDecodeError: + logger.debug("Data: {data}".format(data=pformat(response.content))) + except AttributeError: + # response.content is None + logger.debug("No data") # Add response to internal cache if len(self._cache) > 4:
File.download hangs in debug statement indefinately # Describe the bug File.download hangs when downloading a ~230 MB tgz file. I waited 10+ minutes, but never long enough to see it terminate. I have not tried enough files to produce a minimal working example. Based on traceback when cancelling mid-operation, the library appears hung in this try/except block: ``` try: logger.debug("Data: {data}".format(data=pformat(response.json()))) except ValueError: logger.debug("Data: {data}".format(data=pformat(response.text))) ``` Specifically, its hung performing `response.json()` ``` ... File "/Users/gondree/anaconda/envs/hob/lib/python3.6/site-packages/canvasapi/file.py", line 32, in download response = self._requester.request("GET", _url=self.url) File "/Users/gondree/anaconda/envs/hob/lib/python3.6/site-packages/canvasapi/requester.py", line 226, in request logger.debug("Data: {data}".format(data=pformat(response.json()))) File "/Users/gondree/anaconda/envs/hob/lib/python3.6/site-packages/requests/models.py", line 898, in json return complexjson.loads(self.text, **kwargs) File "/Users/gondree/anaconda/envs/hob/lib/python3.6/site-packages/requests/models.py", line 858, in text encoding = self.apparent_encoding File "/Users/gondree/anaconda/envs/hob/lib/python3.6/site-packages/requests/models.py", line 728, in apparent_encoding return chardet.detect(self.content)['encoding'] File "/Users/gondree/anaconda/envs/hob/lib/python3.6/site-packages/chardet/__init__.py", line 38, in detect detector.feed(byte_str) File "/Users/gondree/anaconda/envs/hob/lib/python3.6/site-packages/chardet/universaldetector.py", line 211, in feed if prober.feed(byte_str) == ProbingState.FOUND_IT: File "/Users/gondree/anaconda/envs/hob/lib/python3.6/site-packages/chardet/charsetgroupprober.py", line 71, in feed state = prober.feed(byte_str) File "/Users/gondree/anaconda/envs/hob/lib/python3.6/site-packages/chardet/sbcharsetprober.py", line 79, in feed byte_str = self.filter_international_words(byte_str) File "/Users/gondree/anaconda/envs/hob/lib/python3.6/site-packages/chardet/charsetprober.py", line 87, in filter_international_words buf) File "/Users/gondree/anaconda/envs/hob/lib/python3.6/re.py", line 222, in findall return _compile(pattern, flags).findall(string) ``` # Expected behavior The expected behavior is that the file gets downloaded, no matter the size and no matter the contents. # Environment information - Python version (`python --version`) ``` $ python --version Python 3.6.2 :: Continuum Analytics, Inc. ``` - CanvasAPI version (`pip show canvasapi`) ``` Name: canvasapi Version: 0.16.0 Summary: API wrapper for the Canvas LMS Home-page: https://github.com/ucfopen/canvasapi Author: University of Central Florida - Center for Distributed Learning Author-email: [email protected] License: MIT License Location: /Users/gondree/anaconda/envs/hob/lib/python3.6/site-packages Requires: requests, pytz, six Required-by: ``` # Additional context I tried to implement a timeout, like: ``` ohandler = signal.getsignal(signal.SIGALRM) try: signal.signal(signal.SIGALRM, lambda a,b: (_ for _ in ()).throw(ValueError)) signal.alarm(2) logger.debug("Data: {data}".format(data=pformat(response.json()))) except ValueError: logger.debug("Data: {data}".format(data=pformat(response.text))) finally: signal.signal(signal.SIGALRM, ohandler) signal.alarm(0) ``` But, generally, that did not work correctly. 
The simplest fix for me was avoiding the operations when not debugging: ``` if logger.isEnabledFor(logging.DEBUG): try: logger.debug("Data: {data}".format(data=pformat(response.json()))) except ValueError: logger.debug("Data: {data}".format(data=pformat(response.text))) ``` This solved my problem. It would not be ideal if you ever needed to debug in a situation involving one of these problematic downloads. Reporting in hope that this, or a more clever fix, can be incorporated.
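The logging pattern the merged patch settles on can be sketched on its own: guard the work behind the log level and decode the raw bytes explicitly, instead of calling `response.json()`/`response.text`, which can trigger expensive charset detection on large binary payloads. The helper below is an assumption-laden illustration (it only expects a requests `Response`-like object), not the canvasapi requester itself.

```python
import logging
from pprint import pformat

logger = logging.getLogger("requester")


def log_response_body(response):
    if not logger.isEnabledFor(logging.DEBUG):
        return  # skip all formatting work when DEBUG is off
    try:
        logger.debug("Data: %s", pformat(response.content.decode("utf-8")))
    except UnicodeDecodeError:
        logger.debug("Data: %s", pformat(response.content))
    except AttributeError:  # response.content is None
        logger.debug("No data")
```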
ucfopen/canvasapi
diff --git a/tests/test_file.py b/tests/test_file.py index 97048a5..ed7a569 100644 --- a/tests/test_file.py +++ b/tests/test_file.py @@ -53,3 +53,5 @@ class TestFile(unittest.TestCase): register_uris({"file": ["file_contents"]}, m) contents = self.file.get_contents() self.assertEqual(contents, '"Hello there"') + contents_binary = self.file.get_contents(binary=True) + self.assertEqual(contents_binary, b'"Hello there"') diff --git a/tests/test_requester.py b/tests/test_requester.py index cf85bc8..df59eb9 100644 --- a/tests/test_requester.py +++ b/tests/test_requester.py @@ -32,6 +32,18 @@ class TestRequester(unittest.TestCase): response = self.requester.request("GET", "fake_get_request") self.assertEqual(response.status_code, 200) + def test_request_get_binary(self, m): + m.register_uri( + "GET", + settings.BASE_URL_WITH_VERSION + "get_binary_data", + content=b"\xff\xff\xff", + status_code=200, + headers={}, + ) + + response = self.requester.request("GET", "get_binary_data") + self.assertEqual(response.content, b"\xff\xff\xff") + def test_request_get_datetime(self, m): date = datetime.today()
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 3 }, "num_modified_files": 2 }
2.0
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "pytest-xdist", "pytest-mock", "pytest-asyncio" ], "pre_install": null, "python": "3.7", "reqs_path": [ "requirements.txt", "dev_requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
alabaster==0.7.13 Babel==2.14.0 black==23.3.0 -e git+https://github.com/ucfopen/canvasapi.git@e74ca803309df744b0619e35ae2df3473b828a75#egg=canvasapi certifi @ file:///croot/certifi_1671487769961/work/certifi cfgv==3.3.1 charset-normalizer==3.4.1 click==8.1.8 coverage==7.2.7 distlib==0.3.9 docutils==0.15.2 exceptiongroup==1.2.2 execnet==2.0.2 filelock==3.12.2 flake8==5.0.4 identify==2.5.24 idna==3.10 imagesize==1.4.1 importlib-metadata==4.2.0 iniconfig==2.0.0 isort==5.11.5 Jinja2==3.1.6 MarkupSafe==2.1.5 mccabe==0.7.0 mypy-extensions==1.0.0 nodeenv==1.9.1 packaging==24.0 pathspec==0.11.2 platformdirs==2.6.2 pluggy==1.2.0 pre-commit==2.21.0 pycodestyle==2.9.1 pyflakes==2.5.0 Pygments==2.17.2 pytest==7.4.4 pytest-asyncio==0.21.2 pytest-cov==4.1.0 pytest-mock==3.11.1 pytest-xdist==3.5.0 pytz==2025.2 PyYAML==6.0.1 requests==2.31.0 requests-mock==1.12.1 snowballstemmer==2.2.0 Sphinx==4.3.2 sphinx-rtd-theme==1.3.0 sphinx-version-warning==1.1.2 sphinxcontrib-applehelp==1.0.2 sphinxcontrib-devhelp==1.0.2 sphinxcontrib-htmlhelp==2.0.0 sphinxcontrib-jquery==4.1 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==1.0.3 sphinxcontrib-serializinghtml==1.1.5 tomli==2.0.1 typed-ast==1.5.5 typing_extensions==4.7.1 urllib3==2.0.7 virtualenv==20.16.2 zipp==3.15.0
name: canvasapi channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2022.12.7=py37h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=22.3.1=py37h06a4308_0 - python=3.7.16=h7a1cb2a_0 - readline=8.2=h5eee18b_0 - setuptools=65.6.3=py37h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.38.4=py37h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - alabaster==0.7.13 - babel==2.14.0 - black==23.3.0 - cfgv==3.3.1 - charset-normalizer==3.4.1 - click==8.1.8 - coverage==7.2.7 - distlib==0.3.9 - docutils==0.15.2 - exceptiongroup==1.2.2 - execnet==2.0.2 - filelock==3.12.2 - flake8==5.0.4 - identify==2.5.24 - idna==3.10 - imagesize==1.4.1 - importlib-metadata==4.2.0 - iniconfig==2.0.0 - isort==5.11.5 - jinja2==3.1.6 - markupsafe==2.1.5 - mccabe==0.7.0 - mypy-extensions==1.0.0 - nodeenv==1.9.1 - packaging==24.0 - pathspec==0.11.2 - platformdirs==2.6.2 - pluggy==1.2.0 - pre-commit==2.21.0 - pycodestyle==2.9.1 - pyflakes==2.5.0 - pygments==2.17.2 - pytest==7.4.4 - pytest-asyncio==0.21.2 - pytest-cov==4.1.0 - pytest-mock==3.11.1 - pytest-xdist==3.5.0 - pytz==2025.2 - pyyaml==6.0.1 - requests==2.31.0 - requests-mock==1.12.1 - snowballstemmer==2.2.0 - sphinx==4.3.2 - sphinx-rtd-theme==1.3.0 - sphinx-version-warning==1.1.2 - sphinxcontrib-applehelp==1.0.2 - sphinxcontrib-devhelp==1.0.2 - sphinxcontrib-htmlhelp==2.0.0 - sphinxcontrib-jquery==4.1 - sphinxcontrib-jsmath==1.0.1 - sphinxcontrib-qthelp==1.0.3 - sphinxcontrib-serializinghtml==1.1.5 - tomli==2.0.1 - typed-ast==1.5.5 - typing-extensions==4.7.1 - urllib3==2.0.7 - virtualenv==20.16.2 - zipp==3.15.0 prefix: /opt/conda/envs/canvasapi
[ "tests/test_file.py::TestFile::test_contents_file" ]
[]
[ "tests/test_file.py::TestFile::test__str__", "tests/test_file.py::TestFile::test_delete_file", "tests/test_file.py::TestFile::test_download_file", "tests/test_requester.py::TestRequester::test_request_400", "tests/test_requester.py::TestRequester::test_request_401_InvalidAccessToken", "tests/test_requester.py::TestRequester::test_request_401_Unauthorized", "tests/test_requester.py::TestRequester::test_request_404", "tests/test_requester.py::TestRequester::test_request_409", "tests/test_requester.py::TestRequester::test_request_422", "tests/test_requester.py::TestRequester::test_request_500", "tests/test_requester.py::TestRequester::test_request_cache", "tests/test_requester.py::TestRequester::test_request_cache_clear_after_5", "tests/test_requester.py::TestRequester::test_request_delete", "tests/test_requester.py::TestRequester::test_request_generic", "tests/test_requester.py::TestRequester::test_request_get", "tests/test_requester.py::TestRequester::test_request_get_binary", "tests/test_requester.py::TestRequester::test_request_get_datetime", "tests/test_requester.py::TestRequester::test_request_lowercase_boolean", "tests/test_requester.py::TestRequester::test_request_patch", "tests/test_requester.py::TestRequester::test_request_post", "tests/test_requester.py::TestRequester::test_request_post_datetime", "tests/test_requester.py::TestRequester::test_request_put" ]
[]
MIT License
8,988
504
[ "canvasapi/file.py", "canvasapi/requester.py" ]
Parquery__icontract-170
38968d2d4b79f44522417f40e9d8cfd6b5762870
2020-11-19 11:22:22
021f478672e480372d277d45384e3e65d1639d1a
coveralls: ## Pull Request Test Coverage Report for [Build 480](https://coveralls.io/builds/35089448) * **29** of **30** **(96.67%)** changed or added relevant lines in **2** files are covered. * No unchanged relevant lines lost coverage. * Overall coverage increased (+**0.1%**) to **92.232%** --- | Changes Missing Coverage | Covered Lines | Changed/Added Lines | % | | :-----|--------------|--------|---: | | [icontract/_recompute.py](https://coveralls.io/builds/35089448/source?filename=icontract%2F_recompute.py#L107) | 22 | 23 | 95.65% <!-- | **Total:** | **29** | **30** | **96.67%** | --> | Totals | [![Coverage Status](https://coveralls.io/builds/35089448/badge)](https://coveralls.io/builds/35089448) | | :-- | --: | | Change from base [Build 478](https://coveralls.io/builds/34999716): | 0.1% | | Covered Lines: | 1033 | | Relevant Lines: | 1120 | --- ##### 💛 - [Coveralls](https://coveralls.io)
diff --git a/icontract/_recompute.py b/icontract/_recompute.py index 00faac1..0427994 100644 --- a/icontract/_recompute.py +++ b/icontract/_recompute.py @@ -88,6 +88,44 @@ class Visitor(ast.NodeVisitor): self.recomputed_values[node] = node.value return node.value + if sys.version_info >= (3, 6): + + def visit_FormattedValue(self, node: ast.FormattedValue) -> Any: + """Format the node value.""" + fmt = ['{'] + # See https://docs.python.org/3/library/ast.html#ast.FormattedValue for these + # constants + if node.conversion == -1: + pass + elif node.conversion == 115: + fmt.append('!s') + elif node.conversion == 114: + fmt.append('!r') + elif node.conversion == 97: + fmt.append('!a') + else: + raise NotImplementedError("Unhandled conversion of a formatted value node {!r}: {}".format( + node, node.conversion)) + + if node.format_spec is not None: + fmt.append(":") + + # The following assert serves only documentation purposes so that the code is easier to follow. + assert isinstance(node.format_spec, ast.JoinedStr) + fmt.append(self.visit(node.format_spec)) + + fmt.append('}') + + recomputed_value = self.visit(node.value) + return ''.join(fmt).format(recomputed_value) + + def visit_JoinedStr(self, node: ast.JoinedStr) -> Any: + """Visit the values and concatenate them.""" + joined_str = ''.join(self.visit(value_node) for value_node in node.values) + + self.recomputed_values[node] = joined_str + return joined_str + # pylint: enable=no-member def visit_List(self, node: ast.List) -> List[Any]: diff --git a/icontract/_represent.py b/icontract/_represent.py index 934b052..f3fb8ee 100644 --- a/icontract/_represent.py +++ b/icontract/_represent.py @@ -55,6 +55,17 @@ class Visitor(ast.NodeVisitor): self.reprs = dict() # type: MutableMapping[str, str] self._atok = atok + if sys.version_info >= (3, 6): + # pylint: disable=no-member + def visit_JoinedStr(self, node: ast.JoinedStr) -> None: + """Show the whole joined strings without descending into the values.""" + if node in self._recomputed_values: + value = self._recomputed_values[node] + + if _representable(value=value): + text = self._atok.get_text(node) + self.reprs[text] = value + def visit_Name(self, node: ast.Name) -> None: """ Resolve the name from the variable look-up and the built-ins. diff --git a/precommit.py b/precommit.py index 926809e..2e9fc99 100755 --- a/precommit.py +++ b/precommit.py @@ -57,14 +57,10 @@ def main() -> int: env['ICONTRACT_SLOW'] = 'true' # yapf: disable - unittest_targets = ['tests'] - if sys.version_info > (3, 8): - unittest_targets.append('tests_3_8') - subprocess.check_call( ["coverage", "run", "--source", "icontract", - "-m", "unittest", "discover"] + unittest_targets, + "-m", "unittest", "discover"], cwd=str(repo_root), env=env) # yapf: enable
Violating contract with f-string produces NotImplementedError An f-string in a lambda expression in a contract produces a `NotImplementedError` from `incontract._recompute` when a call violates the contract. A call that conforms to the contract does not produce the error. Using Python 3.8.5 and icontract 2.3.7. Example file `test_foo.py`: ``` python from icontract import ViolationError, require from pytest import raises @require(lambda text: text != f'{text}{text}') def include(text: str) -> str: return text def test_include_a() -> None: with raises(ViolationError): include('') def test_include_b() -> None: assert include('foo') == 'foo' ``` ``` $ pytest test_foo.py ============================= test session starts ============================== platform linux -- Python 3.8.5, pytest-6.1.2, py-1.9.0, pluggy-0.13.1 rootdir: /tmp/foo collected 2 items test_foo.py F. [100%] =================================== FAILURES =================================== ________________________________ test_include_a ________________________________ def test_include_a() -> None: with raises(ViolationError): > include('') test_foo.py:10: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ test-env/lib/python3.8/site-packages/icontract/_checkers.py:373: in wrapper _assert_precondition(contract=contract, resolved_kwargs=resolved_kwargs) test-env/lib/python3.8/site-packages/icontract/_checkers.py:152: in _assert_precondition msg = icontract._represent.generate_message(contract=contract, condition_kwargs=condition_kwargs) test-env/lib/python3.8/site-packages/icontract/_represent.py:403: in generate_message repr_vals = repr_values( test-env/lib/python3.8/site-packages/icontract/_represent.py:350: in repr_values recompute_visitor.visit(node=lambda_inspection.node.body) /nix/store/z65l1jqvxa58zzwwa3bvglb6asj4y8cv-python3-3.8.5/lib/python3.8/ast.py:363: in visit return visitor(node) test-env/lib/python3.8/site-packages/icontract/_recompute.py:234: in visit_Compare comparators = [self.visit(node=comparator) for comparator in node.comparators] test-env/lib/python3.8/site-packages/icontract/_recompute.py:234: in <listcomp> comparators = [self.visit(node=comparator) for comparator in node.comparators] /nix/store/z65l1jqvxa58zzwwa3bvglb6asj4y8cv-python3-3.8.5/lib/python3.8/ast.py:363: in visit return visitor(node) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = <icontract._recompute.Visitor object at 0x7f1794bf39a0> node = <_ast.JoinedStr object at 0x7f1794bf3280> def generic_visit(self, node: ast.AST) -> None: """Raise an exception that this node has not been handled.""" > raise NotImplementedError("Unhandled recomputation of the node: {} {}".format(type(node), node)) E NotImplementedError: Unhandled recomputation of the node: <class '_ast.JoinedStr'> <_ast.JoinedStr object at 0x7f1794bf3280> test-env/lib/python3.8/site-packages/icontract/_recompute.py:471: NotImplementedError =========================== short test summary info ============================ FAILED test_foo.py::test_include_a - NotImplementedError: Unhandled recomputa... ========================= 1 failed, 1 passed in 0.31s ========================== ```
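To see what the recomputation visitor in the patch above has to handle: an f-string parses to an `ast.JoinedStr` whose `FormattedValue` children carry the conversion (`!s`/`!r`/`!a`) as an integer character code and an optional nested `format_spec`. A small, standalone illustration:

```python
import ast

tree = ast.parse("f'{x!r:.3}'", mode="eval")
joined = tree.body
print(type(joined).__name__)  # JoinedStr

for node in joined.values:
    if isinstance(node, ast.FormattedValue):
        print(node.conversion)             # 114 == ord('r')
        print(ast.dump(node.format_spec))  # nested JoinedStr holding '.3'
```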
Parquery/icontract
diff --git a/tests_3_6/__init__.py b/tests_3_6/__init__.py new file mode 100644 index 0000000..c706c20 --- /dev/null +++ b/tests_3_6/__init__.py @@ -0,0 +1,12 @@ +""" +Test Python 3.6-specific features. + +For example, one such feature is literal string interpolation. +""" + +import sys + +if sys.version_info < (3, 6): + def load_tests(loader, suite, pattern): # pylint: disable=unused-argument + """Ignore all the tests for lower Python versions.""" + return suite diff --git a/tests_3_6/test_represent.py b/tests_3_6/test_represent.py new file mode 100644 index 0000000..65e5c4c --- /dev/null +++ b/tests_3_6/test_represent.py @@ -0,0 +1,130 @@ +#!/usr/bin/env python3 +# pylint: disable=missing-docstring,invalid-name,too-many-public-methods,no-self-use +# pylint: disable=unused-argument + +import textwrap +import unittest +import math +from typing import Optional # pylint: disable=unused-import + +import icontract._represent +import tests.error +import tests.mock + + +class TestLiteralStringInterpolation(unittest.TestCase): + def test_plain_string(self) -> None: + @icontract.require(lambda x: f"something" == '') + def func(x: float) -> float: + return x + + violation_err = None # type: Optional[icontract.ViolationError] + try: + func(x=0) + except icontract.ViolationError as err: + violation_err = err + + self.assertIsNotNone(violation_err) + self.assertEqual( + 'f"something" == \'\': f"something" was \'something\'', + tests.error.wo_mandatory_location(str(violation_err))) + + def test_simple_interpolation(self) -> None: + @icontract.require(lambda x: f"{x}" == '') + def func(x: float) -> float: + return x + + violation_err = None # type: Optional[icontract.ViolationError] + try: + func(x=0) + except icontract.ViolationError as err: + violation_err = err + + self.assertIsNotNone(violation_err) + self.assertEqual( + 'f"{x}" == \'\': f"{x}" was \'0\'', + tests.error.wo_mandatory_location(str(violation_err))) + + def test_string_formatting(self) -> None: + @icontract.require(lambda x: f"{x!s}" == '') + def func(x: float) -> float: + return x + + violation_err = None # type: Optional[icontract.ViolationError] + try: + func(x=1.984) + except icontract.ViolationError as err: + violation_err = err + + self.assertIsNotNone(violation_err) + self.assertEqual( + 'f"{x!s}" == \'\': f"{x!s}" was \'1.984\'', + tests.error.wo_mandatory_location(str(violation_err))) + + def test_repr_formatting(self) -> None: + @icontract.require(lambda x: f"{x!r}" == '') + def func(x: float) -> float: + return x + + violation_err = None # type: Optional[icontract.ViolationError] + try: + func(x=1.984) + except icontract.ViolationError as err: + violation_err = err + + self.assertIsNotNone(violation_err) + self.assertEqual( + 'f"{x!r}" == \'\': f"{x!r}" was \'1.984\'', + tests.error.wo_mandatory_location(str(violation_err))) + + def test_ascii_formatting(self) -> None: + @icontract.require(lambda x: f"{x!a}" == '') + def func(x: float) -> float: + return x + + violation_err = None # type: Optional[icontract.ViolationError] + try: + func(x=1.984) + except icontract.ViolationError as err: + violation_err = err + + self.assertIsNotNone(violation_err) + self.assertEqual( + 'f"{x!a}" == \'\': f"{x!a}" was \'1.984\'', + tests.error.wo_mandatory_location(str(violation_err))) + + def test_format_spec(self) -> None: + @icontract.require(lambda x: f"{x:.3}" == '') + def func(x: float) -> float: + return x + + violation_err = None # type: Optional[icontract.ViolationError] + try: + func(x=1.984) + except icontract.ViolationError as err: 
+ violation_err = err + + self.assertIsNotNone(violation_err) + self.assertEqual( + 'f"{x:.3}" == \'\': f"{x:.3}" was \'1.98\'', + tests.error.wo_mandatory_location(str(violation_err))) + + def test_conversion_and_format_spec(self) -> None: + @icontract.require(lambda x: f"{x!r:.3}" == '') + def func(x: float) -> float: + return x + + violation_err = None # type: Optional[icontract.ViolationError] + try: + func(x=1.984) + except icontract.ViolationError as err: + violation_err = err + + self.assertIsNotNone(violation_err) + self.assertEqual( + 'f"{x!r:.3}" == \'\': f"{x!r:.3}" was \'1.9\'', + tests.error.wo_mandatory_location(str(violation_err))) + + +if __name__ == '__main__': + unittest.main() diff --git a/tests_3_8/__init__.py b/tests_3_8/__init__.py index 774c18e..040310e 100644 --- a/tests_3_8/__init__.py +++ b/tests_3_8/__init__.py @@ -5,3 +5,11 @@ For example, one such feature is walrus operator used in named expressions. We have to exclude these tests running on prior versions of Python since the syntax would be considered invalid. """ + +import sys + +if sys.version_info < (3, 8): + + def load_tests(loader, suite, pattern): # pylint: disable=unused-argument + """Ignore all the tests for lower Python versions.""" + return suite
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 2 }, "num_modified_files": 3 }
2.3
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest pytest-cov pytest-xdist pytest-mock pytest-asyncio", "pytest" ], "pre_install": null, "python": "3.8", "reqs_path": [ "requirements/base.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
astroid==2.15.8 asttokens==2.4.1 attrs==25.3.0 cachetools==5.5.2 chardet==5.2.0 colorama==0.4.6 coverage==4.5.4 deal==4.1.0 distlib==0.3.9 docutils==0.20.1 dpcontracts==0.6.0 exceptiongroup==1.2.2 execnet==2.1.1 filelock==3.16.1 hypothesis==6.113.0 -e git+https://github.com/Parquery/icontract.git@38968d2d4b79f44522417f40e9d8cfd6b5762870#egg=icontract importlib_metadata==8.5.0 iniconfig==2.1.0 isort==4.3.21 lazy-object-proxy==1.10.0 mccabe==0.6.1 mypy==0.790 mypy_extensions==0.4.4 packaging==24.2 platformdirs==4.3.6 pluggy==1.5.0 py-cpuinfo==5.0.0 pydocstyle==2.1.1 Pygments==2.19.1 pylint==2.3.1 pyproject-api==1.8.0 pytest==8.3.5 pytest-asyncio==0.24.0 pytest-cov==5.0.0 pytest-mock==3.14.0 pytest-xdist==3.6.1 six==1.17.0 snowballstemmer==2.2.0 sortedcontainers==2.4.0 tabulate==0.9.0 tomli==2.2.1 tox==4.25.0 typed-ast==1.4.3 typeguard==4.4.0 typing_extensions==4.13.0 vaa==0.2.1 virtualenv==20.29.3 wrapt==1.17.2 yapf==0.20.2 zipp==3.20.2
name: icontract channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=24.2=py38h06a4308_0 - python=3.8.20=he870216_0 - readline=8.2=h5eee18b_0 - setuptools=75.1.0=py38h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.44.0=py38h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - astroid==2.15.8 - asttokens==2.4.1 - attrs==25.3.0 - cachetools==5.5.2 - chardet==5.2.0 - colorama==0.4.6 - coverage==4.5.4 - deal==4.1.0 - distlib==0.3.9 - docutils==0.20.1 - dpcontracts==0.6.0 - exceptiongroup==1.2.2 - execnet==2.1.1 - filelock==3.16.1 - hypothesis==6.113.0 - importlib-metadata==8.5.0 - iniconfig==2.1.0 - isort==4.3.21 - lazy-object-proxy==1.10.0 - mccabe==0.6.1 - mypy==0.790 - mypy-extensions==0.4.4 - packaging==24.2 - platformdirs==4.3.6 - pluggy==1.5.0 - py-cpuinfo==5.0.0 - pydocstyle==2.1.1 - pygments==2.19.1 - pylint==2.3.1 - pyproject-api==1.8.0 - pytest==8.3.5 - pytest-asyncio==0.24.0 - pytest-cov==5.0.0 - pytest-mock==3.14.0 - pytest-xdist==3.6.1 - six==1.17.0 - snowballstemmer==2.2.0 - sortedcontainers==2.4.0 - tabulate==0.9.0 - tomli==2.2.1 - tox==4.25.0 - typed-ast==1.4.3 - typeguard==4.4.0 - typing-extensions==4.13.0 - vaa==0.2.1 - virtualenv==20.29.3 - wrapt==1.17.2 - yapf==0.20.2 - zipp==3.20.2 prefix: /opt/conda/envs/icontract
[ "tests_3_6/test_represent.py::TestLiteralStringInterpolation::test_ascii_formatting", "tests_3_6/test_represent.py::TestLiteralStringInterpolation::test_conversion_and_format_spec", "tests_3_6/test_represent.py::TestLiteralStringInterpolation::test_format_spec", "tests_3_6/test_represent.py::TestLiteralStringInterpolation::test_plain_string", "tests_3_6/test_represent.py::TestLiteralStringInterpolation::test_repr_formatting", "tests_3_6/test_represent.py::TestLiteralStringInterpolation::test_simple_interpolation", "tests_3_6/test_represent.py::TestLiteralStringInterpolation::test_string_formatting" ]
[]
[]
[]
MIT License
8,993
882
[ "icontract/_recompute.py", "icontract/_represent.py", "precommit.py" ]
meerk40t__svgelements-58
fcc436609dbda4eedbb72e03afc58d12a52b76a3
2020-11-19 20:55:38
9fe3387671c85d742f73fb054e86118a235abd27
diff --git a/svgelements/svgelements.py b/svgelements/svgelements.py index ed0e7a3..a1877c8 100644 --- a/svgelements/svgelements.py +++ b/svgelements/svgelements.py @@ -870,8 +870,6 @@ class Length(object): @staticmethod def str(s): - if s is None: - return "n/a" if isinstance(s, Length): if s.units == '': s = s.amount @@ -3674,9 +3672,30 @@ class CubicBezier(PathSegment): local_extrema = [self.point(t)[v] for t in local_extremizers] return min(local_extrema), max(local_extrema) + def _derivative(self, t): + """returns the nth derivative of the segment at t. + Note: Bezier curves can have points where their derivative vanishes. + If you are interested in the tangent direction, use the unit_tangent() + method instead.""" + p = [self.start, self.control1, self.control2, self.end] + return 3 * (p[1] - p[0]) * (1 - t) ** 2 + 6 * (p[2] - p[1]) * ( + 1 - t) * t + 3 * ( + p[3] - p[2]) * t ** 2 + + def _length_scipy(self, error=ERROR): + from scipy.integrate import quad + return quad(lambda tau: abs(self._derivative(tau)), 0., 1., + epsabs=error, limit=1000)[0] + + def _length_default(self, error=ERROR, min_depth=MIN_DEPTH): + return self._line_length(0, 1, error, min_depth) + def length(self, error=ERROR, min_depth=MIN_DEPTH): """Calculate the length of the path up to a certain position""" - return self._line_length(0, 1, error, min_depth) + try: + return self._length_scipy(error) + except ModuleNotFoundError: + return self._length_default(error, min_depth) def is_smooth_from(self, previous): """Checks if this segment would be a smooth segment following the previous"""
Use scipy if available for CubicBezier.length() While trying to benchmark bezier curves (#52), I ran into the issue that `CubicBezier.length()` is *extremely* slow, due to using a linearisation approach. When available, `svgpathtools` uses scipy for this task: ```python def length(self, t0=0, t1=1, error=LENGTH_ERROR, min_depth=LENGTH_MIN_DEPTH): """Calculate the length of the path up to a certain position""" # ... # using scipy.integrate.quad is quick if _quad_available: s = quad(lambda tau: abs(self.derivative(tau)), t0, t1, epsabs=error, limit=1000)[0] else: s = segment_length(self, t0, t1, self.point(t0), self.point(t1), error, min_depth, 0) # ... return s def derivative(self, t, n=1): """returns the nth derivative of the segment at t. Note: Bezier curves can have points where their derivative vanishes. If you are interested in the tangent direction, use the unit_tangent() method instead.""" p = self.bpoints() if n == 1: return 3*(p[1] - p[0])*(1 - t)**2 + 6*(p[2] - p[1])*(1 - t)*t + 3*( p[3] - p[2])*t**2 elif n == 2: return 6*( (1 - t)*(p[2] - 2*p[1] + p[0]) + t*(p[3] - 2*p[2] + p[1])) elif n == 3: return 6*(p[3] - 3*(p[2] - p[1]) - p[0]) elif n > 3: return 0 else: raise ValueError("n should be a positive integer.") ``` @tatarize would you be ok with having such an approach in `svgelements`?
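A minimal, self-contained sketch of the approach proposed in the issue above, assuming only `scipy`; the helper names `cubic_bezier_derivative` and `cubic_bezier_length` are illustrative and are not part of svgelements or svgpathtools. The arc length of a cubic Bézier is the integral of |B'(t)| over [0, 1], which `scipy.integrate.quad` evaluates quickly:

```python
# Sketch: cubic Bezier arc length via scipy.integrate.quad.
# Control points are complex numbers (x + y*1j), svgpathtools-style.
from scipy.integrate import quad

def cubic_bezier_derivative(p0, p1, p2, p3, t):
    # First derivative of B(t) = (1-t)^3 p0 + 3(1-t)^2 t p1 + 3(1-t) t^2 p2 + t^3 p3.
    return 3 * (p1 - p0) * (1 - t) ** 2 + 6 * (p2 - p1) * (1 - t) * t + 3 * (p3 - p2) * t ** 2

def cubic_bezier_length(p0, p1, p2, p3, error=1e-12):
    # Arc length = integral of |B'(t)| dt over [0, 1].
    return quad(lambda t: abs(cubic_bezier_derivative(p0, p1, p2, p3, t)),
                0.0, 1.0, epsabs=error, limit=1000)[0]

# Example: a curve from (0, 0) to (3, 0); the result is a plain float.
print(cubic_bezier_length(0 + 0j, 1 + 1j, 2 - 1j, 3 + 0j))
```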
meerk40t/svgelements
diff --git a/test/test_cubic_bezier_length.py b/test/test_cubic_bezier_length.py new file mode 100644 index 0000000..6685b20 --- /dev/null +++ b/test/test_cubic_bezier_length.py @@ -0,0 +1,21 @@ +from __future__ import print_function + +import unittest +from random import * + +from svgelements import * + + +def get_random_cubic_bezier(): + return CubicBezier((random() * 50, random() * 50), (random() * 50, random() * 50), + (random() * 50, random() * 50), (random() * 50, random() * 50)) + + +class TestElementCubicBezierLength(unittest.TestCase): + + def test_cubic_bezier_length(self): + for _ in range(100): + b = get_random_cubic_bezier() + l1 = b._length_scipy() + l2 = b._length_default() + self.assertAlmostEqual(l1, l2) diff --git a/test/test_viewbox.py b/test/test_viewbox.py index 72669af..f8a3f0f 100644 --- a/test/test_viewbox.py +++ b/test/test_viewbox.py @@ -31,11 +31,4 @@ class TestElementViewbox(unittest.TestCase): def test_viewbox_translate(self): v = Viewbox({'viewBox': '-50 -50 100 100', 'height': 100, 'width': 100}) - self.assertEqual(v.transform(), 'translate(50, 50)') - - def test_viewbox_incomplete_print(self): - v = Viewbox({'viewBox': None, 'height': 200, 'width': 200}) - try: - str(v) - except TypeError: - self.fail("str(viewbox) should not fail for incomplete viewbox") \ No newline at end of file + self.assertEqual(v.transform(), 'translate(50, 50)') \ No newline at end of file
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_issue_reference" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 2 }, "num_modified_files": 1 }
1.2
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "scipy", "pytest" ], "pre_install": null, "python": "3.9", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work numpy==2.0.2 packaging @ file:///croot/packaging_1734472117206/work pluggy @ file:///croot/pluggy_1733169602837/work pytest @ file:///croot/pytest_1738938843180/work scipy==1.13.1 -e git+https://github.com/meerk40t/svgelements.git@fcc436609dbda4eedbb72e03afc58d12a52b76a3#egg=svgelements tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
name: svgelements channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - exceptiongroup=1.2.0=py39h06a4308_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - packaging=24.2=py39h06a4308_0 - pip=25.0=py39h06a4308_0 - pluggy=1.5.0=py39h06a4308_0 - pytest=8.3.4=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tomli=2.0.1=py39h06a4308_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - numpy==2.0.2 - scipy==1.13.1 prefix: /opt/conda/envs/svgelements
[ "test/test_cubic_bezier_length.py::TestElementCubicBezierLength::test_cubic_bezier_length" ]
[]
[ "test/test_viewbox.py::TestElementViewbox::test_viewbox_incomplete", "test/test_viewbox.py::TestElementViewbox::test_viewbox_scale", "test/test_viewbox.py::TestElementViewbox::test_viewbox_simple", "test/test_viewbox.py::TestElementViewbox::test_viewbox_translate" ]
[]
MIT License
9,000
543
[ "svgelements/svgelements.py" ]
codalab__codalab-worksheets-3076
ae54f808964ec5bf7cc300fe09b10aa153249ae8
2020-11-20 10:02:37
3cc485ea49ffbd30b62044473427bdb8987fe7fc
diff --git a/codalab/lib/interactive_session.py b/codalab/lib/interactive_session.py index 445d4368..9ee96254 100644 --- a/codalab/lib/interactive_session.py +++ b/codalab/lib/interactive_session.py @@ -31,7 +31,7 @@ class InteractiveSession: 9. Stop and remove the interactive session container. """ - _BASH_HISTORY_CONTAINER_PATH = '/root/.bash_history' + _BASH_HISTORY_CONTAINER_PATH = "/usr/sbin/.bash_history" _CHOOSE_COMMAND_INSTRUCTIONS = ( "\n\n#\n" "# Choose the commands to use for cl run:\n" @@ -80,10 +80,15 @@ class InteractiveSession: self._stderr = stderr def start(self): - self._bundle_path = os.path.join( + self._host_bundle_path = os.path.join( self._manager.codalab_home, 'local_bundles', self._session_uuid ) - os.makedirs(self._bundle_path) + os.makedirs(self._host_bundle_path) + + # Create a blank file which will be used as the bash history file that will later be + # mounted and populated during the interactive session. + self._host_bash_history_path = os.path.join(self._host_bundle_path, ".bash_history") + open(self._host_bash_history_path, 'w').close() run_command = self.get_docker_run_command() @@ -91,11 +96,13 @@ class InteractiveSession: print('\nStarting an interactive session...', file=self._stdout) print('%s\n' % run_command, file=self._stdout) print('=' * 150, file=self._stdout) - print('Session UUID:', self._session_uuid, file=self._stdout) - print('CodaLab instance:', self._manager.current_client().address, file=self._stdout) - print('Container name:', self._get_container_name(), file=self._stdout) - print('Container Docker image:', self._docker_image, file=self._stdout) - print('You can find local bundle contents at:', self._bundle_path, file=self._stdout) + print('Session UUID: ', self._session_uuid, file=self._stdout) + print('CodaLab instance: ', self._manager.current_client().address, file=self._stdout) + print('Container name: ', self._get_container_name(), file=self._stdout) + print('Container Docker image: ', self._docker_image, file=self._stdout) + print( + 'You can find local bundle contents at: ', self._host_bundle_path, file=self._stdout + ) print('=' * 150 + '\n', file=self._stdout) self._container = self._start_session(run_command) @@ -136,7 +143,15 @@ class InteractiveSession: volumes[get_docker_path(key)] = dependency_local_path name = self._get_container_name() - command = ['docker run', '-it', f'--name {name}', f'-w {os.path.sep}{self._session_uuid}'] + # Start a container as a non-root user. Root (id = 0) is the default user within a container. + # When passing a numeric ID, the user does not have to exist in the container. 
+ command = [ + 'docker run', + '-it', + f'--name {name}', + f'-w {os.path.sep}{self._session_uuid}', + '-u 1', + ] command.extend( [ # Example: -v local_path/some_folder:/0x707e903500e54bcf9b072ac7e3f5ed36_dependencies/foo:ro @@ -144,6 +159,11 @@ class InteractiveSession: for docker_path, local_path in volumes.items() ] ) + command.append( + '-v {}:{}:rw'.format( + self._host_bash_history_path, InteractiveSession._BASH_HISTORY_CONTAINER_PATH + ) + ) command.append(self._docker_image) command.append('bash') return ' '.join(command) @@ -154,7 +174,7 @@ class InteractiveSession: self._container.stop() self._container.remove() - shutil.rmtree(self._bundle_path, ignore_errors=True) + shutil.rmtree(self._host_bundle_path, ignore_errors=True) if self._verbose: print('Done.\n', file=self._stdout) @@ -175,10 +195,11 @@ class InteractiveSession: def _construct_final_command(self): try: candidate_commands = self._get_bash_history() - except Exception: + except Exception as e: print( - 'The history of bash commands could not be retrieved at path: %s' - % InteractiveSession._BASH_HISTORY_CONTAINER_PATH, + 'The history of bash commands could not be retrieved at path {}: {}'.format( + InteractiveSession._BASH_HISTORY_CONTAINER_PATH, e + ), file=self._stderr, ) return '' @@ -188,7 +209,7 @@ class InteractiveSession: candidate_commands.insert(0, self._initial_command + '\n') # Write out the commands to choose from and the instructions out to a file - path = os.path.join(self._bundle_path, 'edit_commands.txt') + path = os.path.join(self._host_bundle_path, 'edit_commands.txt') with open(path, 'w') as f: for command in candidate_commands: f.write(command) @@ -210,17 +231,9 @@ class InteractiveSession: return final_command def _get_bash_history(self): - # Copies out .bash_history from the container to bundle_path - path = os.path.join(self._bundle_path, '.bash_history') - f = open(path, 'wb') - stream, _ = self._container.get_archive(InteractiveSession._BASH_HISTORY_CONTAINER_PATH) - for chunk in stream: - f.write(chunk) - f.close() - # Extract out a list of commands from .bash_history commands = [] - with open(path) as f: + with open(self._host_bash_history_path) as f: for i, line in enumerate(f): command = ( line.rstrip(InteractiveSession._NULL_BYTE)
Run cl run --interactive as a non-root user **Describe the bug** Run `cl run --interactive` as a non-root user, matching the behavior of jobs run by the CodaLab worker.
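A rough sketch of what the requested change amounts to, assuming the interactive session is launched via a `docker run` command string as in the patch for this record; the helper name below is hypothetical. Docker containers run as root by default, and passing a numeric ID with `-u` starts the shell as that (possibly non-existent) user:

```python
# Sketch: building a docker run command that starts the container as a
# non-root user. The numeric id (here 1) does not need to exist in the image.
def build_interactive_run_command(name, workdir, image, user_id=1):
    parts = [
        "docker run",
        "-it",
        f"--name {name}",
        f"-w {workdir}",
        f"-u {user_id}",  # non-root user inside the container
        image,
        "bash",
    ]
    return " ".join(parts)

print(build_interactive_run_command("interactive-session-demo", "/0xdeadbeef", "ubuntu:20.04"))
```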
codalab/codalab-worksheets
diff --git a/tests/unit/lib/interactive_session_test.py b/tests/unit/lib/interactive_session_test.py index fa5eb0fa..584e7873 100644 --- a/tests/unit/lib/interactive_session_test.py +++ b/tests/unit/lib/interactive_session_test.py @@ -12,10 +12,11 @@ class InteractiveSessionTest(unittest.TestCase): session = InteractiveSession( 'some-docker-image', dependencies=targets, bundle_locations=bundle_locations ) + session._host_bash_history_path = ".bash_history" expected_regex = ( - 'docker run -it --name interactive-session-0x[a-z0-9]{32} -w \/0x[a-z0-9]{32} -v ' + 'docker run -it --name interactive-session-0x[a-z0-9]{32} -w \/0x[a-z0-9]{32} -u 1 -v ' '[\s\S]{0,100}local\/path1:\/0x[a-z0-9]{32}\/key:ro -v [\s\S]{0,100}local\/path2:\/0x[a-z0-9]{32}\/key2:ro ' - 'some-docker-image bash' + '-v \.bash_history:\/usr\/sbin\/\.bash_history:rw some-docker-image bash' ) self.assertTrue(re.match(expected_regex, session.get_docker_run_command())) @@ -28,10 +29,11 @@ class InteractiveSessionTest(unittest.TestCase): session = InteractiveSession( 'some-docker-image', dependencies=targets, bundle_locations=bundle_locations ) + session._host_bash_history_path = ".bash_history" expected_regex = ( - 'docker run -it --name interactive-session-0x[a-z0-9]{32} -w \/0x[a-z0-9]{32} -v ' + 'docker run -it --name interactive-session-0x[a-z0-9]{32} -w \/0x[a-z0-9]{32} -u 1 -v ' '[\s\S]{0,100}local\/path1/sub/path1:\/0x[a-z0-9]{32}\/key:ro -v [\s\S]{0,100}local\/path2/sub/path2' - ':\/0x[a-z0-9]{32}\/key2:ro some-docker-image bash' + ':\/0x[a-z0-9]{32}\/key2:ro -v \.bash_history:\/usr\/sbin\/\.bash_history:rw some-docker-image bash' ) self.assertTrue(re.match(expected_regex, session.get_docker_run_command()))
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 2, "test_score": 2 }, "num_modified_files": 1 }
0.5
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
apache-beam==2.25.0 argcomplete==1.12.1 attrs==22.2.0 avro-python3==1.9.2.1 azure-core==1.8.0 azure-storage-blob==12.4.0 bottle==0.12.18 certifi==2021.5.30 cffi==1.15.1 chardet==3.0.4 -e git+https://github.com/codalab/codalab-worksheets.git@ae54f808964ec5bf7cc300fe09b10aa153249ae8#egg=codalab crcmod==1.7 cryptography==40.0.2 dataclasses==0.7 diffimg==0.2.3 dill==0.3.1.1 docker==4.3.0 docopt==0.6.2 fastavro==1.4.7 fusepy==2.0.4 future==0.18.3 grpcio==1.48.2 hdfs==2.7.3 httplib2==0.17.4 idna==2.10 importlib-metadata==2.1.3 iniconfig==1.1.1 isodate==0.6.1 markdown2==2.3.10 marshmallow==2.15.1 marshmallow-jsonapi==0.15.1 mock==2.0.0 msrest==0.6.21 numpy==1.19.5 oauth2client==4.1.3 oauthlib==2.1.0 packaging==21.3 pathtools==0.1.2 pbr==6.1.1 Pillow==8.4.0 pluggy==1.0.0 protobuf==3.19.6 psutil==5.7.2 py==1.11.0 pyarrow==0.17.1 pyasn1==0.5.1 pyasn1-modules==0.3.0 pycparser==2.21 pydot==1.4.2 pymongo==3.13.0 pyparsing==3.1.4 pytest==7.0.1 python-dateutil==2.8.1 pytz==2025.2 PyYAML==5.3.1 requests==2.24.0 requests-oauthlib==1.1.0 rsa==4.9 selenium==3.141.0 sentry-sdk==0.18.0 six==1.15.0 SQLAlchemy==1.3.19 tomli==1.2.3 typing-extensions==3.7.4.3 urllib3==1.25.11 watchdog==0.10.3 websocket-client==1.3.1 zipp==3.6.0
name: codalab-worksheets channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - apache-beam==2.25.0 - argcomplete==1.12.1 - attrs==22.2.0 - avro-python3==1.9.2.1 - azure-core==1.8.0 - azure-storage-blob==12.4.0 - bottle==0.12.18 - cffi==1.15.1 - chardet==3.0.4 - crcmod==1.7 - cryptography==40.0.2 - dataclasses==0.7 - diffimg==0.2.3 - dill==0.3.1.1 - docker==4.3.0 - docopt==0.6.2 - fastavro==1.4.7 - fusepy==2.0.4 - future==0.18.3 - grpcio==1.48.2 - hdfs==2.7.3 - httplib2==0.17.4 - idna==2.10 - importlib-metadata==2.1.3 - iniconfig==1.1.1 - isodate==0.6.1 - markdown2==2.3.10 - marshmallow==2.15.1 - marshmallow-jsonapi==0.15.1 - mock==2.0.0 - msrest==0.6.21 - numpy==1.19.5 - oauth2client==4.1.3 - oauthlib==2.1.0 - packaging==21.3 - pathtools==0.1.2 - pbr==6.1.1 - pillow==8.4.0 - pluggy==1.0.0 - protobuf==3.19.6 - psutil==5.7.2 - py==1.11.0 - pyarrow==0.17.1 - pyasn1==0.5.1 - pyasn1-modules==0.3.0 - pycparser==2.21 - pydot==1.4.2 - pymongo==3.13.0 - pyparsing==3.1.4 - pytest==7.0.1 - python-dateutil==2.8.1 - pytz==2025.2 - pyyaml==5.3.1 - requests==2.24.0 - requests-oauthlib==1.1.0 - rsa==4.9 - selenium==3.141.0 - sentry-sdk==0.18.0 - six==1.15.0 - sqlalchemy==1.3.19 - tomli==1.2.3 - typing-extensions==3.7.4.3 - urllib3==1.25.11 - watchdog==0.10.3 - websocket-client==1.3.1 - wheel==0.35.1 - zipp==3.6.0 prefix: /opt/conda/envs/codalab-worksheets
[ "tests/unit/lib/interactive_session_test.py::InteractiveSessionTest::test_get_docker_run_command", "tests/unit/lib/interactive_session_test.py::InteractiveSessionTest::test_get_docker_run_command_with_subpaths" ]
[]
[ "tests/unit/lib/interactive_session_test.py::InteractiveSessionTest::test_missing_bundle_location" ]
[]
Apache License 2.0
9,001
1,447
[ "codalab/lib/interactive_session.py" ]
Duke-GCB__DukeDSClient-317
6df1eb9f210f98d4ff826f89f89309f48c4e8c5b
2020-11-20 14:09:53
31b05acf213e41739c90be0cb9db0d102ee17187
diff --git a/ddsc/config.py b/ddsc/config.py index f74ab5d..582c9aa 100644 --- a/ddsc/config.py +++ b/ddsc/config.py @@ -88,7 +88,11 @@ class Config(object): filename = os.path.expanduser(filename) if os.path.exists(filename): with open(filename, 'r') as yaml_file: - self.update_properties(yaml.safe_load(yaml_file)) + config_data = yaml.safe_load(yaml_file) + if config_data: + self.update_properties(config_data) + else: + raise ValueError("Error: Empty config file {}".format(filename)) def update_properties(self, new_values): """
Fix error when the .ddsclient config file is empty When the .ddsclient config file is empty, the following error is produced when running a command such as `ddsclient list`: ``` type object argument after ** must be a mapping, not NoneType ``` See #315 for more
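A short illustration of the failure mode described above: `yaml.safe_load` returns `None` for an empty file, and unpacking `None` with `**` raises exactly the reported `TypeError`. The keyword-unpacking consumer below is a stand-in, not DukeDSClient's actual `update_properties` signature:

```python
# Sketch: why an empty .ddsclient file produces the "** must be a mapping" error.
import yaml

def apply_settings(**settings):      # hypothetical consumer of the parsed config
    return settings

config_data = yaml.safe_load("")     # an empty file parses to None, not {}
print(config_data)                   # None

try:
    apply_settings(**config_data)
except TypeError as exc:
    print(exc)                       # "... argument after ** must be a mapping, not NoneType"

# The guard added by the fix: treat an empty file as an explicit error.
if not config_data:
    print("Error: Empty config file ~/.ddsclient")
```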
Duke-GCB/DukeDSClient
diff --git a/ddsc/tests/test_config.py b/ddsc/tests/test_config.py index 58e21ad..073ad89 100644 --- a/ddsc/tests/test_config.py +++ b/ddsc/tests/test_config.py @@ -2,7 +2,7 @@ from unittest import TestCase import math import ddsc.config import multiprocessing -from mock.mock import patch +from mock.mock import patch, mock_open class TestConfig(TestCase): @@ -159,3 +159,13 @@ class TestConfig(TestCase): } config.update_properties(some_config) self.assertEqual(config.storage_provider_id, '123456') + + @patch('ddsc.config.os') + def test_add_properties_empty_file(self, mock_os): + mock_os.path.expanduser.return_value = '/home/user/.ddsclient' + mock_os.path.exists.return_value = True + config = ddsc.config.Config() + with self.assertRaises(ValueError) as raised_exception: + with patch('builtins.open', mock_open(read_data='')): + config.add_properties('~/.ddsclient') + self.assertEqual(str(raised_exception.exception), 'Error: Empty config file /home/user/.ddsclient')
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_issue_reference", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 2, "test_score": 2 }, "num_modified_files": 1 }
3.1
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "mock", "nose" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "requirements/base.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
certifi==2025.1.31 charset-normalizer==3.4.1 -e git+https://github.com/Duke-GCB/DukeDSClient.git@6df1eb9f210f98d4ff826f89f89309f48c4e8c5b#egg=DukeDSClient exceptiongroup==1.2.2 future==1.0.0 idna==3.10 iniconfig==2.1.0 mock==5.2.0 nose==1.3.7 packaging==24.2 pluggy==1.5.0 pytest==8.3.5 pytz==2025.2 PyYAML==6.0.2 requests==2.32.3 six==1.17.0 tenacity==6.2.0 tomli==2.2.1 urllib3==2.3.0
name: DukeDSClient channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - certifi==2025.1.31 - charset-normalizer==3.4.1 - exceptiongroup==1.2.2 - future==1.0.0 - idna==3.10 - iniconfig==2.1.0 - mock==5.2.0 - nose==1.3.7 - packaging==24.2 - pluggy==1.5.0 - pytest==8.3.5 - pytz==2025.2 - pyyaml==6.0.2 - requests==2.32.3 - six==1.17.0 - tenacity==6.2.0 - tomli==2.2.1 - urllib3==2.3.0 prefix: /opt/conda/envs/DukeDSClient
[ "ddsc/tests/test_config.py::TestConfig::test_add_properties_empty_file" ]
[]
[ "ddsc/tests/test_config.py::TestConfig::test_MB_chunk_convert", "ddsc/tests/test_config.py::TestConfig::test_create_config_no_env_set", "ddsc/tests/test_config.py::TestConfig::test_create_config_with_env_set", "ddsc/tests/test_config.py::TestConfig::test_default_num_workers", "ddsc/tests/test_config.py::TestConfig::test_empty_config", "ddsc/tests/test_config.py::TestConfig::test_get_portal_url_base", "ddsc/tests/test_config.py::TestConfig::test_get_user_config_filename", "ddsc/tests/test_config.py::TestConfig::test_get_user_config_get_page_size", "ddsc/tests/test_config.py::TestConfig::test_global_then_local", "ddsc/tests/test_config.py::TestConfig::test_parse_bytes_str", "ddsc/tests/test_config.py::TestConfig::test_storage_provider_id" ]
[]
MIT License
9,003
164
[ "ddsc/config.py" ]
kinegratii__borax-23
c7741cdfcd1b04476e2f140e86c96160227b1207
2020-11-21 13:56:45
98d86af8a8e9dd7f7198585b6d749af4a4ad076b
diff --git a/borax/calendars/lunardate.py b/borax/calendars/lunardate.py index b7adda7..6e37c33 100644 --- a/borax/calendars/lunardate.py +++ b/borax/calendars/lunardate.py @@ -352,9 +352,10 @@ class TextUtils: def day_cn(day: int) -> str: a, b = divmod(day, 10) if b == 0: # 10,20,30 - b = 10 - if a == 1: # 10 - a = 0 + if a == 1: + return TextUtils.TENS[0] + TextUtils.DAYS_CN[10] + else: + return TextUtils.DAYS_CN[a] + TextUtils.DAYS_CN[10] return TextUtils.TENS[a] + TextUtils.DAYS_CN[b] @staticmethod
Incorrect Chinese names for the lunar days “二十” and “三十” ## Affected version v3.4.0 ## Problem description “廿十” and “卅十” are incorrect usages, because “廿” and “卅” by themselves already mean twenty and thirty; the correct names should be “二十” and “三十”. ## Code example ```python from borax.calendars.lunardate import LunarDate, TextUtils print(TextUtils.day_cn(20)) # “廿十” ld = LunarDate(2020, 10, 20, 0) print(ld.cn_day) # “廿十” print(ld.cn_day_calendar) # “廿十” print(ld.cn_str()) # “廿十” ``` ## Suggested fix Only the logic of `TextUtils.day_cn` needs to be changed; all the other functions call this function.
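A standalone sketch of the corrected naming rule, mirroring the patch for this record; the lookup tables are inlined here for illustration rather than taken verbatim from Borax's `TextUtils`. The expected values match the data used in the accompanying test patch:

```python
# Sketch of the corrected rule: round tens read 初十 / 二十 / 三十, not 廿十 / 卅十.
DAYS_CN = "日一二三四五六七八九十"   # index 1..10 -> 一 .. 十
TENS = ("初", "十", "廿", "卅")

def day_cn(day):
    tens, ones = divmod(day, 10)
    if ones == 0:                               # 10, 20, 30
        if tens == 1:
            return TENS[0] + DAYS_CN[10]        # 初十
        return DAYS_CN[tens] + DAYS_CN[10]      # 二十 / 三十
    return TENS[tens] + DAYS_CN[ones]           # e.g. 初一, 十四, 廿三

for d, expected in [(1, "初一"), (10, "初十"), (14, "十四"), (20, "二十"), (23, "廿三"), (30, "三十")]:
    assert day_cn(d) == expected
```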
kinegratii/borax
diff --git a/tests/test_lunardate.py b/tests/test_lunardate.py index 86417c7..092c2c9 100644 --- a/tests/test_lunardate.py +++ b/tests/test_lunardate.py @@ -4,7 +4,22 @@ import datetime import unittest from datetime import date, timedelta -from borax.calendars.lunardate import LunarDate, parse_year_days, LCalendars, InvalidLunarDateError +from borax.calendars.lunardate import LunarDate, parse_year_days, LCalendars, InvalidLunarDateError, TextUtils + + +class TextUtilsTestCase(unittest.TestCase): + def test_cn_day_text(self): + data = [ + (1, '初一'), + (10, '初十'), + (14, '十四'), + (20, '二十'), + (23, '廿三'), + (30, '三十') + ] + for value, text in data: + with self.subTest(value=value, text=text): + self.assertEqual(text, TextUtils.day_cn(value)) class LunarDateTestCase(unittest.TestCase):
{ "commit_name": "head_commit", "failed_lite_validators": [], "has_test_patch": true, "is_lite": true, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 0 }, "num_modified_files": 1 }
3.4
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "nose2", "pytest" ], "pre_install": null, "python": "3.9", "reqs_path": [ "requirements_dev.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
-e git+https://github.com/kinegratii/borax.git@c7741cdfcd1b04476e2f140e86c96160227b1207#egg=borax coverage==5.5 exceptiongroup==1.2.2 flake8==3.9.2 iniconfig==2.1.0 mccabe==0.6.1 nose2==0.15.1 packaging==24.2 pluggy==1.5.0 pycodestyle==2.7.0 pyflakes==2.3.1 pytest==8.3.5 tomli==2.2.1
name: borax channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - borax==3.4.0 - coverage==5.5 - exceptiongroup==1.2.2 - flake8==3.9.2 - iniconfig==2.1.0 - mccabe==0.6.1 - nose2==0.15.1 - packaging==24.2 - pluggy==1.5.0 - pycodestyle==2.7.0 - pyflakes==2.3.1 - pytest==8.3.5 - setuptools==47.3.2 - tomli==2.2.1 prefix: /opt/conda/envs/borax
[ "tests/test_lunardate.py::TextUtilsTestCase::test_cn_day_text" ]
[]
[ "tests/test_lunardate.py::LunarDateTestCase::test_comparison", "tests/test_lunardate.py::LunarDateTestCase::test_convert_datetime", "tests/test_lunardate.py::LunarDateTestCase::test_create_date", "tests/test_lunardate.py::LunarDateTestCase::test_create_specific_dates", "tests/test_lunardate.py::LunarDateTestCase::test_immutable_feature", "tests/test_lunardate.py::LunarDateTestCase::test_new_date", "tests/test_lunardate.py::LunarDateTestCase::test_solar_and_lunar", "tests/test_lunardate.py::LunarDateTestCase::test_term_ganzhi_feature", "tests/test_lunardate.py::LunarDateTestCase::test_timedelta", "tests/test_lunardate.py::PrivateMethodsTestCase::test_year_info", "tests/test_lunardate.py::FormatterTestCase::test_cn_calendar_day", "tests/test_lunardate.py::FormatterTestCase::test_term", "tests/test_lunardate.py::FormatterTestCase::test_valid_format", "tests/test_lunardate.py::LCalendarTestCase::test_delta", "tests/test_lunardate.py::LCalendarTestCase::test_leap_check", "tests/test_lunardate.py::LCalendarTestCase::test_ndays" ]
[]
MIT License
9,010
238
[ "borax/calendars/lunardate.py" ]
googleapis__python-storage-323
547740c0a898492e76ce5e60dd20c7ddb8a53d1f
2020-11-22 16:39:38
6ef1de2bd1a9cfab765c736cb849970b2587398c
google-cla[bot]: Thanks for your pull request. It looks like this may be your first contribution to a Google open source project (if not, look below for help). Before we can look at your pull request, you'll need to sign a Contributor License Agreement (CLA). :memo: **Please visit <https://cla.developers.google.com/> to sign.** Once you've signed (or fixed any issues), please reply here with `@googlebot I signed it!` and we'll verify it. ---- #### What to do if you already signed the CLA ##### Individual signers * It's possible we don't have your GitHub username or you're using a different email address on your commit. Check [your existing CLA data](https://cla.developers.google.com/clas) and verify that your [email is set on your git commits](https://help.github.com/articles/setting-your-email-in-git/). ##### Corporate signers * Your company has a Point of Contact who decides which employees are authorized to participate. Ask your POC to be added to the group of authorized contributors. If you don't know who your Point of Contact is, direct the Google project maintainer to [go/cla#troubleshoot](http://go/cla#troubleshoot) ([Public version](https://opensource.google/docs/cla/#troubleshoot)). * The email used to register you as an authorized contributor must be the email used for the Git commit. Check [your existing CLA data](https://cla.developers.google.com/clas) and verify that your [email is set on your git commits](https://help.github.com/articles/setting-your-email-in-git/). * The email used to register you as an authorized contributor must also be [attached to your GitHub account](https://github.com/settings/emails). ℹ️ **Googlers: [Go here](https://goto.google.com/prinfo/https%3A%2F%2Fgithub.com%2Fgoogleapis%2Fpython-storage%2Fpull%2F323) for more info**. <!-- need_sender_cla --> haim0n: > Thanks for the patch. Please add unit test assertions (as I did in PR #325, which I am closing in favor of this one). done
diff --git a/google/cloud/storage/blob.py b/google/cloud/storage/blob.py index 18006d5..38da243 100644 --- a/google/cloud/storage/blob.py +++ b/google/cloud/storage/blob.py @@ -3613,7 +3613,7 @@ class Blob(_PropertyMixin): if value is not None: value = _datetime_to_rfc3339(value) - self._properties["customTime"] = value + self._patch_property("customTime", value) def _get_encryption_headers(key, source=False):
'Blob.custom_time' setter doesnt update '_changes' i think the `custom_time setter` code should be: ```python self._patch_property('customTime', value) ``` instead of ```python self._properties["customTime"] = value ```
googleapis/python-storage
diff --git a/tests/unit/test_blob.py b/tests/unit/test_blob.py index bc99186..5f93bc2 100644 --- a/tests/unit/test_blob.py +++ b/tests/unit/test_blob.py @@ -3987,6 +3987,7 @@ class Test_Blob(unittest.TestCase): if_generation_match=GENERATION_NUMBER, if_source_generation_match=SOURCE_GENERATION_NUMBER, ) + self.assertEqual(blob.storage_class, "NEARLINE") kw = connection._requested @@ -4236,6 +4237,7 @@ class Test_Blob(unittest.TestCase): self.assertIsNone(blob.metadata) blob.metadata = METADATA self.assertEqual(blob.metadata, METADATA) + self.assertIn("metadata", blob._changes) def test_metadata_setter_w_nan(self): BLOB_NAME = "blob-name" @@ -4246,6 +4248,7 @@ class Test_Blob(unittest.TestCase): blob.metadata = METADATA value = blob.metadata["foo"] self.assertIsInstance(value, str) + self.assertIn("metadata", blob._changes) def test_metageneration(self): BUCKET = object() @@ -4444,6 +4447,7 @@ class Test_Blob(unittest.TestCase): self.assertIsNone(blob.custom_time) blob.custom_time = TIMESTAMP self.assertEqual(blob.custom_time, TIMESTAMP) + self.assertIn("customTime", blob._changes) def test_custom_time_setter_none_value(self): from google.cloud._helpers import _RFC3339_MICROS
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 1 }, "num_modified_files": 1 }
1.33
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "mock" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "requirements/base.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
cachetools==4.2.4 certifi==2025.1.31 charset-normalizer==3.4.1 coverage==7.8.0 exceptiongroup==1.2.2 google-api-core==2.10.2 google-auth==1.35.0 google-cloud-core==1.7.3 -e git+https://github.com/googleapis/python-storage.git@547740c0a898492e76ce5e60dd20c7ddb8a53d1f#egg=google_cloud_storage google-crc32c==1.7.1 google-resumable-media==1.3.3 googleapis-common-protos==1.69.2 idna==3.10 iniconfig==2.1.0 mock==5.2.0 packaging==24.2 pluggy==1.5.0 protobuf==4.25.6 pyasn1==0.6.1 pyasn1_modules==0.4.2 pytest==8.3.5 pytest-cov==6.0.0 requests==2.32.3 rsa==4.9 six==1.17.0 tomli==2.2.1 urllib3==2.3.0
name: python-storage channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - cachetools==4.2.4 - certifi==2025.1.31 - charset-normalizer==3.4.1 - coverage==7.8.0 - exceptiongroup==1.2.2 - google-api-core==2.10.2 - google-auth==1.35.0 - google-cloud-core==1.7.3 - google-crc32c==1.7.1 - google-resumable-media==1.3.3 - googleapis-common-protos==1.69.2 - idna==3.10 - iniconfig==2.1.0 - mock==5.2.0 - packaging==24.2 - pluggy==1.5.0 - protobuf==4.25.6 - pyasn1==0.6.1 - pyasn1-modules==0.4.2 - pytest==8.3.5 - pytest-cov==6.0.0 - requests==2.32.3 - rsa==4.9 - six==1.17.0 - tomli==2.2.1 - urllib3==2.3.0 prefix: /opt/conda/envs/python-storage
[ "tests/unit/test_blob.py::Test_Blob::test_custom_time_setter" ]
[]
[ "tests/unit/test_blob.py::Test_Blob::test__do_download_w_chunks_w_checksum", "tests/unit/test_blob.py::Test_Blob::test__do_download_w_chunks_w_custom_timeout", "tests/unit/test_blob.py::Test_Blob::test__do_download_w_chunks_w_range_w_raw", "tests/unit/test_blob.py::Test_Blob::test__do_download_w_chunks_w_range_wo_raw", "tests/unit/test_blob.py::Test_Blob::test__do_download_w_chunks_wo_checksum", "tests/unit/test_blob.py::Test_Blob::test__do_download_w_chunks_wo_range_w_raw", "tests/unit/test_blob.py::Test_Blob::test__do_download_w_chunks_wo_range_wo_raw", "tests/unit/test_blob.py::Test_Blob::test__do_download_wo_chunks_w_custom_timeout", "tests/unit/test_blob.py::Test_Blob::test__do_download_wo_chunks_w_range_w_raw", "tests/unit/test_blob.py::Test_Blob::test__do_download_wo_chunks_w_range_wo_raw", "tests/unit/test_blob.py::Test_Blob::test__do_download_wo_chunks_wo_range_w_raw", "tests/unit/test_blob.py::Test_Blob::test__do_download_wo_chunks_wo_range_wo_raw", "tests/unit/test_blob.py::Test_Blob::test__do_multipart_upload_bad_size", "tests/unit/test_blob.py::Test_Blob::test__do_multipart_upload_no_size", "tests/unit/test_blob.py::Test_Blob::test__do_multipart_upload_with_client", "tests/unit/test_blob.py::Test_Blob::test__do_multipart_upload_with_custom_timeout", "tests/unit/test_blob.py::Test_Blob::test__do_multipart_upload_with_generation_match", "tests/unit/test_blob.py::Test_Blob::test__do_multipart_upload_with_generation_not_match", "tests/unit/test_blob.py::Test_Blob::test__do_multipart_upload_with_kms", "tests/unit/test_blob.py::Test_Blob::test__do_multipart_upload_with_kms_with_version", "tests/unit/test_blob.py::Test_Blob::test__do_multipart_upload_with_metadata", "tests/unit/test_blob.py::Test_Blob::test__do_multipart_upload_with_retry", "tests/unit/test_blob.py::Test_Blob::test__do_multipart_upload_with_size", "tests/unit/test_blob.py::Test_Blob::test__do_multipart_upload_with_user_project", "tests/unit/test_blob.py::Test_Blob::test__do_resumable_upload_no_size", "tests/unit/test_blob.py::Test_Blob::test__do_resumable_upload_with_custom_timeout", "tests/unit/test_blob.py::Test_Blob::test__do_resumable_upload_with_data_corruption", "tests/unit/test_blob.py::Test_Blob::test__do_resumable_upload_with_predefined_acl", "tests/unit/test_blob.py::Test_Blob::test__do_resumable_upload_with_retry", "tests/unit/test_blob.py::Test_Blob::test__do_resumable_upload_with_size", "tests/unit/test_blob.py::Test_Blob::test__do_upload_uses_multipart", "tests/unit/test_blob.py::Test_Blob::test__do_upload_uses_multipart_w_custom_timeout", "tests/unit/test_blob.py::Test_Blob::test__do_upload_uses_resumable", "tests/unit/test_blob.py::Test_Blob::test__do_upload_uses_resumable_w_custom_timeout", "tests/unit/test_blob.py::Test_Blob::test__do_upload_with_retry", "tests/unit/test_blob.py::Test_Blob::test__encryption_headers_w_encryption_key", "tests/unit/test_blob.py::Test_Blob::test__encryption_headers_wo_encryption_key", "tests/unit/test_blob.py::Test_Blob::test__get_content_type_default", "tests/unit/test_blob.py::Test_Blob::test__get_content_type_explicit", "tests/unit/test_blob.py::Test_Blob::test__get_content_type_from_blob", "tests/unit/test_blob.py::Test_Blob::test__get_content_type_from_filename", "tests/unit/test_blob.py::Test_Blob::test__get_download_url_on_the_fly", "tests/unit/test_blob.py::Test_Blob::test__get_download_url_on_the_fly_with_generation", "tests/unit/test_blob.py::Test_Blob::test__get_download_url_on_the_fly_with_kms_key_name", 
"tests/unit/test_blob.py::Test_Blob::test__get_download_url_on_the_fly_with_user_project", "tests/unit/test_blob.py::Test_Blob::test__get_download_url_with_generation_match", "tests/unit/test_blob.py::Test_Blob::test__get_download_url_with_media_link", "tests/unit/test_blob.py::Test_Blob::test__get_download_url_with_media_link_w_user_project", "tests/unit/test_blob.py::Test_Blob::test__get_transport", "tests/unit/test_blob.py::Test_Blob::test__get_upload_arguments", "tests/unit/test_blob.py::Test_Blob::test__get_writable_metadata_no_changes", "tests/unit/test_blob.py::Test_Blob::test__get_writable_metadata_unwritable_field", "tests/unit/test_blob.py::Test_Blob::test__get_writable_metadata_with_changes", "tests/unit/test_blob.py::Test_Blob::test__initiate_resumable_upload_no_size", "tests/unit/test_blob.py::Test_Blob::test__initiate_resumable_upload_with_chunk_size", "tests/unit/test_blob.py::Test_Blob::test__initiate_resumable_upload_with_client", "tests/unit/test_blob.py::Test_Blob::test__initiate_resumable_upload_with_custom_timeout", "tests/unit/test_blob.py::Test_Blob::test__initiate_resumable_upload_with_extra_headers", "tests/unit/test_blob.py::Test_Blob::test__initiate_resumable_upload_with_generation_match", "tests/unit/test_blob.py::Test_Blob::test__initiate_resumable_upload_with_generation_not_match", "tests/unit/test_blob.py::Test_Blob::test__initiate_resumable_upload_with_kms", "tests/unit/test_blob.py::Test_Blob::test__initiate_resumable_upload_with_kms_with_version", "tests/unit/test_blob.py::Test_Blob::test__initiate_resumable_upload_with_metadata", "tests/unit/test_blob.py::Test_Blob::test__initiate_resumable_upload_with_predefined_acl", "tests/unit/test_blob.py::Test_Blob::test__initiate_resumable_upload_with_retry", "tests/unit/test_blob.py::Test_Blob::test__initiate_resumable_upload_with_size", "tests/unit/test_blob.py::Test_Blob::test__initiate_resumable_upload_with_user_project", "tests/unit/test_blob.py::Test_Blob::test__initiate_resumable_upload_without_chunk_size", "tests/unit/test_blob.py::Test_Blob::test__query_params_default", "tests/unit/test_blob.py::Test_Blob::test__query_params_w_generation", "tests/unit/test_blob.py::Test_Blob::test__query_params_w_user_project", "tests/unit/test_blob.py::Test_Blob::test__set_metadata_to_none", "tests/unit/test_blob.py::Test_Blob::test__set_properties_w_kms_key_name", "tests/unit/test_blob.py::Test_Blob::test__set_properties_wo_kms_key_name", "tests/unit/test_blob.py::Test_Blob::test_acl_property", "tests/unit/test_blob.py::Test_Blob::test_bucket_readonly_property", "tests/unit/test_blob.py::Test_Blob::test_cache_control_getter", "tests/unit/test_blob.py::Test_Blob::test_cache_control_setter", "tests/unit/test_blob.py::Test_Blob::test_chunk_size_ctor", "tests/unit/test_blob.py::Test_Blob::test_chunk_size_getter", "tests/unit/test_blob.py::Test_Blob::test_chunk_size_setter", "tests/unit/test_blob.py::Test_Blob::test_chunk_size_setter_bad_value", "tests/unit/test_blob.py::Test_Blob::test_client", "tests/unit/test_blob.py::Test_Blob::test_component_count", "tests/unit/test_blob.py::Test_Blob::test_component_count_string_val", "tests/unit/test_blob.py::Test_Blob::test_component_count_unset", "tests/unit/test_blob.py::Test_Blob::test_compose_minimal_w_user_project", "tests/unit/test_blob.py::Test_Blob::test_compose_w_additional_property_changes", "tests/unit/test_blob.py::Test_Blob::test_compose_w_generation_match", "tests/unit/test_blob.py::Test_Blob::test_compose_w_generation_match_bad_length", 
"tests/unit/test_blob.py::Test_Blob::test_compose_w_generation_match_nones", "tests/unit/test_blob.py::Test_Blob::test_compose_wo_content_type_set", "tests/unit/test_blob.py::Test_Blob::test_content_disposition_getter", "tests/unit/test_blob.py::Test_Blob::test_content_disposition_setter", "tests/unit/test_blob.py::Test_Blob::test_content_encoding_getter", "tests/unit/test_blob.py::Test_Blob::test_content_encoding_setter", "tests/unit/test_blob.py::Test_Blob::test_content_language_getter", "tests/unit/test_blob.py::Test_Blob::test_content_language_setter", "tests/unit/test_blob.py::Test_Blob::test_content_type_getter", "tests/unit/test_blob.py::Test_Blob::test_content_type_setter", "tests/unit/test_blob.py::Test_Blob::test_crc32c_getter", "tests/unit/test_blob.py::Test_Blob::test_crc32c_setter", "tests/unit/test_blob.py::Test_Blob::test_create_resumable_upload_session", "tests/unit/test_blob.py::Test_Blob::test_create_resumable_upload_session_with_custom_timeout", "tests/unit/test_blob.py::Test_Blob::test_create_resumable_upload_session_with_failure", "tests/unit/test_blob.py::Test_Blob::test_create_resumable_upload_session_with_origin", "tests/unit/test_blob.py::Test_Blob::test_ctor_w_encryption_key", "tests/unit/test_blob.py::Test_Blob::test_ctor_w_kms_key_name", "tests/unit/test_blob.py::Test_Blob::test_ctor_w_kms_key_name_and_encryption_key", "tests/unit/test_blob.py::Test_Blob::test_ctor_with_encoded_unicode", "tests/unit/test_blob.py::Test_Blob::test_ctor_with_generation", "tests/unit/test_blob.py::Test_Blob::test_ctor_wo_encryption_key", "tests/unit/test_blob.py::Test_Blob::test_custom_time_getter", "tests/unit/test_blob.py::Test_Blob::test_custom_time_setter_none_value", "tests/unit/test_blob.py::Test_Blob::test_custom_time_unset", "tests/unit/test_blob.py::Test_Blob::test_delete_w_generation", "tests/unit/test_blob.py::Test_Blob::test_delete_w_generation_match", "tests/unit/test_blob.py::Test_Blob::test_delete_wo_generation", "tests/unit/test_blob.py::Test_Blob::test_download_as_byte_w_custom_timeout", "tests/unit/test_blob.py::Test_Blob::test_download_as_bytes_w_generation_match", "tests/unit/test_blob.py::Test_Blob::test_download_as_bytes_w_raw", "tests/unit/test_blob.py::Test_Blob::test_download_as_bytes_wo_raw", "tests/unit/test_blob.py::Test_Blob::test_download_as_string", "tests/unit/test_blob.py::Test_Blob::test_download_as_string_w_hash_response_header_none", "tests/unit/test_blob.py::Test_Blob::test_download_as_string_w_response_headers", "tests/unit/test_blob.py::Test_Blob::test_download_as_string_w_response_headers_not_match", "tests/unit/test_blob.py::Test_Blob::test_download_as_text_w_custom_timeout", "tests/unit/test_blob.py::Test_Blob::test_download_as_text_w_encoding", "tests/unit/test_blob.py::Test_Blob::test_download_as_text_w_generation_match", "tests/unit/test_blob.py::Test_Blob::test_download_as_text_w_raw", "tests/unit/test_blob.py::Test_Blob::test_download_as_text_wo_raw", "tests/unit/test_blob.py::Test_Blob::test_download_to_file_w_chunks_w_raw", "tests/unit/test_blob.py::Test_Blob::test_download_to_file_w_chunks_wo_raw", "tests/unit/test_blob.py::Test_Blob::test_download_to_file_w_custom_timeout", "tests/unit/test_blob.py::Test_Blob::test_download_to_file_w_generation_match", "tests/unit/test_blob.py::Test_Blob::test_download_to_file_with_failure", "tests/unit/test_blob.py::Test_Blob::test_download_to_file_wo_chunks_w_raw", "tests/unit/test_blob.py::Test_Blob::test_download_to_file_wo_chunks_wo_raw", 
"tests/unit/test_blob.py::Test_Blob::test_download_to_file_wo_media_link", "tests/unit/test_blob.py::Test_Blob::test_download_to_filename_corrupted", "tests/unit/test_blob.py::Test_Blob::test_download_to_filename_w_custom_timeout", "tests/unit/test_blob.py::Test_Blob::test_download_to_filename_w_generation_match", "tests/unit/test_blob.py::Test_Blob::test_download_to_filename_w_key", "tests/unit/test_blob.py::Test_Blob::test_download_to_filename_w_updated_w_raw", "tests/unit/test_blob.py::Test_Blob::test_download_to_filename_w_updated_wo_raw", "tests/unit/test_blob.py::Test_Blob::test_download_to_filename_wo_updated_w_raw", "tests/unit/test_blob.py::Test_Blob::test_download_to_filename_wo_updated_wo_raw", "tests/unit/test_blob.py::Test_Blob::test_etag", "tests/unit/test_blob.py::Test_Blob::test_event_based_hold_getter_false", "tests/unit/test_blob.py::Test_Blob::test_event_based_hold_getter_missing", "tests/unit/test_blob.py::Test_Blob::test_event_based_hold_getter_true", "tests/unit/test_blob.py::Test_Blob::test_event_based_hold_setter", "tests/unit/test_blob.py::Test_Blob::test_exists_hit_w_generation", "tests/unit/test_blob.py::Test_Blob::test_exists_hit_w_user_project", "tests/unit/test_blob.py::Test_Blob::test_exists_miss", "tests/unit/test_blob.py::Test_Blob::test_exists_w_generation_match", "tests/unit/test_blob.py::Test_Blob::test_from_string_w_domain_name_bucket", "tests/unit/test_blob.py::Test_Blob::test_from_string_w_invalid_uri", "tests/unit/test_blob.py::Test_Blob::test_from_string_w_valid_uri", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_no_version_passed_warning", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v2_w_content_md5", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v2_w_content_type", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v2_w_credentials", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v2_w_csek", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v2_w_csek_and_headers", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v2_w_defaults", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v2_w_endpoint", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v2_w_expiration", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v2_w_generation", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v2_w_headers", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v2_w_lowercase_method", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v2_w_method", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v2_w_non_ascii_name", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v2_w_response_disposition", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v2_w_response_type", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v2_w_slash_in_name", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v2_w_tilde_in_name", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v4_w_bucket_bound_hostname_w_bare_hostname", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v4_w_bucket_bound_hostname_w_scheme", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v4_w_content_md5", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v4_w_content_type", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v4_w_credentials", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v4_w_csek", 
"tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v4_w_csek_and_headers", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v4_w_defaults", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v4_w_endpoint", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v4_w_generation", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v4_w_headers", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v4_w_lowercase_method", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v4_w_method", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v4_w_non_ascii_name", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v4_w_response_disposition", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v4_w_response_type", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v4_w_slash_in_name", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v4_w_tilde_in_name", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v4_w_virtual_hostname", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_w_invalid_version", "tests/unit/test_blob.py::Test_Blob::test_generation", "tests/unit/test_blob.py::Test_Blob::test_generation_string_val", "tests/unit/test_blob.py::Test_Blob::test_generation_unset", "tests/unit/test_blob.py::Test_Blob::test_get_iam_policy", "tests/unit/test_blob.py::Test_Blob::test_get_iam_policy_w_requested_policy_version", "tests/unit/test_blob.py::Test_Blob::test_get_iam_policy_w_user_project", "tests/unit/test_blob.py::Test_Blob::test_id", "tests/unit/test_blob.py::Test_Blob::test_make_private", "tests/unit/test_blob.py::Test_Blob::test_make_public", "tests/unit/test_blob.py::Test_Blob::test_md5_hash_getter", "tests/unit/test_blob.py::Test_Blob::test_md5_hash_setter", "tests/unit/test_blob.py::Test_Blob::test_media_link", "tests/unit/test_blob.py::Test_Blob::test_metadata_getter", "tests/unit/test_blob.py::Test_Blob::test_metadata_setter", "tests/unit/test_blob.py::Test_Blob::test_metadata_setter_w_nan", "tests/unit/test_blob.py::Test_Blob::test_metageneration", "tests/unit/test_blob.py::Test_Blob::test_metageneration_string_val", "tests/unit/test_blob.py::Test_Blob::test_metageneration_unset", "tests/unit/test_blob.py::Test_Blob::test_owner", "tests/unit/test_blob.py::Test_Blob::test_path_bad_bucket", "tests/unit/test_blob.py::Test_Blob::test_path_no_name", "tests/unit/test_blob.py::Test_Blob::test_path_normal", "tests/unit/test_blob.py::Test_Blob::test_path_w_slash_in_name", "tests/unit/test_blob.py::Test_Blob::test_path_with_non_ascii", "tests/unit/test_blob.py::Test_Blob::test_public_url", "tests/unit/test_blob.py::Test_Blob::test_public_url_w_slash_in_name", "tests/unit/test_blob.py::Test_Blob::test_public_url_w_tilde_in_name", "tests/unit/test_blob.py::Test_Blob::test_public_url_with_non_ascii", "tests/unit/test_blob.py::Test_Blob::test_retention_expiration_time", "tests/unit/test_blob.py::Test_Blob::test_retention_expiration_time_unset", "tests/unit/test_blob.py::Test_Blob::test_rewrite_other_bucket_other_name_no_encryption_partial", "tests/unit/test_blob.py::Test_Blob::test_rewrite_response_without_resource", "tests/unit/test_blob.py::Test_Blob::test_rewrite_same_name_no_key_new_key_w_token", "tests/unit/test_blob.py::Test_Blob::test_rewrite_same_name_no_old_key_new_key_done_w_user_project", "tests/unit/test_blob.py::Test_Blob::test_rewrite_same_name_w_old_key_new_kms_key", "tests/unit/test_blob.py::Test_Blob::test_rewrite_w_generation_match", 
"tests/unit/test_blob.py::Test_Blob::test_rewrite_w_generations", "tests/unit/test_blob.py::Test_Blob::test_self_link", "tests/unit/test_blob.py::Test_Blob::test_set_iam_policy", "tests/unit/test_blob.py::Test_Blob::test_set_iam_policy_w_user_project", "tests/unit/test_blob.py::Test_Blob::test_size", "tests/unit/test_blob.py::Test_Blob::test_size_string_val", "tests/unit/test_blob.py::Test_Blob::test_size_unset", "tests/unit/test_blob.py::Test_Blob::test_storage_class_getter", "tests/unit/test_blob.py::Test_Blob::test_storage_class_setter", "tests/unit/test_blob.py::Test_Blob::test_temporary_hold_getter_false", "tests/unit/test_blob.py::Test_Blob::test_temporary_hold_getter_missing", "tests/unit/test_blob.py::Test_Blob::test_temporary_hold_getter_true", "tests/unit/test_blob.py::Test_Blob::test_temporary_hold_setter", "tests/unit/test_blob.py::Test_Blob::test_test_iam_permissions", "tests/unit/test_blob.py::Test_Blob::test_test_iam_permissions_w_user_project", "tests/unit/test_blob.py::Test_Blob::test_time_created", "tests/unit/test_blob.py::Test_Blob::test_time_created_unset", "tests/unit/test_blob.py::Test_Blob::test_time_deleted", "tests/unit/test_blob.py::Test_Blob::test_time_deleted_unset", "tests/unit/test_blob.py::Test_Blob::test_update_storage_class_invalid", "tests/unit/test_blob.py::Test_Blob::test_update_storage_class_large_file", "tests/unit/test_blob.py::Test_Blob::test_update_storage_class_w_encryption_key_w_user_project", "tests/unit/test_blob.py::Test_Blob::test_update_storage_class_w_generation_match", "tests/unit/test_blob.py::Test_Blob::test_update_storage_class_with_custom_timeout", "tests/unit/test_blob.py::Test_Blob::test_update_storage_class_wo_encryption_key", "tests/unit/test_blob.py::Test_Blob::test_updated", "tests/unit/test_blob.py::Test_Blob::test_updated_unset", "tests/unit/test_blob.py::Test_Blob::test_upload_from_file_failure", "tests/unit/test_blob.py::Test_Blob::test_upload_from_file_success", "tests/unit/test_blob.py::Test_Blob::test_upload_from_file_with_custom_timeout", "tests/unit/test_blob.py::Test_Blob::test_upload_from_file_with_retries", "tests/unit/test_blob.py::Test_Blob::test_upload_from_file_with_rewind", "tests/unit/test_blob.py::Test_Blob::test_upload_from_filename", "tests/unit/test_blob.py::Test_Blob::test_upload_from_filename_w_custom_timeout", "tests/unit/test_blob.py::Test_Blob::test_upload_from_string_w_bytes", "tests/unit/test_blob.py::Test_Blob::test_upload_from_string_w_custom_timeout", "tests/unit/test_blob.py::Test_Blob::test_upload_from_string_w_text", "tests/unit/test_blob.py::Test_Blob::test_user_project", "tests/unit/test_blob.py::Test__quote::test_bad_type", "tests/unit/test_blob.py::Test__quote::test_bytes", "tests/unit/test_blob.py::Test__quote::test_unicode", "tests/unit/test_blob.py::Test__quote::test_w_slash_default", "tests/unit/test_blob.py::Test__quote::test_w_slash_w_safe", "tests/unit/test_blob.py::Test__quote::test_w_tilde", "tests/unit/test_blob.py::Test__maybe_rewind::test_default", "tests/unit/test_blob.py::Test__maybe_rewind::test_do_not_rewind", "tests/unit/test_blob.py::Test__maybe_rewind::test_do_rewind", "tests/unit/test_blob.py::Test__raise_from_invalid_response::test_default", "tests/unit/test_blob.py::Test__raise_from_invalid_response::test_w_206_and_args", "tests/unit/test_blob.py::Test__add_query_parameters::test_w_empty_list", "tests/unit/test_blob.py::Test__add_query_parameters::test_w_existing_qs", "tests/unit/test_blob.py::Test__add_query_parameters::test_wo_existing_qs" ]
[]
Apache License 2.0
9,018
136
[ "google/cloud/storage/blob.py" ]
pymor__pymor-1180
bfaa00045cc0674eac479fa90a9a68db6f2d6276
2020-11-24 09:46:21
70af0f0498d4dbe0630719f2df12043432ab6da6
diff --git a/src/pymor/algorithms/svd_va.py b/src/pymor/algorithms/svd_va.py index d8e364358..d636dada0 100644 --- a/src/pymor/algorithms/svd_va.py +++ b/src/pymor/algorithms/svd_va.py @@ -96,6 +96,10 @@ def method_of_snapshots(A, product=None, modes=None, rtol=1e-7, atol=0., l2_err= if modes is not None: selected_modes = min(selected_modes, modes) + if selected_modes > A.dim: + logger.warning('Number of computed singular vectors larger than array dimension! Truncating ...') + selected_modes = A.dim + s = np.sqrt(evals[:selected_modes]) V = V[:selected_modes] Vh = V.conj()
Better default tolerances for method of snapshots? See discussion in #1012. @lbalicki, as I understand https://www.netlib.org/lapack/lug/node89.html, then every EV smaller than the largest eigenvalue times machine precision should be considered as potentially zero. Everything else should be guaranteed to be a non-zero EV. So maybe it would make sense to throw everything away below that threshold (or a fixed constant times this threshold) and trust that the corresponding eigenvectors are at least not linearly dependent?
pymor/pymor
diff --git a/src/pymortests/algorithms/svd_va.py b/src/pymortests/algorithms/svd_va.py index 43b0d326c..175258c81 100644 --- a/src/pymortests/algorithms/svd_va.py +++ b/src/pymortests/algorithms/svd_va.py @@ -11,6 +11,7 @@ from pymor.algorithms.basic import almost_equal from pymor.algorithms.svd_va import method_of_snapshots, qr_svd from pymor.algorithms.basic import contains_zero_vector from pymor.core.logger import log_levels +from pymor.vectorarrays.numpy import NumpyVectorSpace from pymortests.base import runmodule from pymortests.fixtures.operator import operator_with_arrays_and_products from pymortests.strategies import given_vector_arrays @@ -57,5 +58,12 @@ def test_method_of_snapshots_with_product(operator_with_arrays_and_products, met assert np.all(almost_equal(A, UsVh, rtol=4e-8)) [email protected]('method', methods) +def test_not_too_many_modes(method): + vec_array = NumpyVectorSpace.from_numpy(np.logspace(-5, 0, 10).reshape((-1, 1))) + U, s, V = method(vec_array, atol=0, rtol=0) + assert len(U) == len(s) == len(V) == 1 + + if __name__ == "__main__": runmodule(filename=__file__)
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_hyperlinks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 2, "test_score": 3 }, "num_modified_files": 1 }
2020.1
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "pytest-xdist", "pytest-mock", "pytest-asyncio", "hypothesis", "setuptools" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==25.3.0 click==8.1.8 coverage==7.8.0 Cython==3.0.12 diskcache==5.6.3 exceptiongroup==1.2.2 execnet==2.1.1 hypothesis==6.130.5 iniconfig==2.1.0 markdown-it-py==3.0.0 mdurl==0.1.2 numpy==2.0.2 packaging==24.2 pluggy==1.5.0 Pygments==2.19.1 -e git+https://github.com/pymor/pymor.git@bfaa00045cc0674eac479fa90a9a68db6f2d6276#egg=pymor pytest==8.3.5 pytest-asyncio==0.26.0 pytest-cov==6.0.0 pytest-mock==3.14.0 pytest-runner==6.0.1 pytest-xdist==3.6.1 Qt.py==1.4.2 rich==14.0.0 scipy==1.13.1 shellingham==1.5.4 sortedcontainers==2.4.0 tomli==2.2.1 typer==0.15.2 types-pyside2==5.15.2.1.7 typing_extensions==4.13.0
name: pymor channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==25.3.0 - click==8.1.8 - coverage==7.8.0 - cython==3.0.12 - diskcache==5.6.3 - exceptiongroup==1.2.2 - execnet==2.1.1 - hypothesis==6.130.5 - iniconfig==2.1.0 - markdown-it-py==3.0.0 - mdurl==0.1.2 - numpy==2.0.2 - packaging==24.2 - pluggy==1.5.0 - pygments==2.19.1 - pytest==8.3.5 - pytest-asyncio==0.26.0 - pytest-cov==6.0.0 - pytest-mock==3.14.0 - pytest-runner==6.0.1 - pytest-xdist==3.6.1 - qt-py==1.4.2 - rich==14.0.0 - scipy==1.13.1 - setuptools==49.1.3 - shellingham==1.5.4 - sortedcontainers==2.4.0 - tomli==2.2.1 - typer==0.15.2 - types-pyside2==5.15.2.1.7 - typing-extensions==4.13.0 prefix: /opt/conda/envs/pymor
[ "src/pymortests/algorithms/svd_va.py::test_not_too_many_modes[method_of_snapshots]" ]
[]
[ "src/pymortests/algorithms/svd_va.py::test_method_of_snapshots", "src/pymortests/algorithms/svd_va.py::test_method_of_snapshots_with_product[<lambda>0-method_of_snapshots]", "src/pymortests/algorithms/svd_va.py::test_method_of_snapshots_with_product[<lambda>0-qr_svd]", "src/pymortests/algorithms/svd_va.py::test_method_of_snapshots_with_product[<lambda>1-method_of_snapshots]", "src/pymortests/algorithms/svd_va.py::test_method_of_snapshots_with_product[<lambda>1-qr_svd]", "src/pymortests/algorithms/svd_va.py::test_method_of_snapshots_with_product[<lambda>2-method_of_snapshots]", "src/pymortests/algorithms/svd_va.py::test_method_of_snapshots_with_product[<lambda>2-qr_svd]", "src/pymortests/algorithms/svd_va.py::test_method_of_snapshots_with_product[<lambda>3-method_of_snapshots]", "src/pymortests/algorithms/svd_va.py::test_method_of_snapshots_with_product[<lambda>3-qr_svd]", "src/pymortests/algorithms/svd_va.py::test_method_of_snapshots_with_product[<lambda>4-method_of_snapshots]", "src/pymortests/algorithms/svd_va.py::test_method_of_snapshots_with_product[<lambda>4-qr_svd]", "src/pymortests/algorithms/svd_va.py::test_method_of_snapshots_with_product[<lambda>5-method_of_snapshots]", "src/pymortests/algorithms/svd_va.py::test_method_of_snapshots_with_product[<lambda>5-qr_svd]", "src/pymortests/algorithms/svd_va.py::test_method_of_snapshots_with_product[<lambda>6-method_of_snapshots]", "src/pymortests/algorithms/svd_va.py::test_method_of_snapshots_with_product[<lambda>6-qr_svd]", "src/pymortests/algorithms/svd_va.py::test_method_of_snapshots_with_product[<lambda>7-method_of_snapshots]", "src/pymortests/algorithms/svd_va.py::test_method_of_snapshots_with_product[<lambda>7-qr_svd]", "src/pymortests/algorithms/svd_va.py::test_method_of_snapshots_with_product[<lambda>8-method_of_snapshots]", "src/pymortests/algorithms/svd_va.py::test_method_of_snapshots_with_product[<lambda>8-qr_svd]", "src/pymortests/algorithms/svd_va.py::test_method_of_snapshots_with_product[<lambda>9-method_of_snapshots]", "src/pymortests/algorithms/svd_va.py::test_method_of_snapshots_with_product[<lambda>9-qr_svd]", "src/pymortests/algorithms/svd_va.py::test_method_of_snapshots_with_product[<lambda>10-method_of_snapshots]", "src/pymortests/algorithms/svd_va.py::test_method_of_snapshots_with_product[<lambda>10-qr_svd]", "src/pymortests/algorithms/svd_va.py::test_method_of_snapshots_with_product[<lambda>11-method_of_snapshots]", "src/pymortests/algorithms/svd_va.py::test_method_of_snapshots_with_product[<lambda>11-qr_svd]", "src/pymortests/algorithms/svd_va.py::test_method_of_snapshots_with_product[<lambda>12-method_of_snapshots]", "src/pymortests/algorithms/svd_va.py::test_method_of_snapshots_with_product[<lambda>12-qr_svd]", "src/pymortests/algorithms/svd_va.py::test_method_of_snapshots_with_product[<lambda>13-method_of_snapshots]", "src/pymortests/algorithms/svd_va.py::test_method_of_snapshots_with_product[<lambda>13-qr_svd]", "src/pymortests/algorithms/svd_va.py::test_method_of_snapshots_with_product[<lambda>14-method_of_snapshots]", "src/pymortests/algorithms/svd_va.py::test_method_of_snapshots_with_product[<lambda>14-qr_svd]", "src/pymortests/algorithms/svd_va.py::test_method_of_snapshots_with_product[<lambda>15-method_of_snapshots]", "src/pymortests/algorithms/svd_va.py::test_method_of_snapshots_with_product[<lambda>15-qr_svd]", "src/pymortests/algorithms/svd_va.py::test_method_of_snapshots_with_product[<lambda>16-method_of_snapshots]", 
"src/pymortests/algorithms/svd_va.py::test_method_of_snapshots_with_product[<lambda>16-qr_svd]", "src/pymortests/algorithms/svd_va.py::test_method_of_snapshots_with_product[<lambda>17-method_of_snapshots]", "src/pymortests/algorithms/svd_va.py::test_method_of_snapshots_with_product[<lambda>17-qr_svd]", "src/pymortests/algorithms/svd_va.py::test_method_of_snapshots_with_product[<lambda>18-method_of_snapshots]", "src/pymortests/algorithms/svd_va.py::test_method_of_snapshots_with_product[<lambda>18-qr_svd]", "src/pymortests/algorithms/svd_va.py::test_method_of_snapshots_with_product[<lambda>19-method_of_snapshots]", "src/pymortests/algorithms/svd_va.py::test_method_of_snapshots_with_product[<lambda>19-qr_svd]", "src/pymortests/algorithms/svd_va.py::test_method_of_snapshots_with_product[<lambda>20-method_of_snapshots]", "src/pymortests/algorithms/svd_va.py::test_method_of_snapshots_with_product[<lambda>20-qr_svd]", "src/pymortests/algorithms/svd_va.py::test_method_of_snapshots_with_product[<lambda>21-method_of_snapshots]", "src/pymortests/algorithms/svd_va.py::test_method_of_snapshots_with_product[<lambda>21-qr_svd]", "src/pymortests/algorithms/svd_va.py::test_method_of_snapshots_with_product[<lambda>22-method_of_snapshots]", "src/pymortests/algorithms/svd_va.py::test_method_of_snapshots_with_product[<lambda>22-qr_svd]", "src/pymortests/algorithms/svd_va.py::test_method_of_snapshots_with_product[<lambda>23-method_of_snapshots]", "src/pymortests/algorithms/svd_va.py::test_method_of_snapshots_with_product[<lambda>23-qr_svd]", "src/pymortests/algorithms/svd_va.py::test_method_of_snapshots_with_product[<lambda>24-method_of_snapshots]", "src/pymortests/algorithms/svd_va.py::test_method_of_snapshots_with_product[<lambda>24-qr_svd]", "src/pymortests/algorithms/svd_va.py::test_method_of_snapshots_with_product[<lambda>25-method_of_snapshots]", "src/pymortests/algorithms/svd_va.py::test_method_of_snapshots_with_product[<lambda>25-qr_svd]", "src/pymortests/algorithms/svd_va.py::test_method_of_snapshots_with_product[<lambda>26-method_of_snapshots]", "src/pymortests/algorithms/svd_va.py::test_method_of_snapshots_with_product[<lambda>26-qr_svd]", "src/pymortests/algorithms/svd_va.py::test_method_of_snapshots_with_product[<lambda>27-method_of_snapshots]", "src/pymortests/algorithms/svd_va.py::test_method_of_snapshots_with_product[<lambda>27-qr_svd]", "src/pymortests/algorithms/svd_va.py::test_method_of_snapshots_with_product[<lambda>28-method_of_snapshots]", "src/pymortests/algorithms/svd_va.py::test_method_of_snapshots_with_product[<lambda>28-qr_svd]", "src/pymortests/algorithms/svd_va.py::test_method_of_snapshots_with_product[<lambda>29-method_of_snapshots]", "src/pymortests/algorithms/svd_va.py::test_method_of_snapshots_with_product[<lambda>29-qr_svd]", "src/pymortests/algorithms/svd_va.py::test_method_of_snapshots_with_product[<lambda>30-method_of_snapshots]", "src/pymortests/algorithms/svd_va.py::test_method_of_snapshots_with_product[<lambda>30-qr_svd]", "src/pymortests/algorithms/svd_va.py::test_method_of_snapshots_with_product[<lambda>31-method_of_snapshots]", "src/pymortests/algorithms/svd_va.py::test_method_of_snapshots_with_product[<lambda>31-qr_svd]", "src/pymortests/algorithms/svd_va.py::test_method_of_snapshots_with_product[<lambda>32-method_of_snapshots]", "src/pymortests/algorithms/svd_va.py::test_method_of_snapshots_with_product[<lambda>32-qr_svd]", "src/pymortests/algorithms/svd_va.py::test_method_of_snapshots_with_product[<lambda>36-method_of_snapshots]", 
"src/pymortests/algorithms/svd_va.py::test_method_of_snapshots_with_product[<lambda>36-qr_svd]", "src/pymortests/algorithms/svd_va.py::test_not_too_many_modes[qr_svd]" ]
[]
BSD License
9,025
203
[ "src/pymor/algorithms/svd_va.py" ]
keleshev__schema-247
a600fb465bdf2f4d4896df7d411ad3e57a4713bf
2020-11-25 07:01:13
528742368398d8ae78b0eda423371cd7f24645fd
ioerror: @skorokithakis Would it be possible to merge this PR? skorokithakis: Certainly, sorry, not sure why I didn't see this. Does it work well for you, @ioerror? Have you tried it?
diff --git a/schema.py b/schema.py index ef5dede..a1ada98 100644 --- a/schema.py +++ b/schema.py @@ -220,7 +220,7 @@ class Regex(object): if self._pattern.search(data): return data else: - raise SchemaError("%r does not match %r" % (self, data), e) + raise SchemaError("%r does not match %r" % (self, data), e.format(data) if e else None) except TypeError: raise SchemaError("%r is not string nor buffer" % data, e) @@ -344,7 +344,7 @@ class Schema(object): def validate(self, data): Schema = self.__class__ s = self._schema - e = self._error.format(data) if self._error else None + e = self._error i = self._ignore_extra_keys if isinstance(s, Literal): @@ -397,7 +397,7 @@ class Schema(object): except SchemaError as x: k = "Key '%s' error:" % nkey message = self._prepend_schema_name(k) - raise SchemaError([message] + x.autos, [e] + x.errors) + raise SchemaError([message] + x.autos, [e.format(data) if e else None] + x.errors) else: new[nkey] = nvalue coverage.add(skey) @@ -408,13 +408,13 @@ class Schema(object): s_missing_keys = ", ".join(repr(k) for k in sorted(missing_keys, key=repr)) message = "Missing key%s: %s" % (_plural_s(missing_keys), s_missing_keys) message = self._prepend_schema_name(message) - raise SchemaMissingKeyError(message, e) + raise SchemaMissingKeyError(message, e.format(data) if e else None) if not self._ignore_extra_keys and (len(new) != len(data)): wrong_keys = set(data.keys()) - set(new.keys()) s_wrong_keys = ", ".join(repr(k) for k in sorted(wrong_keys, key=repr)) message = "Wrong key%s %s in %r" % (_plural_s(wrong_keys), s_wrong_keys, data) message = self._prepend_schema_name(message) - raise SchemaWrongKeyError(message, e) + raise SchemaWrongKeyError(message, e.format(data) if e else None) # Apply default-having optionals that haven't been used: defaults = set(k for k in s if type(k) is Optional and hasattr(k, "default")) - coverage @@ -428,36 +428,36 @@ class Schema(object): else: message = "%r should be instance of %r" % (data, s.__name__) message = self._prepend_schema_name(message) - raise SchemaUnexpectedTypeError(message, e) + raise SchemaUnexpectedTypeError(message, e.format(data) if e else None) if flavor == VALIDATOR: try: return s.validate(data) except SchemaError as x: - raise SchemaError([None] + x.autos, [e] + x.errors) + raise SchemaError([None] + x.autos, [e.format(data) if e else None] + x.errors) except BaseException as x: message = "%r.validate(%r) raised %r" % (s, data, x) message = self._prepend_schema_name(message) - raise SchemaError(message, e) + raise SchemaError(message, e.format(data) if e else None) if flavor == CALLABLE: f = _callable_str(s) try: if s(data): return data except SchemaError as x: - raise SchemaError([None] + x.autos, [e] + x.errors) + raise SchemaError([None] + x.autos, [e.format(data) if e else None] + x.errors) except BaseException as x: message = "%s(%r) raised %r" % (f, data, x) message = self._prepend_schema_name(message) - raise SchemaError(message, e) + raise SchemaError(message, e.format(data) if e else None) message = "%s(%r) should evaluate to True" % (f, data) message = self._prepend_schema_name(message) - raise SchemaError(message, e) + raise SchemaError(message, e.format(data) if e else None) if s == data: return data else: message = "%r does not match %r" % (s, data) message = self._prepend_schema_name(message) - raise SchemaError(message, e) + raise SchemaError(message, e.format(data) if e else None) def json_schema(self, schema_id, use_refs=False): """Generate a draft-07 JSON schema dict representing the Schema.
Formatting error when supplying raw dict as schema to `Or` I'm not sure if this is valid Schema usage, but I wanted to drop my reproduction here just in case. I have a schema that looks like this: ```python import schema as s test1 = s.Or(str, { s.Optional("gpu"): str, s.Optional("cpu"): str }, error="Got '{}'") test1.validate({"cpu": "face", "gpu": "cake"}) ``` Trying to run that code throws this error: ```python self = Schema(<class 'dict'>), data = {'cpu': 'face', 'gpu': 'cake'} def validate(self, data): Schema = self.__class__ s = self._schema > e = self._error.format(data) if self._error else None E KeyError: "'cpu'" env/lib/python3.6/site-packages/schema.py:372: KeyError ``` When I trace, it looks like what's happening is that `self._error` on the `Or` schema is getting set to the FILLED schema, on this line: https://github.com/keleshev/schema/blob/master/schema.py#L345 ie, when I insert `print(self._error)` there, I see: ```python Got '{}' Got '{'cpu': 'face', 'gpu': 'cake'}' ``` Which creates this case: ```python >>> "Got '{'cpu': 'face', 'gpu': 'cake'}'".format({'cpu': 'face', 'gpu': 'cake'}) Traceback (most recent call last): File "<stdin>", line 1, in <module> KeyError: "'cpu'" ``` The issue's solved if I wrap the dictionary in `s.Schema`... but it might be worth having `Or`, `And` etc do the wrapping for us if it sees a raw dict to prevent this issue. Hope this helps!
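A short sketch of the workaround mentioned at the end of this report (wrapping the dict literal in `s.Schema` before handing it to `Or`); the values are taken from the reproduction above, and the "no exception" claim is the reporter's observation rather than guaranteed behaviour:

```python
# Workaround from the report: pre-wrap the dict literal in s.Schema so that
# Or receives a Schema instance instead of a raw dict.
import schema as s

test1 = s.Or(
    str,
    s.Schema({
        s.Optional("gpu"): str,
        s.Optional("cpu"): str,
    }),
    error="Got '{}'",
)
print(test1.validate({"cpu": "face", "gpu": "cake"}))  # validates without the KeyError
```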
keleshev/schema
diff --git a/test_schema.py b/test_schema.py index fb7a59f..d8af5e0 100644 --- a/test_schema.py +++ b/test_schema.py @@ -1600,3 +1600,18 @@ def test_prepend_schema_name(): Schema(int, name="custom_schemaname").validate("a") except SchemaUnexpectedTypeError as e: assert str(e) == "'custom_schemaname' 'a' should be instance of 'int'" + + +def test_dict_literal_error_string(): + # this is a simplified regression test of the bug in github issue #240 + assert Schema(Or({"a": 1}, error="error: {}")).is_valid(dict(a=1)) + + +def test_callable_error(): + # this tests for the behavior desired in github pull request #238 + e = None + try: + Schema(lambda d: False, error="{}").validate("This is the error message") + except SchemaError as ex: + e = ex + assert e.errors == ["This is the error message"]
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 2 }, "num_modified_files": 1 }
0.7
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "mock" ], "pre_install": null, "python": "3.8", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --doctest-glob=README.rst --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
contextlib2==21.6.0 exceptiongroup==1.2.2 iniconfig==2.1.0 mock==5.2.0 packaging==24.2 pluggy==1.5.0 pytest==8.3.5 -e git+https://github.com/keleshev/schema.git@a600fb465bdf2f4d4896df7d411ad3e57a4713bf#egg=schema tomli==2.2.1
name: schema channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=24.2=py38h06a4308_0 - python=3.8.20=he870216_0 - readline=8.2=h5eee18b_0 - setuptools=75.1.0=py38h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.44.0=py38h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - contextlib2==21.6.0 - exceptiongroup==1.2.2 - iniconfig==2.1.0 - mock==5.2.0 - packaging==24.2 - pluggy==1.5.0 - pytest==8.3.5 - schema==0.7.3 - tomli==2.2.1 prefix: /opt/conda/envs/schema
[ "test_schema.py::test_dict_literal_error_string" ]
[]
[ "test_schema.py::test_schema", "test_schema.py::test_validate_file", "test_schema.py::test_and", "test_schema.py::test_or", "test_schema.py::test_or_only_one", "test_schema.py::test_test", "test_schema.py::test_regex", "test_schema.py::test_validate_list", "test_schema.py::test_list_tuple_set_frozenset", "test_schema.py::test_strictly", "test_schema.py::test_dict", "test_schema.py::test_dict_keys", "test_schema.py::test_ignore_extra_keys", "test_schema.py::test_ignore_extra_keys_validation_and_return_keys", "test_schema.py::test_dict_forbidden_keys", "test_schema.py::test_dict_hook", "test_schema.py::test_dict_optional_keys", "test_schema.py::test_dict_optional_defaults", "test_schema.py::test_dict_subtypes", "test_schema.py::test_dict_key_error", "test_schema.py::test_complex", "test_schema.py::test_nice_errors", "test_schema.py::test_use_error_handling", "test_schema.py::test_or_error_handling", "test_schema.py::test_and_error_handling", "test_schema.py::test_schema_error_handling", "test_schema.py::test_use_json", "test_schema.py::test_error_reporting", "test_schema.py::test_schema_repr", "test_schema.py::test_validate_object", "test_schema.py::test_issue_9_prioritized_key_comparison", "test_schema.py::test_issue_9_prioritized_key_comparison_in_dicts", "test_schema.py::test_missing_keys_exception_with_non_str_dict_keys", "test_schema.py::test_issue_56_cant_rely_on_callables_to_have_name", "test_schema.py::test_exception_handling_with_bad_validators", "test_schema.py::test_issue_83_iterable_validation_return_type", "test_schema.py::test_optional_key_convert_failed_randomly_while_with_another_optional_object", "test_schema.py::test_copy", "test_schema.py::test_inheritance", "test_schema.py::test_literal_repr", "test_schema.py::test_json_schema", "test_schema.py::test_json_schema_with_title", "test_schema.py::test_json_schema_types", "test_schema.py::test_json_schema_other_types", "test_schema.py::test_json_schema_nested", "test_schema.py::test_json_schema_nested_schema", "test_schema.py::test_json_schema_optional_key", "test_schema.py::test_json_schema_optional_key_nested", "test_schema.py::test_json_schema_or_key", "test_schema.py::test_json_schema_or_values", "test_schema.py::test_json_schema_or_values_nested", "test_schema.py::test_json_schema_or_values_with_optional", "test_schema.py::test_json_schema_regex", "test_schema.py::test_json_schema_or_types", "test_schema.py::test_json_schema_or_only_one", "test_schema.py::test_json_schema_and_types", "test_schema.py::test_json_schema_or_one_value", "test_schema.py::test_json_schema_const_is_none", "test_schema.py::test_json_schema_const_is_callable", "test_schema.py::test_json_schema_const_is_custom_type", "test_schema.py::test_json_schema_default_is_custom_type", "test_schema.py::test_json_schema_object_or_array_of_object", "test_schema.py::test_json_schema_and_simple", "test_schema.py::test_json_schema_and_list", "test_schema.py::test_json_schema_forbidden_key_ignored", "test_schema.py::test_json_schema_additional_properties[input_schema0-False-False]", "test_schema.py::test_json_schema_additional_properties[input_schema1-False-True]", "test_schema.py::test_json_schema_additional_properties[input_schema2-False-True]", "test_schema.py::test_json_schema_additional_properties[input_schema3-False-True]", "test_schema.py::test_json_schema_additional_properties[input_schema4-True-True]", "test_schema.py::test_json_schema_additional_properties_multiple", "test_schema.py::test_json_schema_root_not_dict[int-type-integer]", 
"test_schema.py::test_json_schema_root_not_dict[float-type-number]", "test_schema.py::test_json_schema_root_not_dict[list-type-array]", "test_schema.py::test_json_schema_root_not_dict[bool-type-boolean]", "test_schema.py::test_json_schema_root_not_dict[dict-type-object]", "test_schema.py::test_json_schema_root_not_dict[test-const-test]", "test_schema.py::test_json_schema_root_not_dict[input_schema6-enum-expected_value6]", "test_schema.py::test_json_schema_root_not_dict[input_schema7-anyOf-expected_value7]", "test_schema.py::test_json_schema_root_not_dict[input_schema8-allOf-expected_value8]", "test_schema.py::test_json_schema_array[input_schema0-enum-expected_value0]", "test_schema.py::test_json_schema_array[input_schema1-const-1]", "test_schema.py::test_json_schema_array[input_schema2-type-string]", "test_schema.py::test_json_schema_regex_root", "test_schema.py::test_json_schema_dict_type", "test_schema.py::test_json_schema_title_and_description", "test_schema.py::test_json_schema_description_nested", "test_schema.py::test_json_schema_description_or_nested", "test_schema.py::test_json_schema_literal_with_enum", "test_schema.py::test_json_schema_description_and_nested", "test_schema.py::test_description", "test_schema.py::test_description_with_default", "test_schema.py::test_json_schema_ref_in_list", "test_schema.py::test_json_schema_refs", "test_schema.py::test_json_schema_refs_is_smaller", "test_schema.py::test_json_schema_refs_no_missing", "test_schema.py::test_json_schema_definitions", "test_schema.py::test_json_schema_definitions_and_literals", "test_schema.py::test_json_schema_definitions_nested", "test_schema.py::test_json_schema_definitions_recursive", "test_schema.py::test_json_schema_definitions_invalid", "test_schema.py::test_json_schema_default_value", "test_schema.py::test_json_schema_default_value_with_literal", "test_schema.py::test_json_schema_default_is_none", "test_schema.py::test_json_schema_default_is_tuple", "test_schema.py::test_json_schema_default_is_literal", "test_schema.py::test_prepend_schema_name", "test_schema.py::test_callable_error" ]
[]
MIT License
9,028
1,107
[ "schema.py" ]
pytorch__ignite-1478
cc098a487b16e925edf58dcc2e20d59001c33a82
2020-11-25 19:27:45
a7246e118cf2846b03f4872a1b78adf09e23f4bc
vfdev-5: @sdesrozis tests are failing: https://github.com/pytorch/ignite/runs/1455446251 could you please fix them?
diff --git a/ignite/metrics/metric.py b/ignite/metrics/metric.py index b555a41c..000769f2 100644 --- a/ignite/metrics/metric.py +++ b/ignite/metrics/metric.py @@ -300,11 +300,18 @@ class Metric(metaclass=ABCMeta): Args: engine (Engine): the engine to which the metric must be attached + name (str): the name of the metric used as key in dict `engine.state.metrics` """ result = self.compute() if isinstance(result, Mapping): + if name in result.keys(): + raise ValueError( + "Argument name '{}' is conflicting with mapping keys: {}".format(name, list(result.keys())) + ) + for key, value in result.items(): engine.state.metrics[key] = value + engine.state.metrics[name] = result else: if isinstance(result, torch.Tensor) and len(result.size()) == 0: result = result.item()
Breaking change in metrics behaviour ## 🐛 Bug description Following https://github.com/pytorch/ignite/pull/968 (and the associated issue) the metric's output is flattened if it returns a mapping/dict. In detail, with ignite v0.4.2, for a custom metric we have the following: ```python class PerformanceIndicators(Metric): def compute(self): # ... return { 'a': 12, 'b': 23 } PerformanceIndicators().attach(evaluator, name="indicators") assert "a" in evaluator.state.metrics assert "b" in evaluator.state.metrics # This is a breaking change introduced by the PR assert not "indicators" in evaluator.state.metrics print(evaluator.state.metrics) > {'a': 12, 'b': 23} ``` The questions we would like to address in this issue: - Should we fix the breaking change by re-adding the dict into `evaluator.state.metrics` such that we'll have: ``` print(evaluator.state.metrics) > {'a': 12, 'b': 23, 'indicators': {'a': 12, 'b': 23}} ``` - The `name` parameter (e.g. "indicators") is never used. Should we append it to the flattened names (e.g. `indicators/a`, `indicators/b`) or can we accept that it is never used? Thanks @lidq92 for reporting this. ## Environment - PyTorch Version (e.g., 1.4): 1.6.0 - Ignite Version (e.g., 0.3.0): 0.4.2 - OS (e.g., Linux): - How you installed Ignite (`conda`, `pip`, source): - Python version: - Any other relevant information: cc @sdesrozis @liebkne
pytorch/ignite
diff --git a/tests/ignite/metrics/test_metric.py b/tests/ignite/metrics/test_metric.py index f9e46077..c8fa6593 100644 --- a/tests/ignite/metrics/test_metric.py +++ b/tests/ignite/metrics/test_metric.py @@ -672,7 +672,10 @@ def test_completed(): engine = MagicMock(state=State(metrics={})) metrics = {"foo": 1, "bar": torch.tensor(2.0), "baz": {"qux": "quux"}} m.compute = MagicMock(return_value=metrics) + with pytest.raises(ValueError, match=r"Argument name 'foo' is conflicting with mapping keys"): + m.completed(engine, "foo") m.completed(engine, "metric") + metrics["metric"] = metrics assert engine.state.metrics == metrics # other
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 2 }, "num_modified_files": 1 }
0.4
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc", "pip install torch torchvision -f https://download.pytorch.org/whl/cpu/torch_stable.html -U" ], "python": "3.7", "reqs_path": [ "requirements-dev.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
absl-py==2.1.0 alembic==1.12.1 arrow==1.2.3 attrs==24.2.0 boto3==1.33.13 botocore==1.33.13 bravado==11.1.0 bravado-core==6.1.1 cached-property==1.5.2 cachetools==5.5.2 certifi @ file:///croot/certifi_1671487769961/work/certifi charset-normalizer==3.4.1 click==7.1.2 cloudpickle==2.2.1 codecov==2.1.13 coverage==7.2.7 cycler==0.11.0 databricks-cli==0.18.0 dill==0.3.7 docker==6.1.3 docker-pycreds==0.4.0 entrypoints==0.4 exceptiongroup==1.2.2 execnet==2.0.2 Flask==1.1.4 fonttools==4.38.0 fqdn==1.5.1 funcsigs==1.0.2 furl==2.1.4 future==1.0.0 gitdb==4.0.12 GitPython==3.1.44 google-auth==2.38.0 google-auth-oauthlib==0.4.6 greenlet==3.1.1 grpcio==1.62.3 gunicorn==20.1.0 gym==0.26.2 gym-notices==0.0.8 hestia==0.6.0 humanfriendly==10.0 idna==3.10 imageio==2.31.2 importlib-metadata==5.2.0 importlib-resources==5.12.0 iniconfig==2.0.0 isoduration==20.11.0 itsdangerous==1.1.0 Jinja2==2.10.3 jmespath==1.0.1 joblib==1.3.2 jsonpatch==1.33 jsonpointer==3.0.0 jsonref==1.1.0 jsonschema==4.17.3 kiwisolver==1.4.5 Mako==1.2.4 Markdown==3.4.4 MarkupSafe==2.1.5 marshmallow==3.0.0rc5 matplotlib==3.5.3 mlflow==1.30.1 monotonic==1.6 msgpack==1.0.5 neptune-client==1.11.1 networkx==2.6.3 numpy==1.21.6 oauthlib==3.2.2 orderedmultidict==1.0.1 packaging==21.3 pandas==1.3.5 pathlib2==2.3.7.post1 Pillow==9.5.0 pkgutil_resolve_name==1.3.10 platformdirs==4.0.0 pluggy==1.2.0 polyaxon-client==0.6.1 polyaxon-schemas==0.6.1 polystores==0.2.5 prometheus-client==0.17.1 prometheus_flask_exporter==0.23.2 protobuf==3.20.3 psutil==7.0.0 pyasn1==0.5.1 pyasn1-modules==0.3.0 PyJWT==2.8.0 pynvml==11.5.3 pyparsing==3.1.4 pyrsistent==0.19.3 pytest==7.4.4 pytest-cov==4.1.0 pytest-xdist==3.5.0 python-dateutil==2.9.0.post0 -e git+https://github.com/pytorch/ignite.git@cc098a487b16e925edf58dcc2e20d59001c33a82#egg=pytorch_ignite pytz==2022.7.1 PyWavelets==1.3.0 PyYAML==6.0.1 querystring-parser==1.2.4 requests==2.31.0 requests-file==2.1.0 requests-oauthlib==2.0.0 requests-toolbelt==1.0.0 rfc3339-validator==0.1.4 rfc3986-validator==0.1.1 rhea==0.5.5 rsa==4.9 s3transfer==0.8.2 scikit-image==0.19.3 scikit-learn==1.0.2 scipy==1.7.3 sentry-sdk==2.24.1 setproctitle==1.3.3 simplejson==3.20.1 six==1.17.0 smmap==5.0.2 SQLAlchemy==1.4.54 sqlparse==0.4.4 swagger-spec-validator==3.0.3 tabulate==0.9.0 tensorboard==2.11.2 tensorboard-data-server==0.6.1 tensorboard-plugin-wit==1.8.1 tensorboardX==2.6.2.2 threadpoolctl==3.1.0 tifffile==2021.11.2 tomli==2.0.1 torch==1.13.1+cpu torchvision==0.14.1+cpu tornado==6.2 tqdm==4.67.1 trains==0.16.4 typing_extensions==4.7.1 uri-template==1.3.0 urllib3==1.26.20 visdom==0.2.4 wandb==0.18.7 webcolors==1.13 websocket-client==1.6.1 Werkzeug==1.0.1 zipp==3.15.0
name: ignite channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2022.12.7=py37h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=22.3.1=py37h06a4308_0 - python=3.7.16=h7a1cb2a_0 - readline=8.2=h5eee18b_0 - setuptools=65.6.3=py37h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.38.4=py37h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - absl-py==2.1.0 - alembic==1.12.1 - arrow==1.2.3 - attrs==24.2.0 - boto3==1.33.13 - botocore==1.33.13 - bravado==11.1.0 - bravado-core==6.1.1 - cached-property==1.5.2 - cachetools==5.5.2 - charset-normalizer==3.4.1 - click==7.1.2 - cloudpickle==2.2.1 - codecov==2.1.13 - coverage==7.2.7 - cycler==0.11.0 - databricks-cli==0.18.0 - dill==0.3.7 - docker==6.1.3 - docker-pycreds==0.4.0 - entrypoints==0.4 - exceptiongroup==1.2.2 - execnet==2.0.2 - flask==1.1.4 - fonttools==4.38.0 - fqdn==1.5.1 - funcsigs==1.0.2 - furl==2.1.4 - future==1.0.0 - gitdb==4.0.12 - gitpython==3.1.44 - google-auth==2.38.0 - google-auth-oauthlib==0.4.6 - greenlet==3.1.1 - grpcio==1.62.3 - gunicorn==20.1.0 - gym==0.26.2 - gym-notices==0.0.8 - hestia==0.6.0 - humanfriendly==10.0 - idna==3.10 - imageio==2.31.2 - importlib-metadata==5.2.0 - importlib-resources==5.12.0 - iniconfig==2.0.0 - isoduration==20.11.0 - itsdangerous==1.1.0 - jinja2==2.10.3 - jmespath==1.0.1 - joblib==1.3.2 - jsonpatch==1.33 - jsonpointer==3.0.0 - jsonref==1.1.0 - jsonschema==4.17.3 - kiwisolver==1.4.5 - mako==1.2.4 - markdown==3.4.4 - markupsafe==2.1.5 - marshmallow==3.0.0rc5 - matplotlib==3.5.3 - mlflow==1.30.1 - monotonic==1.6 - msgpack==1.0.5 - neptune-client==1.11.1 - networkx==2.6.3 - numpy==1.21.6 - oauthlib==3.2.2 - orderedmultidict==1.0.1 - packaging==21.3 - pandas==1.3.5 - pathlib2==2.3.7.post1 - pillow==9.5.0 - pkgutil-resolve-name==1.3.10 - platformdirs==4.0.0 - pluggy==1.2.0 - polyaxon-client==0.6.1 - polyaxon-schemas==0.6.1 - polystores==0.2.5 - prometheus-client==0.17.1 - prometheus-flask-exporter==0.23.2 - protobuf==3.20.3 - psutil==7.0.0 - pyasn1==0.5.1 - pyasn1-modules==0.3.0 - pyjwt==2.8.0 - pynvml==11.5.3 - pyparsing==3.1.4 - pyrsistent==0.19.3 - pytest==7.4.4 - pytest-cov==4.1.0 - pytest-xdist==3.5.0 - python-dateutil==2.9.0.post0 - pytorch-ignite==0.5.0 - pytz==2022.7.1 - pywavelets==1.3.0 - pyyaml==6.0.1 - querystring-parser==1.2.4 - requests==2.31.0 - requests-file==2.1.0 - requests-oauthlib==2.0.0 - requests-toolbelt==1.0.0 - rfc3339-validator==0.1.4 - rfc3986-validator==0.1.1 - rhea==0.5.5 - rsa==4.9 - s3transfer==0.8.2 - scikit-image==0.19.3 - scikit-learn==1.0.2 - scipy==1.7.3 - sentry-sdk==2.24.1 - setproctitle==1.3.3 - simplejson==3.20.1 - six==1.17.0 - smmap==5.0.2 - sqlalchemy==1.4.54 - sqlparse==0.4.4 - swagger-spec-validator==3.0.3 - tabulate==0.9.0 - tensorboard==2.11.2 - tensorboard-data-server==0.6.1 - tensorboard-plugin-wit==1.8.1 - tensorboardx==2.6.2.2 - threadpoolctl==3.1.0 - tifffile==2021.11.2 - tomli==2.0.1 - torch==1.13.1+cpu - torchvision==0.14.1+cpu - tornado==6.2 - tqdm==4.67.1 - trains==0.16.4 - typing-extensions==4.7.1 - uri-template==1.3.0 - urllib3==1.26.20 - visdom==0.2.4 - wandb==0.18.7 - webcolors==1.13 - websocket-client==1.6.1 - werkzeug==1.0.1 - zipp==3.15.0 prefix: /opt/conda/envs/ignite
[ "tests/ignite/metrics/test_metric.py::test_completed" ]
[]
[ "tests/ignite/metrics/test_metric.py::test_no_transform", "tests/ignite/metrics/test_metric.py::test_transform", "tests/ignite/metrics/test_metric.py::test_output_as_mapping_wrong_keys", "tests/ignite/metrics/test_metric.py::test_output_as_mapping_keys_is_none", "tests/ignite/metrics/test_metric.py::test_output_as_mapping", "tests/ignite/metrics/test_metric.py::test_no_grad", "tests/ignite/metrics/test_metric.py::test_arithmetics", "tests/ignite/metrics/test_metric.py::test_attach", "tests/ignite/metrics/test_metric.py::test_detach", "tests/ignite/metrics/test_metric.py::test_integration", "tests/ignite/metrics/test_metric.py::test_abstract_class", "tests/ignite/metrics/test_metric.py::test_pytorch_operators", "tests/ignite/metrics/test_metric.py::test_indexing_metric", "tests/ignite/metrics/test_metric.py::test_distrib_cpu", "tests/ignite/metrics/test_metric.py::test_usage_exception", "tests/ignite/metrics/test_metric.py::test_epochwise_usage", "tests/ignite/metrics/test_metric.py::test_batchwise_usage", "tests/ignite/metrics/test_metric.py::test_batchfiltered_usage", "tests/ignite/metrics/test_metric.py::test_override_required_output_keys" ]
[]
BSD 3-Clause "New" or "Revised" License
9,035
238
[ "ignite/metrics/metric.py" ]
AzureAD__microsoft-authentication-library-for-python-280
dfbbc66730a1f14cf8c27cb3562ecf9bd611a1a9
2020-11-25 20:00:55
dfbbc66730a1f14cf8c27cb3562ecf9bd611a1a9
diff --git a/msal/token_cache.py b/msal/token_cache.py index b7ebbb9..34eff37 100644 --- a/msal/token_cache.py +++ b/msal/token_cache.py @@ -234,8 +234,9 @@ class TokenCache(object): with self._lock: if new_key_value_pairs: # Update with them entries = self._cache.setdefault(credential_type, {}) - entry = entries.setdefault(key, {}) # Create it if not yet exist - entry.update(new_key_value_pairs) + entries[key] = dict( + old_entry, # Do not use entries[key] b/c it might not exist + **new_key_value_pairs) else: # Remove old_entry self._cache.setdefault(credential_type, {}).pop(key, None)
[Bug] Token cache incompatibility with MSAL.python **Which Version of MSAL are you using ?** 4.21 Details in this PowerShell issue: https://github.com/Azure/azure-powershell/issues/13467 ## Repro Start with PWSH, then use Az CLI, the go back to PWSH. For example: 0. Use Windows (probably the same on Mac and Linux, but repro steps are on Win) 1. Delete the token cache file (C:\Users\<user>\AppData\Local\.IdentityService\msal.cache) 2. `connect-azaccount` (this is a PWSH command, so **MSAL.NET**) 3. `get-azsubscription` (again PWSH command, so **MSAL.NET**) 4. `az group list` (az cli command, so **MSAL.PY**) 5. disconnect-azaccount (PWSH comand, so **MSAL.NET**) **Actual**: serialization exception from MSAL.NET ## Investigation I have snapshots of the cache after step2, step3 and step4 and can provide them on request (will send them via email to you @rayluo ). At step 3, the refresh token section looks like this: ```json "RefreshToken": { "6eeda3a1-c3b9-4e92-a94d-965a50c06de7.72f988bf-86f1-41af-91ab-2d7cd011db47-login.windows.net-refreshtoken-1--": { "home_account_id": "6eeda3a1-c3b9-4e92-a94d-965a50c06de7.72f988bf-86f1-41af-91ab-2d7cd011db47", "environment": "login.windows.net", "client_info": "eyJ1aWQiOiI2ZWVkYTNhMS1jM2I5LTRlOTItYTk0ZC05NjVhNTBjMDZkZTciLCJ1dGlkIjoiNzJmOTg4YmYtODZmMS00MWFmLTkxYWItMmQ3Y2QwMTFkYjQ3In0", "client_id": "1950a258-227b-4e31-a9cf-717495945fc2", "secret": "secret", "credential_type": "RefreshToken", "family_id": "1" } }, ``` At step4, it looks like this: ```json "RefreshToken": { "6eeda3a1-c3b9-4e92-a94d-965a50c06de7.72f988bf-86f1-41af-91ab-2d7cd011db47-login.windows.net-refreshtoken-1--": { "home_account_id": "6eeda3a1-c3b9-4e92-a94d-965a50c06de7.72f988bf-86f1-41af-91ab-2d7cd011db47", "environment": "login.windows.net", "client_info": "eyJ1aWQiOiI2ZWVkYTNhMS1jM2I5LTRlOTItYTk0ZC05NjVhNTBjMDZkZTciLCJ1dGlkIjoiNzJmOTg4YmYtODZmMS00MWFmLTkxYWItMmQ3Y2QwMTFkYjQ3In0", "client_id": "1950a258-227b-4e31-a9cf-717495945fc2", "secret": "secret", "credential_type": "RefreshToken", "family_id": "1" }, "6eeda3a1-c3b9-4e92-a94d-965a50c06de7.72f988bf-86f1-41af-91ab-2d7cd011db47-login.windows.net-refreshtoken-1950a258-227b-4e31-a9cf-717495945fc2--": { "secret": "secret" } }, ``` The second entry here in step 4 is invalid. CC: @erich-wang
AzureAD/microsoft-authentication-library-for-python
diff --git a/tests/test_token_cache.py b/tests/test_token_cache.py index 1666bba..c846883 100644 --- a/tests/test_token_cache.py +++ b/tests/test_token_cache.py @@ -222,6 +222,24 @@ class TokenCacheTestCase(unittest.TestCase): {}).get("key_id") self.assertEqual(my_key_id, cached_key_id, "AT should be bound to the key") + def test_old_rt_data_with_wrong_key_should_still_be_salvaged_into_new_rt(self): + sample = { + 'client_id': 'my_client_id', + 'credential_type': 'RefreshToken', + 'environment': 'login.example.com', + 'home_account_id': "uid.utid", + 'secret': 'a refresh token', + 'target': 's2 s1 s3', + } + new_rt = "this is a new RT" + self.cache._cache["RefreshToken"] = {"wrong-key": sample} + self.cache.modify( + self.cache.CredentialType.REFRESH_TOKEN, sample, {"secret": new_rt}) + self.assertEqual( + dict(sample, secret=new_rt), + self.cache._cache["RefreshToken"].get( + 'uid.utid-login.example.com-refreshtoken-my_client_id--s2 s1 s3') + ) class SerializableTokenCacheTestCase(TokenCacheTestCase): # Run all inherited test methods, and have extra check in tearDown()
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_hyperlinks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 1 }, "num_modified_files": 1 }
1.6
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.6", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work certifi==2021.5.30 cffi==1.15.1 charset-normalizer==2.0.12 cryptography==3.4.8 idna==3.10 importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work -e git+https://github.com/AzureAD/microsoft-authentication-library-for-python.git@dfbbc66730a1f14cf8c27cb3562ecf9bd611a1a9#egg=msal packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work py @ file:///opt/conda/conda-bld/py_1644396412707/work pycparser==2.21 PyJWT==1.7.1 pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work pytest==6.2.4 requests==2.27.1 toml @ file:///tmp/build/80754af9/toml_1616166611790/work typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work urllib3==1.26.20 zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
name: microsoft-authentication-library-for-python channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=21.4.0=pyhd3eb1b0_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - importlib-metadata=4.8.1=py36h06a4308_0 - importlib_metadata=4.8.1=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - more-itertools=8.12.0=pyhd3eb1b0_0 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=21.3=pyhd3eb1b0_0 - pip=21.2.2=py36h06a4308_0 - pluggy=0.13.1=py36h06a4308_0 - py=1.11.0=pyhd3eb1b0_0 - pyparsing=3.0.4=pyhd3eb1b0_0 - pytest=6.2.4=py36h06a4308_2 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - toml=0.10.2=pyhd3eb1b0_0 - typing_extensions=4.1.1=pyh06a4308_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zipp=3.6.0=pyhd3eb1b0_0 - zlib=1.2.13=h5eee18b_1 - pip: - cffi==1.15.1 - charset-normalizer==2.0.12 - cryptography==3.4.8 - idna==3.10 - pycparser==2.21 - pyjwt==1.7.1 - requests==2.27.1 - urllib3==1.26.20 prefix: /opt/conda/envs/microsoft-authentication-library-for-python
[ "tests/test_token_cache.py::TokenCacheTestCase::test_old_rt_data_with_wrong_key_should_still_be_salvaged_into_new_rt", "tests/test_token_cache.py::SerializableTokenCacheTestCase::test_old_rt_data_with_wrong_key_should_still_be_salvaged_into_new_rt" ]
[]
[ "tests/test_token_cache.py::TokenCacheTestCase::testAddByAad", "tests/test_token_cache.py::TokenCacheTestCase::testAddByAdfs", "tests/test_token_cache.py::TokenCacheTestCase::test_key_id_is_also_recorded", "tests/test_token_cache.py::SerializableTokenCacheTestCase::testAddByAad", "tests/test_token_cache.py::SerializableTokenCacheTestCase::testAddByAdfs", "tests/test_token_cache.py::SerializableTokenCacheTestCase::test_has_state_changed", "tests/test_token_cache.py::SerializableTokenCacheTestCase::test_key_id_is_also_recorded" ]
[]
MIT License
9,036
190
[ "msal/token_cache.py" ]
pre-commit__pre-commit-1715
8670d0b3bce13e5d1140c816c1a8a850cc99edfc
2020-11-25 21:57:11
7727f8777aeedafb567da063fc911414b46a6a3b
diff --git a/pre_commit/clientlib.py b/pre_commit/clientlib.py index 916c5ff..5dfaf7a 100644 --- a/pre_commit/clientlib.py +++ b/pre_commit/clientlib.py @@ -1,6 +1,7 @@ import argparse import functools import logging +import re import shlex import sys from typing import Any @@ -112,6 +113,25 @@ LOCAL = 'local' META = 'meta' +# should inherit from cfgv.Conditional if sha support is dropped +class WarnMutableRev(cfgv.ConditionalOptional): + def check(self, dct: Dict[str, Any]) -> None: + super().check(dct) + + if self.key in dct: + rev = dct[self.key] + + if '.' not in rev and not re.match(r'^[a-fA-F0-9]+$', rev): + logger.warning( + f'The {self.key!r} field of repo {dct["repo"]!r} ' + f'appears to be a mutable reference ' + f'(moving tag / branch). Mutable references are never ' + f'updated after first install and are not supported. ' + f'See https://pre-commit.com/#using-the-latest-version-for-a-repository ' # noqa: E501 + f'for more details.', + ) + + class OptionalSensibleRegex(cfgv.OptionalNoDefault): def check(self, dct: Dict[str, Any]) -> None: super().check(dct) @@ -261,6 +281,14 @@ CONFIG_REPO_DICT = cfgv.Map( ), MigrateShaToRev(), + WarnMutableRev( + 'rev', + cfgv.check_string, + '', + 'repo', + cfgv.NotIn(LOCAL, META), + True, + ), cfgv.WarnAdditionalKeys(('repo', 'rev', 'hooks'), warn_unknown_keys_repo), ) DEFAULT_LANGUAGE_VERSION = cfgv.Map(
issue a warning if a mutable ref is used (`HEAD` / `stable` / `master` / etc.) Perhaps nudging towards https://pre-commit.com/#using-the-latest-version-for-a-repository or `pre-commit autoupdate`
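A standalone sketch of the heuristic the fix above uses to decide whether a `rev` looks mutable (the `looks_mutable` helper name is hypothetical; the condition is the one added to `WarnMutableRev` in the patch):

```python
# A rev with no '.' (so not a version tag like 'v0.12.4') that is also not
# purely hexadecimal (so not a commit sha) is treated as a moving branch/tag.
import re

def looks_mutable(rev: str) -> bool:  # hypothetical helper name
    return '.' not in rev and not re.match(r'^[a-fA-F0-9]+$', rev)

for rev in ('v0.12.4', 'b27f281', 'master', 'HEAD', 'stable'):
    print(rev, looks_mutable(rev))
# v0.12.4 and b27f281 -> False; master, HEAD and stable -> True
```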
pre-commit/pre-commit
diff --git a/tests/clientlib_test.py b/tests/clientlib_test.py index ba60236..d08ecdf 100644 --- a/tests/clientlib_test.py +++ b/tests/clientlib_test.py @@ -180,6 +180,70 @@ def test_ci_key_must_be_map(): cfgv.validate({'ci': 'invalid', 'repos': []}, CONFIG_SCHEMA) [email protected]( + 'rev', + ( + 'v0.12.4', + 'b27f281', + 'b27f281eb9398fc8504415d7fbdabf119ea8c5e1', + '19.10b0', + '4.3.21-2', + ), +) +def test_warn_mutable_rev_ok(caplog, rev): + config_obj = { + 'repo': 'https://gitlab.com/pycqa/flake8', + 'rev': rev, + 'hooks': [{'id': 'flake8'}], + } + cfgv.validate(config_obj, CONFIG_REPO_DICT) + + assert caplog.record_tuples == [] + + [email protected]( + 'rev', + ( + '', + 'HEAD', + 'stable', + 'master', + 'some_branch_name', + ), +) +def test_warn_mutable_rev_invalid(caplog, rev): + config_obj = { + 'repo': 'https://gitlab.com/pycqa/flake8', + 'rev': rev, + 'hooks': [{'id': 'flake8'}], + } + cfgv.validate(config_obj, CONFIG_REPO_DICT) + + assert caplog.record_tuples == [ + ( + 'pre_commit', + logging.WARNING, + "The 'rev' field of repo 'https://gitlab.com/pycqa/flake8' " + 'appears to be a mutable reference (moving tag / branch). ' + 'Mutable references are never updated after first install and are ' + 'not supported. ' + 'See https://pre-commit.com/#using-the-latest-version-for-a-repository ' # noqa: E501 + 'for more details.', + ), + ] + + +def test_warn_mutable_rev_conditional(): + config_obj = { + 'repo': 'meta', + 'rev': '3.7.7', + 'hooks': [{'id': 'flake8'}], + } + + with pytest.raises(cfgv.ValidationError): + cfgv.validate(config_obj, CONFIG_REPO_DICT) + + def test_validate_optional_sensible_regex(caplog): config_obj = { 'id': 'flake8',
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_hyperlinks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 2, "test_score": 3 }, "num_modified_files": 1 }
2.9
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "pytest-xdist", "pytest-mock", "pytest-asyncio" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "requirements/base.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
cfgv==3.4.0 coverage==7.8.0 distlib==0.3.9 exceptiongroup==1.2.2 execnet==2.1.1 filelock==3.18.0 identify==2.6.9 iniconfig==2.1.0 nodeenv==1.9.1 packaging==24.2 platformdirs==4.3.7 pluggy==1.5.0 -e git+https://github.com/pre-commit/pre-commit.git@8670d0b3bce13e5d1140c816c1a8a850cc99edfc#egg=pre_commit pytest==8.3.5 pytest-asyncio==0.26.0 pytest-cov==6.0.0 pytest-mock==3.14.0 pytest-xdist==3.6.1 PyYAML==6.0.2 toml==0.10.2 tomli==2.2.1 typing_extensions==4.13.0 virtualenv==20.29.3
name: pre-commit channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - cfgv==3.4.0 - coverage==7.8.0 - distlib==0.3.9 - exceptiongroup==1.2.2 - execnet==2.1.1 - filelock==3.18.0 - identify==2.6.9 - iniconfig==2.1.0 - nodeenv==1.9.1 - packaging==24.2 - platformdirs==4.3.7 - pluggy==1.5.0 - pytest==8.3.5 - pytest-asyncio==0.26.0 - pytest-cov==6.0.0 - pytest-mock==3.14.0 - pytest-xdist==3.6.1 - pyyaml==6.0.2 - toml==0.10.2 - tomli==2.2.1 - typing-extensions==4.13.0 - virtualenv==20.29.3 prefix: /opt/conda/envs/pre-commit
[ "tests/clientlib_test.py::test_warn_mutable_rev_invalid[]", "tests/clientlib_test.py::test_warn_mutable_rev_invalid[HEAD]", "tests/clientlib_test.py::test_warn_mutable_rev_invalid[stable]", "tests/clientlib_test.py::test_warn_mutable_rev_invalid[master]", "tests/clientlib_test.py::test_warn_mutable_rev_invalid[some_branch_name]" ]
[]
[ "tests/clientlib_test.py::test_check_type_tag_failures[definitely-not-a-tag]", "tests/clientlib_test.py::test_check_type_tag_failures[fiel]", "tests/clientlib_test.py::test_check_type_tag_success", "tests/clientlib_test.py::test_config_valid[config_obj0-True]", "tests/clientlib_test.py::test_config_valid[config_obj1-True]", "tests/clientlib_test.py::test_config_valid[config_obj2-False]", "tests/clientlib_test.py::test_local_hooks_with_rev_fails", "tests/clientlib_test.py::test_config_with_local_hooks_definition_passes", "tests/clientlib_test.py::test_config_schema_does_not_contain_defaults", "tests/clientlib_test.py::test_validate_manifest_main_ok", "tests/clientlib_test.py::test_validate_config_main_ok", "tests/clientlib_test.py::test_validate_config_old_list_format_ok", "tests/clientlib_test.py::test_validate_warn_on_unknown_keys_at_repo_level", "tests/clientlib_test.py::test_validate_warn_on_unknown_keys_at_top_level", "tests/clientlib_test.py::test_ci_map_key_allowed_at_top_level", "tests/clientlib_test.py::test_ci_key_must_be_map", "tests/clientlib_test.py::test_warn_mutable_rev_ok[v0.12.4]", "tests/clientlib_test.py::test_warn_mutable_rev_ok[b27f281]", "tests/clientlib_test.py::test_warn_mutable_rev_ok[b27f281eb9398fc8504415d7fbdabf119ea8c5e1]", "tests/clientlib_test.py::test_warn_mutable_rev_ok[19.10b0]", "tests/clientlib_test.py::test_warn_mutable_rev_ok[4.3.21-2]", "tests/clientlib_test.py::test_warn_mutable_rev_conditional", "tests/clientlib_test.py::test_validate_optional_sensible_regex", "tests/clientlib_test.py::test_mains_not_ok[validate_config_main]", "tests/clientlib_test.py::test_mains_not_ok[validate_manifest_main]", "tests/clientlib_test.py::test_valid_manifests[manifest_obj0-True]", "tests/clientlib_test.py::test_valid_manifests[manifest_obj1-True]", "tests/clientlib_test.py::test_valid_manifests[manifest_obj2-True]", "tests/clientlib_test.py::test_migrate_sha_to_rev_ok[dct0]", "tests/clientlib_test.py::test_migrate_sha_to_rev_ok[dct1]", "tests/clientlib_test.py::test_migrate_sha_to_rev_ok[dct2]", "tests/clientlib_test.py::test_migrate_sha_to_rev_ok[dct3]", "tests/clientlib_test.py::test_migrate_sha_to_rev_dont_specify_both", "tests/clientlib_test.py::test_migrate_sha_to_rev_conditional_check_failures[dct0]", "tests/clientlib_test.py::test_migrate_sha_to_rev_conditional_check_failures[dct1]", "tests/clientlib_test.py::test_migrate_sha_to_rev_conditional_check_failures[dct2]", "tests/clientlib_test.py::test_migrate_to_sha_apply_default", "tests/clientlib_test.py::test_migrate_to_sha_ok", "tests/clientlib_test.py::test_meta_hook_invalid[config_repo0]", "tests/clientlib_test.py::test_meta_hook_invalid[config_repo1]", "tests/clientlib_test.py::test_meta_hook_invalid[config_repo2]", "tests/clientlib_test.py::test_default_language_version_invalid[mapping0]", "tests/clientlib_test.py::test_default_language_version_invalid[mapping1]", "tests/clientlib_test.py::test_minimum_pre_commit_version_failing", "tests/clientlib_test.py::test_minimum_pre_commit_version_passing", "tests/clientlib_test.py::test_warn_additional[schema0]", "tests/clientlib_test.py::test_warn_additional[schema1]" ]
[]
MIT License
9,038
460
[ "pre_commit/clientlib.py" ]
sqlfluff__sqlfluff-581
a5665e0d82dc3d8177fbf4a589623976cb1288be
2020-11-25 22:31:52
60dbf358e2023dfa09073c0f337e106db7f1f9e4
diff --git a/src/sqlfluff/core/rules/base.py b/src/sqlfluff/core/rules/base.py index 0685c1b9e..2e7f98e2c 100644 --- a/src/sqlfluff/core/rules/base.py +++ b/src/sqlfluff/core/rules/base.py @@ -277,7 +277,7 @@ class BaseCrawler: # cause the user to get no results except Exception as e: self.logger.critical( - f"Applying rule {self.code} threw and Exception: {e}", exc_info=True + f"Applying rule {self.code} threw an Exception: {e}", exc_info=True ) vs.append( SQLLintError( diff --git a/src/sqlfluff/core/rules/std.py b/src/sqlfluff/core/rules/std.py index c1f61b63a..af4b3476d 100644 --- a/src/sqlfluff/core/rules/std.py +++ b/src/sqlfluff/core/rules/std.py @@ -3061,7 +3061,11 @@ class Rule_L031(BaseCrawler): table_ref = table_exp.get_child("object_reference") # If this is self-join - skip it - if base_table.raw == table_ref.raw and base_table != table_ref: + if ( + base_table + and base_table.raw == table_ref.raw + and base_table != table_ref + ): continue whitespace_ref = table_exp.get_child("whitespace") @@ -3079,7 +3083,7 @@ class Rule_L031(BaseCrawler): # Find all references to alias in select clause for alias_with_column in select_clause.recursive_crawl("object_reference"): used_alias_ref = alias_with_column.get_child("identifier") - if used_alias_ref.raw == alias_identifier_ref.raw: + if used_alias_ref and used_alias_ref.raw == alias_identifier_ref.raw: ids_refs.append(used_alias_ref) # Find all references to alias in column references
Query on non-table expression throws exception in L031 This query: ```sql select * from table( generator( rowcount=>10000 ) ) ``` does not select from a table but from the generator function. In this case rule L031 throws an exception: ``` L: 4 | P: 5 | L031 | Unexpected exception: 'NoneType' object has no attribute 'raw'; Could you open an issue at | https://github.com/sqlfluff/sqlfluff/issues ? You can ignore this exception for now, by adding | '--noqa: L031' at the end of line 4 ``` This is not Snowflake-specific, as this simple query: ```sql select * from values (1, 2, 3) ``` returns the same error.
sqlfluff/sqlfluff
diff --git a/test/core/rules/test_cases/L031.yml b/test/core/rules/test_cases/L031.yml index 03cff7625..2ae39b268 100644 --- a/test/core/rules/test_cases/L031.yml +++ b/test/core/rules/test_cases/L031.yml @@ -77,3 +77,29 @@ test_4: JOIN customers on users.id = customers.user_id JOIN orders on users.id = orders.user_id order by o desc + +alias_single_char_identifiers: + pass_str: "select b from tbl as a" + pass_str: "select b from tbl" + +alias_with_wildcard_identifier: + fail_str: "select * from tbl as a" + fix_str: "select * from tbl" + +select_from_values: + pass_str: | + select * + from values(1, 2, 3) + +select_from_table_generator: + pass_str: | + select * + from table( + generator( + rowcount=>10000 + ) + ) + + configs: + core: + dialect: bigquery
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 0 }, "num_modified_files": 2 }
0.3
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": null, "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
appdirs==1.4.4 bench-it==1.0.1 cached-property==2.0.1 click==8.1.8 colorama==0.4.6 configparser==7.2.0 dataclasses==0.6 diff-cover==2.6.1 exceptiongroup==1.2.2 importlib_metadata==8.6.1 inflect==7.5.0 iniconfig==2.1.0 Jinja2==3.1.6 jinja2-pluralize==0.3.0 MarkupSafe==3.0.2 more-itertools==10.6.0 oyaml==1.0 packaging==24.2 pathspec==0.12.1 pluggy==1.5.0 Pygments==2.19.1 pytest==8.3.5 PyYAML==6.0.2 six==1.17.0 -e git+https://github.com/sqlfluff/sqlfluff.git@a5665e0d82dc3d8177fbf4a589623976cb1288be#egg=sqlfluff tomli==2.2.1 typeguard==4.4.2 typing_extensions==4.13.0 zipp==3.21.0
name: sqlfluff channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - appdirs==1.4.4 - bench-it==1.0.1 - cached-property==2.0.1 - click==8.1.8 - colorama==0.4.6 - configparser==7.2.0 - dataclasses==0.6 - diff-cover==2.6.1 - exceptiongroup==1.2.2 - importlib-metadata==8.6.1 - inflect==7.5.0 - iniconfig==2.1.0 - jinja2==3.1.6 - jinja2-pluralize==0.3.0 - markupsafe==3.0.2 - more-itertools==10.6.0 - oyaml==1.0 - packaging==24.2 - pathspec==0.12.1 - pluggy==1.5.0 - pygments==2.19.1 - pytest==8.3.5 - pyyaml==6.0.2 - six==1.17.0 - tomli==2.2.1 - typeguard==4.4.2 - typing-extensions==4.13.0 - zipp==3.21.0 prefix: /opt/conda/envs/sqlfluff
[ "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L031_alias_with_wildcard_identifier]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L031_select_from_values]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L031_select_from_table_generator]" ]
[ "test/core/rules/std_test.py::test__rules__std_file_dbt[L021-models/my_new_project/select_distinct_group_by.sql-violations0]", "test/core/templaters_test.py::test__templater_dbt_profiles_dir_expanded", "test/core/templaters_test.py::test__templater_dbt_templating_result[use_dbt_utils.sql]", "test/core/templaters_test.py::test__templater_dbt_templating_result[macro_in_macro.sql]", "test/core/templaters_test.py::test__templater_dbt_templating_result[use_headers.sql]", "test/core/templaters_test.py::test__templater_dbt_templating_result[use_var.sql]", "test/core/templaters_test.py::test__templater_dbt_templating_absolute_path", "test/core/templaters_test.py::test__templater_dbt_handle_exceptions[compiler_error.sql-dbt", "test/core/templaters_test.py::test__templater_dbt_handle_exceptions[exception_connect_database.sql-dbt", "test/diff_quality_plugin_test.py::test_diff_quality_plugin[test/fixtures/linter/diffquality/parse_error.sql-expected_violations_lines2]" ]
[ "test/api/classes_test.py::test__api__lexer", "test/api/classes_test.py::test__api__parser", "test/api/classes_test.py::test__api__linter_lint", "test/api/classes_test.py::test__api__linter_fix", "test/api/simple_test.py::test__api__lint_string", "test/api/simple_test.py::test__api__lint_file", "test/api/simple_test.py::test__api__lint_string_specific", "test/api/simple_test.py::test__api__fix_string", "test/api/simple_test.py::test__api__fix_string_specific", "test/api/simple_test.py::test__api__parse_string", "test/cli/commands_test.py::test__cli__command_directed", "test/cli/commands_test.py::test__cli__command_dialect", "test/cli/commands_test.py::test__cli__command_lint_stdin[command0]", "test/cli/commands_test.py::test__cli__command_lint_stdin[command1]", "test/cli/commands_test.py::test__cli__command_lint_stdin[command2]", "test/cli/commands_test.py::test__cli__command_lint_stdin[command3]", "test/cli/commands_test.py::test__cli__command_lint_parse[command0]", "test/cli/commands_test.py::test__cli__command_lint_parse[command1]", "test/cli/commands_test.py::test__cli__command_lint_parse[command2]", "test/cli/commands_test.py::test__cli__command_lint_parse[command3]", "test/cli/commands_test.py::test__cli__command_lint_parse[command4]", "test/cli/commands_test.py::test__cli__command_lint_parse[command5]", "test/cli/commands_test.py::test__cli__command_lint_parse[command6]", "test/cli/commands_test.py::test__cli__command_lint_parse[command7]", "test/cli/commands_test.py::test__cli__command_lint_parse[command8]", "test/cli/commands_test.py::test__cli__command_lint_parse[command9]", "test/cli/commands_test.py::test__cli__command_lint_parse[command10]", "test/cli/commands_test.py::test__cli__command_lint_parse[command11]", "test/cli/commands_test.py::test__cli__command_lint_parse[command12]", "test/cli/commands_test.py::test__cli__command_lint_parse[command13]", "test/cli/commands_test.py::test__cli__command_lint_parse[command14]", "test/cli/commands_test.py::test__cli__command_lint_parse[command15]", "test/cli/commands_test.py::test__cli__command_lint_parse[command16]", "test/cli/commands_test.py::test__cli__command_lint_parse[command17]", "test/cli/commands_test.py::test__cli__command_lint_parse[command18]", "test/cli/commands_test.py::test__cli__command_lint_parse[command19]", "test/cli/commands_test.py::test__cli__command_lint_parse[command20]", "test/cli/commands_test.py::test__cli__command_lint_parse[command21]", "test/cli/commands_test.py::test__cli__command_lint_parse[command22]", "test/cli/commands_test.py::test__cli__command_lint_warning_explicit_file_ignored", "test/cli/commands_test.py::test__cli__command_lint_skip_ignore_files", "test/cli/commands_test.py::test__cli__command_versioning", "test/cli/commands_test.py::test__cli__command_version", "test/cli/commands_test.py::test__cli__command_rules", "test/cli/commands_test.py::test__cli__command_dialects", "test/cli/commands_test.py::test__cli__command__fix[L001-test/fixtures/linter/indentation_errors.sql]", "test/cli/commands_test.py::test__cli__command__fix[L008-test/fixtures/linter/whitespace_errors.sql]", "test/cli/commands_test.py::test__cli__command__fix[L008-test/fixtures/linter/indentation_errors.sql]", "test/cli/commands_test.py::test__cli__command__fix[L003-test/fixtures/linter/indentation_error_hard.sql]", "test/cli/commands_test.py::test__cli__command_fix_stdin[select", "test/cli/commands_test.py::test__cli__command_fix_stdin[", "test/cli/commands_test.py::test__cli__command_fix_stdin[SELECT", 
"test/cli/commands_test.py::test__cli__command_fix_stdin_safety", "test/cli/commands_test.py::test__cli__command__fix_no_force[L001-test/fixtures/linter/indentation_errors.sql-y-0]", "test/cli/commands_test.py::test__cli__command__fix_no_force[L001-test/fixtures/linter/indentation_errors.sql-n-65]", "test/cli/commands_test.py::test__cli__command_parse_serialize_from_stdin[yaml]", "test/cli/commands_test.py::test__cli__command_parse_serialize_from_stdin[json]", "test/cli/commands_test.py::test__cli__command_lint_serialize_from_stdin[select", "test/cli/commands_test.py::test__cli__command_lint_serialize_from_stdin[SElect", "test/cli/commands_test.py::test__cli__command_fail_nice_not_found[command0]", "test/cli/commands_test.py::test__cli__command_fail_nice_not_found[command1]", "test/cli/commands_test.py::test__cli__command_lint_serialize_multiple_files[yaml]", "test/cli/commands_test.py::test__cli__command_lint_serialize_multiple_files[json]", "test/cli/commands_test.py::test___main___help", "test/cli/formatters_test.py::test__cli__formatters__filename_nocol", "test/cli/formatters_test.py::test__cli__formatters__violation", "test/cli/helpers_test.py::test__cli__helpers__colorize", "test/cli/helpers_test.py::test__cli__helpers__cli_table", "test/cli/helpers_test.py::test__cli__helpers__wrap_elem[abc-5-res0]", "test/cli/helpers_test.py::test__cli__helpers__wrap_elem[how", "test/cli/helpers_test.py::test__cli__helpers__wrap_elem[A", "test/cli/helpers_test.py::test__cli__helpers__wrap_field_a", "test/cli/helpers_test.py::test__cli__helpers__wrap_field_b", "test/cli/helpers_test.py::test__cli__helpers__wrap_field_c", "test/cli/helpers_test.py::test__cli__helpers__pad_line", "test/core/config_test.py::test__config__nested_combine", "test/core/config_test.py::test__config__dict_diff", "test/core/config_test.py::test__config__load_file_dir", "test/core/config_test.py::test__config__load_file_f", "test/core/config_test.py::test__config__load_nested", "test/core/config_test.py::test__config__iter_config_paths_right_order", "test/core/config_test.py::test__config__find_sqlfluffignore_in_same_directory", "test/core/config_test.py::test__config__nested_config_tests", "test/core/dialects/ansi_test.py::test__dialect__ansi__file_lex[a", "test/core/dialects/ansi_test.py::test__dialect__ansi__file_lex[b.c-res1]", "test/core/dialects/ansi_test.py::test__dialect__ansi__file_lex[abc", "test/core/dialects/ansi_test.py::test__dialect__ansi_specific_segment_parses[SelectKeywordSegment-select]", "test/core/dialects/ansi_test.py::test__dialect__ansi_specific_segment_parses[NakedIdentifierSegment-online_sales]", "test/core/dialects/ansi_test.py::test__dialect__ansi_specific_segment_parses[BareFunctionSegment-current_timestamp]", "test/core/dialects/ansi_test.py::test__dialect__ansi_specific_segment_parses[FunctionSegment-current_timestamp()]", "test/core/dialects/ansi_test.py::test__dialect__ansi_specific_segment_parses[NumericLiteralSegment-1000.0]", "test/core/dialects/ansi_test.py::test__dialect__ansi_specific_segment_parses[ExpressionSegment-online_sales", "test/core/dialects/ansi_test.py::test__dialect__ansi_specific_segment_parses[IntervalExpressionSegment-INTERVAL", "test/core/dialects/ansi_test.py::test__dialect__ansi_specific_segment_parses[ExpressionSegment-CASE", "test/core/dialects/ansi_test.py::test__dialect__ansi_specific_segment_parses[ExpressionSegment-CAST(ROUND(online_sales", "test/core/dialects/ansi_test.py::test__dialect__ansi_specific_segment_parses[ExpressionSegment-name", 
"test/core/dialects/ansi_test.py::test__dialect__ansi_specific_segment_parses[SelectTargetElementSegment-MIN", "test/core/dialects/ansi_test.py::test__dialect__ansi_specific_segment_parses[ExpressionSegment-DATE_ADD(CURRENT_DATE('America/New_York'),", "test/core/dialects/ansi_test.py::test__dialect__ansi_specific_segment_parses[ExpressionSegment-my_array[1]]", "test/core/dialects/ansi_test.py::test__dialect__ansi_specific_segment_parses[ExpressionSegment-my_array[OFFSET(1)]]", "test/core/dialects/ansi_test.py::test__dialect__ansi_specific_segment_parses[ExpressionSegment-my_array[5:8]]", "test/core/dialects/ansi_test.py::test__dialect__ansi_specific_segment_parses[ExpressionSegment-4", "test/core/dialects/ansi_test.py::test__dialect__ansi_specific_segment_parses[ExpressionSegment-bits[OFFSET(0)]", "test/core/dialects/ansi_test.py::test__dialect__ansi_specific_segment_parses[SelectTargetElementSegment-(count_18_24", "test/core/dialects/ansi_test.py::test__dialect__ansi_specific_segment_parses[ExpressionSegment-count_18_24", "test/core/dialects/ansi_test.py::test__dialect__ansi_specific_segment_parses[SelectStatementSegment-SELECT", "test/core/dialects/ansi_test.py::test__dialect__ansi_specific_segment_parses[SelectTargetElementSegment-t.val/t.id]", "test/core/dialects/ansi_test.py::test__dialect__ansi_specific_segment_parses[SelectTargetElementSegment-CAST(num", "test/core/dialects/ansi_test.py::test__dialect__ansi_specific_segment_parses[SelectTargetElementSegment-a.*]", "test/core/dialects/ansi_test.py::test__dialect__ansi_specific_segment_parses[SelectTargetElementSegment-a.b.*]", "test/core/dialects/ansi_test.py::test__dialect__ansi_specific_segment_parses[SelectTargetElementSegment-a.b.c.*]", "test/core/dialects/ansi_test.py::test__dialect__ansi_specific_segment_parses[ObjectReferenceSegment-a..c.*]", "test/core/dialects/ansi_test.py::test__dialect__ansi_specific_segment_parses[SelectTargetElementSegment--some_variable]", "test/core/dialects/ansi_test.py::test__dialect__ansi_specific_segment_parses[SelectTargetElementSegment--", "test/core/dialects/ansi_test.py::test__dialect__ansi_specific_segment_parses[ExpressionSegment-concat(left(uaid,", "test/core/dialects/ansi_test.py::test__dialect__ansi_specific_segment_parses[ExpressionSegment-c", "test/core/dialects/ansi_test.py::test__dialect__ansi_specific_segment_parses[ExpressionSegment-NULL::INT]", "test/core/dialects/ansi_test.py::test__dialect__ansi_specific_segment_parses[SelectTargetElementSegment-NULL::INT", "test/core/dialects/ansi_test.py::test__dialect__ansi_specific_segment_not_match[ObjectReferenceSegment-\\n", "test/core/dialects/ansi_test.py::test__dialect__ansi_specific_segment_not_parse[SELECT", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[teradata-update_from.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[teradata-select_stmt_cast.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[teradata-select_stmt.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[teradata-create_table_stmt_3.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[teradata-create_table_stmt_2.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[teradata-create_table_stmt.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[teradata-collect_stats.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[teradata-bteq_stmt.sql]", 
"test/core/dialects/dialects_test.py::test__dialect__base_file_parse[snowflake-snowflake_window_function_ignore_nulls.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[snowflake-snowflake_string_literal.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[snowflake-snowflake_semi_structured_3.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[snowflake-snowflake_semi_structured_2.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[snowflake-snowflake_semi_structured.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[snowflake-snowflake_qualify.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[snowflake-snowflake_pivot.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[snowflake-snowflake_col_position.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[postgres-postgres_within_group.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[mysql-alter_table.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[bigquery-string_literals.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[bigquery-select_with_offset.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[bigquery-select_replace_2.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[bigquery-select_replace.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[bigquery-select_quoting.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[bigquery-select_multi_except.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[bigquery-select_ml_weights.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[bigquery-select_ml_predict_with_select.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[bigquery-select_for_system_time_2.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[bigquery-select_for_system_time.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[bigquery-select_except.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[bigquery-select_example.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[bigquery-select_datetime.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[bigquery-interval_function.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-update.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-table_expression.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-shorthand_cast.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_with_b.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_with_a.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_where_in_unnest.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_v.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_u.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_trailing_comma_column_list.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_t.sql]", 
"test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_simple_j.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_simple_i.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_simple_h.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_simple_g.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_simple_f.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_simple_e.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_simple_d.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_simple_c.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_simple_b.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_simple_a.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_s.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_right.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_r.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_q.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_p.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_o.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_n.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_m.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_l.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_j.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_h.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_g.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_function_in_group_by.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_f.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_e.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_d.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_case_b.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_case_a.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_c.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_b.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_a.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-rollback_work_and_no_chain.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-rollback_work.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-rollback_and_no_chain.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-rollback.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-revoke_select_on_table_a_from_group_b.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-multi_statement_c.sql]", 
"test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-multi_statement_b.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-multi_statement_a.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-modulo.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-insert_a.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-grant_update_on_all_tables_in_schema_a_to_public.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-grant_select_update_insert_on_mytable_to_public.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-grant_select_on_mytable_to_public_with_grant_option.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-grant_select_on_mytable_to_public.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-grant_select_col1_col2_update_col1_on_mytable_to_public.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-grant_all_privileges_on_mytable_to_role.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-grant_all_on_table_mytable_to_role.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-grant_all_on_mytable_to_role.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-functions_b.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-functions_a.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-escaped_quotes.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-escape.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-drop_view_a_restrict.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-drop_view_a_cascade.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-drop_view_a.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-drop_table_if_exists_a.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-drop_table_a_restrict.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-drop_table_a_cascade.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-drop_table_a.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-drop_model.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-delete_from.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-create_view_a.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-create_table_varchar.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-create_table_table_comment.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-create_table_column_comment.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-create_table_auto_increment.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-create_table_as.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-create_table_a_pk_unique_fk_constraints.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-create_table_a_column_constraints.sql]", 
"test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-create_table_a_c1_c2.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-create_model_options.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-commit_work_and_no_chain.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-commit_work.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-commit_and_no_chain.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-commit.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-arithmetic_a.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_parse_struct[snowflake-snowflake_semi_structured.sql-True-snowflake_semi_structured.yml]", "test/core/dialects/dialects_test.py::test__dialect__base_parse_struct[snowflake-snowflake_pivot.sql-True-snowflake_pivot.yml]", "test/core/dialects/dialects_test.py::test__dialect__base_parse_struct[bigquery-select_replace_2.sql-True-select_replace_2.yml]", "test/core/dialects/dialects_test.py::test__dialect__base_parse_struct[ansi-select_with_a.sql-True-select_with_a.yml]", "test/core/dialects/dialects_test.py::test__dialect__base_parse_struct[ansi-select_simple_j.sql-True-select_simple_j.yml]", "test/core/dialects/dialects_test.py::test__dialect__base_parse_struct[ansi-select_simple_e.sql-True-select_simple_e.yml]", "test/core/dialects/dialects_test.py::test__dialect__base_parse_struct[ansi-select_simple_e.sql-False-select_simple_e_nc.yml]", "test/core/dialects/dialects_test.py::test__dialect__base_parse_struct[ansi-select_simple_b.sql-True-select_simple_b.yml]", "test/core/dialects/dialects_test.py::test__dialect__base_parse_struct[ansi-select_simple_a.sql-True-select_simple_a.yml]", "test/core/dialects/dialects_test.py::test__dialect__base_parse_struct[ansi-select_e.sql-True-select_e.yml]", "test/core/dialects/dialects_test.py::test__dialect__base_parse_struct[ansi-select_a.sql-True-select_a.yml]", "test/core/dialects/dialects_test.py::test__dialect__base_parse_struct[ansi-multi_statement_a.sql-False-multi_statement_a_nc.yml]", "test/core/dialects/snowflake_test.py::test_snowflake_queries[UseStatementSegment-USE", "test/core/linter_test.py::test__linter__path_from_paths__dir", "test/core/linter_test.py::test__linter__path_from_paths__file", "test/core/linter_test.py::test__linter__path_from_paths__not_exist", "test/core/linter_test.py::test__linter__path_from_paths__not_exist_ignore", "test/core/linter_test.py::test__linter__path_from_paths__explicit_ignore", "test/core/linter_test.py::test__linter__path_from_paths__dot", "test/core/linter_test.py::test__linter__path_from_paths__ignore[test/fixtures/linter/sqlfluffignore]", "test/core/linter_test.py::test__linter__path_from_paths__ignore[test/fixtures/linter/sqlfluffignore/]", "test/core/linter_test.py::test__linter__path_from_paths__ignore[test/fixtures/linter/sqlfluffignore/.]", "test/core/linter_test.py::test__linter__lint_string_vs_file[test/fixtures/linter/indentation_errors.sql]", "test/core/linter_test.py::test__linter__lint_string_vs_file[test/fixtures/linter/whitespace_errors.sql]", "test/core/linter_test.py::test__linter__linting_result__sum_dicts", "test/core/linter_test.py::test__linter__linting_result__combine_dicts", "test/core/parser/grammar_test.py::test__parser__grammar__base__longest_trimmed_match__basic[seg_list_slice0-matcher_keywords0-False-result_slice0]", 
"test/core/parser/grammar_test.py::test__parser__grammar__base__longest_trimmed_match__basic[seg_list_slice1-matcher_keywords1-True-result_slice1]", "test/core/parser/grammar_test.py::test__parser__grammar__base__longest_trimmed_match__basic[seg_list_slice2-matcher_keywords2-False-None]", "test/core/parser/grammar_test.py::test__parser__grammar__base__longest_trimmed_match__basic[seg_list_slice3-matcher_keywords3-True-result_slice3]", "test/core/parser/grammar_test.py::test__parser__grammar__base__longest_trimmed_match__adv", "test/core/parser/grammar_test.py::test__parser__grammar__base__look_ahead_match[seg_list_slice0-matcher_keywords0-result_slice0-bar-None]", "test/core/parser/grammar_test.py::test__parser__grammar__base__look_ahead_match[seg_list_slice1-matcher_keywords1-result_slice1-foo-pre_match_slice1]", "test/core/parser/grammar_test.py::test__parser__grammar__base__ephemeral_segment", "test/core/parser/grammar_test.py::test__parser__grammar__base__bracket_sensitive_look_ahead_match", "test/core/parser/grammar_test.py::test__parser__grammar_oneof[True]", "test/core/parser/grammar_test.py::test__parser__grammar_oneof[False]", "test/core/parser/grammar_test.py::test__parser__grammar_oneof_exclude", "test/core/parser/grammar_test.py::test__parser__grammar_startswith_a[baar-False]", "test/core/parser/grammar_test.py::test__parser__grammar_startswith_a[bar-True]", "test/core/parser/grammar_test.py::test__parser__grammar_startswith_b[False-3]", "test/core/parser/grammar_test.py::test__parser__grammar_startswith_b[True-4]", "test/core/parser/grammar_test.py::test__parser__grammar_sequence", "test/core/parser/grammar_test.py::test__parser__grammar_sequence_nested", "test/core/parser/grammar_test.py::test__parser__grammar_delimited[token_list0-None-True-False-5]", "test/core/parser/grammar_test.py::test__parser__grammar_delimited[token_list1-None-True-False-6]", "test/core/parser/grammar_test.py::test__parser__grammar_delimited[token_list2-None-True-False-0]", "test/core/parser/grammar_test.py::test__parser__grammar_delimited[token_list3-None-True-True-3]", "test/core/parser/grammar_test.py::test__parser__grammar_delimited[token_list4-0-True-False-5]", "test/core/parser/grammar_test.py::test__parser__grammar_delimited[token_list5-0-False-False-1]", "test/core/parser/grammar_test.py::test__parser__grammar_delimited[token_list6-1-True-False-5]", "test/core/parser/grammar_test.py::test__parser__grammar_delimited[token_list7-1-False-False-0]", "test/core/parser/grammar_test.py::test__parser__grammar_delimited[token_list8-None-True-False-3]", "test/core/parser/grammar_test.py::test__parser__grammar_delimited[token_list9-None-False-False-3]", "test/core/parser/grammar_test.py::test__parser__grammar_delimited[token_list10-1-True-False-3]", "test/core/parser/grammar_test.py::test__parser__grammar_delimited[token_list11-1-False-False-3]", "test/core/parser/grammar_test.py::test__parser__grammar_greedyuntil[foo-False-1]", "test/core/parser/grammar_test.py::test__parser__grammar_greedyuntil[bar-False-0]", "test/core/parser/grammar_test.py::test__parser__grammar_greedyuntil[baar-False-3]", "test/core/parser/grammar_test.py::test__parser__grammar_greedyuntil[baar-True-5]", "test/core/parser/grammar_test.py::test__parser__grammar_greedyuntil_bracketed", "test/core/parser/grammar_test.py::test__parser__grammar_anything", "test/core/parser/grammar_test.py::test__parser__grammar_nothing", "test/core/parser/grammar_test.py::test__parser__grammar_noncode", 
"test/core/parser/helpers_test.py::test__parser__helper_trim_non_code_segments[token_list0-0-3-0]", "test/core/parser/helpers_test.py::test__parser__helper_trim_non_code_segments[token_list1-0-3-0]", "test/core/parser/helpers_test.py::test__parser__helper_trim_non_code_segments[token_list2-0-0-0]", "test/core/parser/helpers_test.py::test__parser__helper_trim_non_code_segments[token_list3-3-3-3]", "test/core/parser/lexer_test.py::test__parser__lexer_obj[a", "test/core/parser/lexer_test.py::test__parser__lexer_obj[b.c-res1]", "test/core/parser/lexer_test.py::test__parser__lexer_obj[abc", "test/core/parser/lexer_test.py::test__parser__lexer_obj[abc'\\n", "test/core/parser/lexer_test.py::test__parser__lexer_obj[*-+bd/-res8]", "test/core/parser/lexer_test.py::test__parser__lexer_obj[2+4", "test/core/parser/lexer_test.py::test__parser__lexer_obj[when", "test/core/parser/lexer_test.py::test__parser__lexer_singleton[.fsaljk-.]", "test/core/parser/lexer_test.py::test__parser__lexer_singleton[fsaljk-None]", "test/core/parser/lexer_test.py::test__parser__lexer_regex[fsaljk-f-f0]", "test/core/parser/lexer_test.py::test__parser__lexer_regex[fsaljk-f-f1]", "test/core/parser/lexer_test.py::test__parser__lexer_regex[fsaljk-[fas]*-fsa]", "test/core/parser/lexer_test.py::test__parser__lexer_regex[", "test/core/parser/lexer_test.py::test__parser__lexer_regex['something", "test/core/parser/lexer_test.py::test__parser__lexer_regex['", "test/core/parser/lexer_test.py::test__parser__lexer_multimatcher", "test/core/parser/lexer_test.py::test__parser__lexer_fail", "test/core/parser/lexer_test.py::test__parser__lexer_fail_via_parse", "test/core/parser/markers_test.py::test__parser__common_marker", "test/core/parser/markers_test.py::test__parser__common_marker_format", "test/core/parser/match_test.py::test__parser__match_construct[<lambda>0-from_unmatched]", "test/core/parser/match_test.py::test__parser__match_construct[<lambda>0-from_matched]", "test/core/parser/match_test.py::test__parser__match_construct[<lambda>1-from_unmatched]", "test/core/parser/match_test.py::test__parser__match_construct[<lambda>1-from_matched]", "test/core/parser/match_test.py::test__parser__match_construct[<lambda>2-from_unmatched]", "test/core/parser/match_test.py::test__parser__match_construct[<lambda>2-from_matched]", "test/core/parser/match_test.py::test__parser__match_construct[<lambda>3-from_unmatched]", "test/core/parser/match_test.py::test__parser__match_construct[<lambda>3-from_matched]", "test/core/parser/match_test.py::test__parser__match_construct_from_empty", "test/core/parser/match_test.py::test__parser__match_add[<lambda>0]", "test/core/parser/match_test.py::test__parser__match_add[<lambda>1]", "test/core/parser/match_test.py::test__parser__match_add[<lambda>2]", "test/core/parser/match_test.py::test__parser__match_add[<lambda>3]", "test/core/parser/match_test.py::test__parser__match_add_raises[string]", "test/core/parser/match_test.py::test__parser__match_add_raises[fail_case1]", "test/core/parser/match_test.py::test__parser__match_add_raises[fail_case2]", "test/core/parser/match_test.py::test__parser__match_add_raises[fail_case3]", "test/core/parser/parse_test.py::test__parser__parse_match", "test/core/parser/parse_test.py::test__parser__parse_parse", "test/core/parser/parse_test.py::test__parser__parse_expand", "test/core/parser/segments_base_test.py::test__parser__base_segments_raw_init", "test/core/parser/segments_base_test.py::test__parser__base_segments_type", 
"test/core/parser/segments_base_test.py::test__parser__base_segments_raw", "test/core/parser/segments_base_test.py::test__parser__base_segments_base", "test/core/parser/segments_base_test.py::test__parser__base_segments_raw_compare", "test/core/parser/segments_base_test.py::test__parser__base_segments_base_compare", "test/core/parser/segments_common_test.py::test__parser__core_keyword", "test/core/parser/segments_common_test.py::test__parser__core_ephemeral_segment", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L001_test_1]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L002_test_1]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L003_test_1]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L003_test_2]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L003_test_3]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L003_test_4]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L003_test_5]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L003_test_6]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L003_test_7]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L003_test_8]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L003_test_9]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L003_test_10]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L003_test_11]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L003_test_12]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L004_test_1]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L004_test_2]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L004_test_3]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L005_test_1]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L006_test_1]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L006_test_2]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L006_test_3]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L006_test_4]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L006_test_5]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L006_test_6]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L008_test_1]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L008_test_2]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L008_test_3]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L010_test_1]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L010_test_2]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L010_test_3]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L010_test_4]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L010_test_5]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L011_test_1]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L013_test_1]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L013_test_2]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L013_test_3]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L013_test_4]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L013_test_5]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L013_test_6]", 
"test/core/rules/rule_test_cases_test.py::test__rule_test_case[L013_test_7]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L014_test_1]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L014_test_2]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L014_test_3]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L014_test_4]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L014_test_5]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L015_test_1]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L015_test_2]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L015_test_3]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L015_test_4]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L016_test_1]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L016_test_2]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L016_test_3]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L016_test_4]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L016_test_5]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L016_test_6]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L018_test_1]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L018_test_2]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L018_test_3]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L018_test_4]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L018_test_5]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L019_leading_comma_violations]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L019_leading_commas_allowed]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L019_leading_comma_violations_in_with_statement]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L019_leading_commas_allowed_in_with_statement]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L019_trailing_comma_violations]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L019_trailing_commas_allowed]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L019_trailing_comma_fixing_removes_extra_whitespace]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L019_leading_comma_fixing_flows_around_comments]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L020_test_1]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L021_test_1]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L021_test_2]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L022_test_1]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L022_test_2]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L022_test_3]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L022_test_4]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L022_test_5]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L022_test_6]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L022_test_7]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L022_test_8]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L023_test_1]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L024_test_1]", 
"test/core/rules/rule_test_cases_test.py::test__rule_test_case[L024_test_2]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L025_test_1]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L025_test_2]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L025_test_3]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L026_test_1]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L026_test_2]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L026_test_3]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L026_test_4]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L026_test_5]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L028_test_1]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L028_test_2]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L028_test_3]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L028_test_4]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L028_test_5]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L028_test_6]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L029_test_1]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L029_test_2]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L029_test_3]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L029_test_4]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L029_test_5]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L030_test_1]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L030_test_2]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L031_test_1]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L031_test_2]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L031_test_3]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L031_test_4]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L031_alias_single_char_identifiers]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L032_test_1]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L032_test_2]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L032_test_3]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L033_test_1]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L033_test_2]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L033_test_3]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L033_test_4]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L033_test_5]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L033_test_6]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L033_test_7]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L034_test_1]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L034_test_2]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L034_test_3]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L034_test_4]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L034_test_5]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L034_test_6]", "test/core/rules/std_fix_auto_test.py::test__std_fix_auto[snowflake-001_L014_semi_structured-L014]", 
"test/core/rules/std_fix_auto_test.py::test__std_fix_auto[bigquery-001_L003L006L009_templating-L003,L006,L009]", "test/core/rules/std_fix_auto_test.py::test__std_fix_auto[ansi-010_L022_CTEs_and_newlines-L022]", "test/core/rules/std_fix_auto_test.py::test__std_fix_auto[ansi-009_L010_keyword_capitalisation-L010]", "test/core/rules/std_fix_auto_test.py::test__std_fix_auto[ansi-008_L022L023L024_with_clause-L022,L023,L024]", "test/core/rules/std_fix_auto_test.py::test__std_fix_auto[ansi-007_L018_with_clause_1-L018]", "test/core/rules/std_fix_auto_test.py::test__std_fix_auto[ansi-006_L003_indentation_4-L003]", "test/core/rules/std_fix_auto_test.py::test__std_fix_auto[ansi-005_L017_function_spacing-L017]", "test/core/rules/std_fix_auto_test.py::test__std_fix_auto[ansi-004_L003_indentation_3-L003]", "test/core/rules/std_fix_auto_test.py::test__std_fix_auto[ansi-003_L016_long_line_2-L016]", "test/core/rules/std_fix_auto_test.py::test__std_fix_auto[ansi-002_L003_indentation_2-L003]", "test/core/rules/std_fix_auto_test.py::test__std_fix_auto[ansi-001_L016_long_line_1-L016]", "test/core/rules/std_roundtrip_test.py::test__cli__command__fix[L001-test/fixtures/linter/indentation_errors.sql]", "test/core/rules/std_roundtrip_test.py::test__cli__command__fix[L008-test/fixtures/linter/whitespace_errors.sql]", "test/core/rules/std_roundtrip_test.py::test__cli__command__fix[L008-test/fixtures/linter/indentation_errors.sql]", "test/core/rules/std_roundtrip_test.py::test__cli__command__fix[L010-test/fixtures/linter/whitespace_errors.sql]", "test/core/rules/std_roundtrip_test.py::test__cli__command__fix[L011-test/fixtures/parser/ansi/select_simple_i.sql]", "test/core/rules/std_roundtrip_test.py::test__cli__command__fix[L012-test/fixtures/parser/ansi/select_simple_i.sql]", "test/core/rules/std_roundtrip_test.py::test__cli__command__fix_templated[L010]", "test/core/rules/std_roundtrip_test.py::test__cli__command__fix_templated[L001]", "test/core/rules/std_test.py::test__rules__user_rules", "test/core/rules/std_test.py::test__rules__runaway_fail_catch", "test/core/rules/std_test.py::test__rules__std_file[L001-test/fixtures/linter/indentation_errors.sql-violations0]", "test/core/rules/std_test.py::test__rules__std_file[L002-test/fixtures/linter/indentation_errors.sql-violations1]", "test/core/rules/std_test.py::test__rules__std_file[L003-test/fixtures/linter/indentation_errors.sql-violations2]", "test/core/rules/std_test.py::test__rules__std_file[L004-test/fixtures/linter/indentation_errors.sql-violations3]", "test/core/rules/std_test.py::test__rules__std_file[L005-test/fixtures/linter/whitespace_errors.sql-violations4]", "test/core/rules/std_test.py::test__rules__std_file[L019-test/fixtures/linter/whitespace_errors.sql-violations5]", "test/core/rules/std_test.py::test__rules__std_file[L008-test/fixtures/linter/whitespace_errors.sql-violations6]", "test/core/rules/std_test.py::test__rules__std_file[L006-test/fixtures/linter/operator_errors.sql-violations7]", "test/core/rules/std_test.py::test__rules__std_file[L007-test/fixtures/linter/operator_errors.sql-violations8]", "test/core/rules/std_test.py::test__rules__std_file[L006-test/fixtures/linter/operator_errors_negative.sql-violations9]", "test/core/rules/std_test.py::test__rules__std_file[L003-test/fixtures/linter/indentation_error_hard.sql-violations10]", "test/core/rules/std_test.py::test__rules__std_file[L003-test/fixtures/linter/indentation_error_contained.sql-violations11]", 
"test/core/rules/std_test.py::test__rules__std_file[L016-test/fixtures/linter/block_comment_errors.sql-violations12]", "test/core/rules/std_test.py::test__rules__std_file[L016-test/fixtures/linter/block_comment_errors_2.sql-violations13]", "test/core/rules/std_test.py::test__rules__std_file[L027-test/fixtures/linter/column_references.sql-violations14]", "test/core/rules/std_test.py::test__rules__std_file[L027-test/fixtures/linter/column_references_bare_function.sql-violations15]", "test/core/rules/std_test.py::test__rules__std_file[L026-test/fixtures/linter/column_references.sql-violations16]", "test/core/rules/std_test.py::test__rules__std_file[L025-test/fixtures/linter/column_references.sql-violations17]", "test/core/rules/std_test.py::test__rules__std_file[L021-test/fixtures/linter/select_distinct_group_by.sql-violations18]", "test/core/rules/std_test.py::test__rules__std_file[L006-test/fixtures/linter/operator_errors_ignore.sql-violations19]", "test/core/rules/std_test.py::test__rules__std_file[L031-test/fixtures/linter/aliases_in_join_error.sql-violations20]", "test/core/rules/std_test.py::test__rules__std_L003_process_raw_stack", "test/core/rules/std_test.py::test_improper_configs_are_rejected[rule_config_dict0]", "test/core/rules/std_test.py::test_improper_configs_are_rejected[rule_config_dict1]", "test/core/rules/std_test.py::test_improper_configs_are_rejected[rule_config_dict2]", "test/core/rules/std_test.py::test_improper_configs_are_rejected[rule_config_dict3]", "test/core/rules/std_test.py::test_improper_configs_are_rejected[rule_config_dict4]", "test/core/rules/std_test.py::test_improper_configs_are_rejected[rule_config_dict5]", "test/core/rules/std_test.py::test_improper_configs_are_rejected[rule_config_dict6]", "test/core/rules/std_test.py::test_improper_configs_are_rejected[rule_config_dict7]", "test/core/rules/std_test.py::test_improper_configs_are_rejected[rule_config_dict8]", "test/core/rules/std_test.py::test_improper_configs_are_rejected[rule_config_dict9]", "test/core/rules/std_test.py::test_rules_cannot_be_instantiated_without_declared_configs", "test/core/rules/std_test.py::test_rules_configs_are_dynamically_documented", "test/core/rules/std_test.py::test_rule_exception_is_caught_to_validation", "test/core/templaters_test.py::test__templater_selection", "test/core/templaters_test.py::test__templater_raw", "test/core/templaters_test.py::test__templater_python", "test/core/templaters_test.py::test__templater_python_error", "test/core/templaters_test.py::test__templater_jinja", "test/core/templaters_test.py::test__templater_jinja_error", "test/core/templaters_test.py::test__templater_jinja_error_catatrophic", "test/core/templaters_test.py::test__templater_full[jinja_a/jinja-True]", "test/core/templaters_test.py::test__templater_full[jinja_b/jinja-False]", "test/core/templaters_test.py::test__templater_full[jinja_c_dbt/dbt_builtins-True]", "test/core/templaters_test.py::test__templater_full[jinja_e/jinja-True]", "test/core/templaters_test.py::test__templater_full[jinja_f/jinja-True]", "test/core/templaters_test.py::test__templater_full[jinja_g_macros/jinja-True]", "test/core/templaters_test.py::test__templater_dbt_missing", "test/diff_quality_plugin_test.py::test_diff_quality_plugin[test/fixtures/linter/indentation_errors.sql-expected_violations_lines0]", "test/diff_quality_plugin_test.py::test_diff_quality_plugin[test/fixtures/linter/parse_error.sql-expected_violations_lines1]" ]
[]
MIT License
9,039
473
[ "src/sqlfluff/core/rules/base.py", "src/sqlfluff/core/rules/std.py" ]
dwavesystems__dwave-system-368
fb54cce35866a879bfce56dfb2de28fb3629c804
2020-11-26 01:46:47
32e9065cddeb123106b43d947877043c1a2ccc78
diff --git a/dwave/system/composites/reversecomposite.py b/dwave/system/composites/reversecomposite.py index 7f9f54b..669dbeb 100644 --- a/dwave/system/composites/reversecomposite.py +++ b/dwave/system/composites/reversecomposite.py @@ -84,17 +84,21 @@ class ReverseAdvanceComposite(dimod.ComposedSampler): return {'child_properties': self.child.properties.copy()} def sample(self, bqm, anneal_schedules=None, **parameters): - """Sample the binary quadratic model using reverse annealing along a given set of anneal schedules. + """Sample the binary quadratic model using reverse annealing along a given set + of anneal schedules. Args: bqm (:obj:`dimod.BinaryQuadraticModel`): Binary quadratic model to be sampled from. - anneal_schedules (list of lists): Anneal schedules in order of submission. Each schedule is - formatted as a list of [time, s] pairs + anneal_schedules (list of lists, optional, default=[[[0, 1], [1, 0.35], [9, 0.35], [10, 1]]]): + Anneal schedules in order of submission. Each schedule is formatted + as a list of [time, s] pairs, in which time is in microseconds and s + is the normalized persistent current in the range [0,1]. - initial_state (dict, optional): the state to reverse anneal from. If not provided, it will - be randomly generated + initial_state (dict, optional): + The state to reverse anneal from. If not provided, it will + be randomly generated. **parameters: Parameters for the sampling method, specified by the child sampler. @@ -128,7 +132,7 @@ class ReverseAdvanceComposite(dimod.ComposedSampler): child = self.child if anneal_schedules is None: - return child.sample(bqm, **parameters) + anneal_schedules = [[[0, 1], [1, 0.35], [9, 0.35], [10, 1]]] vartype_values = list(bqm.vartype.value) if 'initial_state' not in parameters: @@ -145,16 +149,34 @@ class ReverseAdvanceComposite(dimod.ComposedSampler): if "answer_mode" in child.parameters: parameters['answer_mode'] = 'histogram' - vectors = {} + samplesets = None for schedule_idx, anneal_schedule in enumerate(anneal_schedules): sampleset = child.sample(bqm, anneal_schedule=anneal_schedule, initial_state=initial_state, **parameters) - # update vectors initial_state, _ = dimod.as_samples(initial_state) - vectors = _update_data_vector(vectors, sampleset, - {'initial_state': [initial_state[0]] * len(sampleset.record.energy), - 'schedule_index': [schedule_idx] * len(sampleset.record.energy)}) + + if 'initial_state' not in sampleset.record.dtype.names: + init_state_vect = [] + + if parameters['reinitialize_state']: + init_state_vect = [initial_state[0].copy() for i in range(len(sampleset.record.energy))] + else: + # each sample is the next sample's initial state + init_state_vect.append(initial_state[0].copy()) + for sample in sampleset.record.sample[:-1]: + init_state_vect.append(sample) + + sampleset = dimod.append_data_vectors(sampleset, initial_state=init_state_vect) + + if 'schedule_index' not in sampleset.record.dtype.names: + schedule_index_vect = [schedule_idx] * len(sampleset.record.energy) + sampleset = dimod.append_data_vectors(sampleset, schedule_index=schedule_index_vect) + + if samplesets is None: + samplesets = sampleset + else: + samplesets = dimod.concatenate((samplesets, sampleset)) if schedule_idx+1 == len(anneal_schedules): # no need to create the next initial state - last iteration @@ -171,11 +193,8 @@ class ReverseAdvanceComposite(dimod.ComposedSampler): # if not reinitialized, take the last state as the next initial state initial_state = dict(zip(sampleset.variables, sampleset.record.sample[-1])) - 
samples = vectors.pop('sample') - return dimod.SampleSet.from_samples((samples, bqm.variables), - bqm.vartype, - info={'anneal_schedules': anneal_schedules}, - **vectors) + samplesets.info['anneal_schedules'] = anneal_schedules + return samplesets class ReverseBatchStatesComposite(dimod.ComposedSampler, dimod.Initialized): @@ -324,8 +343,8 @@ class ReverseBatchStatesComposite(dimod.ComposedSampler, dimod.Initialized): sampleset = child.sample(bqm, initial_state=dict(zip(bqm.variables, initial_state)), **parameters) if 'initial_state' not in sampleset.record.dtype.names: - init_state_vector = [initial_state] * len(sampleset.record.energy) - sampleset = dimod.append_data_vectors(sampleset, initial_state=init_state_vector) + init_state_vect = [initial_state.copy() for i in range(len(sampleset.record.energy))] + sampleset = dimod.append_data_vectors(sampleset, initial_state=init_state_vect) if samplesets is None: samplesets = sampleset @@ -333,19 +352,3 @@ class ReverseBatchStatesComposite(dimod.ComposedSampler, dimod.Initialized): samplesets = dimod.concatenate((samplesets, sampleset)) return samplesets - -def _update_data_vector(vectors, sampleset, additional_parameters=None): - var_names = sampleset.record.dtype.names - for name in var_names: - try: - vectors[name] = vectors[name] + list(sampleset.record[name]) - except KeyError: - vectors[name] = list(sampleset.record[name]) - - for key, val in additional_parameters.items(): - if key not in var_names: - try: - vectors[key] = vectors[key] + list(val) - except KeyError: - vectors[key] = list(val) - return vectors
Reverse-annealing composites should not silently forward anneal **Current Problem** If `ReverseAdvanceComposite.sample` is not given an `anneal_schedules` parameter it samples directly from the child without doing any reverse annealing at all ([`reversecomposite.py` lines 86–87](https://github.com/dwavesystems/dwave-system/blob/cfec1fa6e34631403a3fcabd556ec954e14f7d29/dwave/system/composites/reversecomposite.py#L86-L87)). Similarly, if `ReverseBatchStatesComposite.sample` is not given an `initial_states` parameter it samples directly from the child without doing any reverse annealing at all ([`reversecomposite.py` lines 180–181](https://github.com/dwavesystems/dwave-system/blob/cfec1fa6e34631403a3fcabd556ec954e14f7d29/dwave/system/composites/reversecomposite.py#L180-L181)). I'm new to reverse annealing and struggled to realize why nothing seemed different when I used either of the reverse-annealing composites. I didn't realize that each composite has its own parameters that must be provided for reverse annealing to take effect. **Proposed Solution** Rather than silently fall back on forward annealing, the two reverse-annealing composites should throw an exception if a parameter needed for reverse annealing is not supplied. **Alternatives Considered** An alternative is for the documentation to clarify that nothing special will happen unless certain parameters are provided.
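The fix in the patch above addresses this by defaulting `anneal_schedules` to `[[[0, 1], [1, 0.35], [9, 0.35], [10, 1]]]` instead of silently forwarding to the child sampler. As a rough, standalone illustration of what distinguishes such a schedule from a forward anneal (it starts and ends at `s = 1` rather than ramping up from `s = 0`), here is a minimal sketch; the schedule values come from the patch, but the helper name and the forward-schedule comparison are assumptions made purely for this example and are not part of dwave-system.

```python
# Illustrative only: a tiny check for what makes a schedule a *reverse*
# anneal (s starts and ends at 1 and dips in between), as opposed to a
# forward anneal (s rises from 0 to 1). The default schedule below is the
# one introduced by the patch; the function name is invented for this sketch.

DEFAULT_SCHEDULE = [[0, 1], [1, 0.35], [9, 0.35], [10, 1]]


def looks_like_reverse_schedule(schedule):
    """Return True if the [time, s] pairs describe a reverse anneal."""
    times = [t for t, _ in schedule]
    s_values = [s for _, s in schedule]
    return (
        times == sorted(times)      # time must be non-decreasing
        and s_values[0] == 1        # starts fully annealed
        and s_values[-1] == 1       # ends fully annealed
        and min(s_values) < 1       # actually dips somewhere in between
    )


if __name__ == "__main__":
    print(looks_like_reverse_schedule(DEFAULT_SCHEDULE))   # True
    print(looks_like_reverse_schedule([[0, 0], [10, 1]]))  # False: forward anneal
```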
dwavesystems/dwave-system
diff --git a/tests/test_reverse_composite.py b/tests/test_reverse_composite.py index 1327242..761a457 100644 --- a/tests/test_reverse_composite.py +++ b/tests/test_reverse_composite.py @@ -154,6 +154,15 @@ class TestReverseIsing(unittest.TestCase): response = sampler.sample_ising(h, J, initial_states=initial_states, initial_states_generator='none', num_reads=num_reads) self.assertEqual(len(response), 4) + def test_advance_no_schedules(self): + sampler = ReverseAdvanceComposite(MockReverseSampler()) + + h = {0: -1., 4: 2} + J = {(0, 4): 1.5} + + response = sampler.sample_ising(h, J) + self.assertIn('schedule_index', response.record.dtype.names) + def test_advance_correct_schedules(self): sampler = ReverseAdvanceComposite(MockReverseSampler()) @@ -191,9 +200,9 @@ class TestReverseIsing(unittest.TestCase): vars = response.variables for datum in response.data(fields=['initial_state', 'schedule_index']): if datum.schedule_index == 0: - self.assertListEqual([initial[v] for v in vars], list(datum.initial_state)) - if datum.schedule_index > 1: - self.assertListEqual([1, -1], list(datum.initial_state)) + self.assertListEqual([initial[v] for v in vars], list(datum.initial_state)) # initial_state = state that was passed in + else: + self.assertListEqual([1, -1], list(datum.initial_state)) # initial_state = best state found in last schedule def test_correct_initial_state_used_reinit(self): sampler = ReverseAdvanceComposite(MockReverseSampler()) @@ -202,15 +211,16 @@ class TestReverseIsing(unittest.TestCase): J = {(0, 4): 1.5} anneal_schedules = [[[0, 1], [1, 0.5], [2, 0.5], [3, 1]], [[0, 1], [1, 0.5], [2, 0.5], [3, 1]]] initial = {0: -1, 4: -1} + response = sampler.sample_ising(h, J, anneal_schedules=anneal_schedules, initial_state=initial, reinitialize_state=False) vars = response.variables - for datum in response.data(fields=['initial_state', 'schedule_index']): - if datum.schedule_index == 0: - self.assertListEqual([initial[v] for v in vars], list(datum.initial_state)) - if datum.schedule_index > 1: - self.assertListEqual([-1, 1], list(datum.initial_state)) + + init = [initial[v] for v in vars] + for datum in response.data(fields=['sample', 'initial_state'], sorted_by=None): + self.assertListEqual(init, list(datum.initial_state)) + init = [datum.sample[v] for v in vars] # sample should be the initial state of the next sample def test_combination(self): sampler = ReverseBatchStatesComposite(ReverseAdvanceComposite(MockReverseSampler()))
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 0 }, "num_modified_files": 1 }
1.2
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[drivers] --extra-index-url https://pypi.dwavesys.com/simple", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "parameterized", "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.7", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
certifi @ file:///croot/certifi_1671487769961/work/certifi charset-normalizer==3.4.1 click==8.1.8 decorator==4.4.2 dimod==0.9.11 dwave-cloud-client==0.8.1 dwave-drivers==0.4.4 dwave-networkx==0.8.4 -e git+https://github.com/dwavesystems/dwave-system.git@fb54cce35866a879bfce56dfb2de28fb3629c804#egg=dwave_system dwave-tabu==0.2.2 exceptiongroup==1.2.2 fasteners==0.19 homebase==1.0.1 idna==3.10 importlib-metadata==6.7.0 iniconfig==2.0.0 minorminer==0.2.0 mock==2.0.0 networkx==2.6.3 numpy==1.18.0 packaging==24.0 parameterized==0.9.0 pbr==6.1.1 plucky==0.4.3 pluggy==1.2.0 PySocks==1.7.1 pytest==7.4.4 python-dateutil==2.9.0.post0 requests==2.31.0 scipy==1.7.3 six==1.11.0 tomli==2.0.1 typing_extensions==4.7.1 urllib3==2.0.7 zipp==3.15.0
name: dwave-system channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2022.12.7=py37h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=22.3.1=py37h06a4308_0 - python=3.7.16=h7a1cb2a_0 - readline=8.2=h5eee18b_0 - setuptools=65.6.3=py37h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.38.4=py37h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - charset-normalizer==3.4.1 - click==8.1.8 - decorator==4.4.2 - dimod==0.9.11 - dwave-cloud-client==0.8.1 - dwave-drivers==0.4.4 - dwave-networkx==0.8.4 - dwave-tabu==0.2.2 - exceptiongroup==1.2.2 - fasteners==0.19 - homebase==1.0.1 - idna==3.10 - importlib-metadata==6.7.0 - iniconfig==2.0.0 - minorminer==0.2.0 - mock==2.0.0 - networkx==2.6.3 - numpy==1.18.0 - packaging==24.0 - parameterized==0.9.0 - pbr==6.1.1 - plucky==0.4.3 - pluggy==1.2.0 - pysocks==1.7.1 - pytest==7.4.4 - python-dateutil==2.9.0.post0 - requests==2.31.0 - scipy==1.7.3 - six==1.11.0 - tomli==2.0.1 - typing-extensions==4.7.1 - urllib3==2.0.7 - zipp==3.15.0 prefix: /opt/conda/envs/dwave-system
[ "tests/test_reverse_composite.py::TestReverseIsing::test_advance_no_schedules", "tests/test_reverse_composite.py::TestReverseIsing::test_correct_initial_state_used_reinit" ]
[]
[ "tests/test_reverse_composite.py::TestConstruction::test_instantiation_smoketest_advance", "tests/test_reverse_composite.py::TestConstruction::test_instantiation_smoketest_batch", "tests/test_reverse_composite.py::TestReverseIsing::test_advance_correct_schedules", "tests/test_reverse_composite.py::TestReverseIsing::test_batch_correct_states", "tests/test_reverse_composite.py::TestReverseIsing::test_batch_generate_more_initial_states", "tests/test_reverse_composite.py::TestReverseIsing::test_batch_no_initial_states", "tests/test_reverse_composite.py::TestReverseIsing::test_batch_truncate_initial_states", "tests/test_reverse_composite.py::TestReverseIsing::test_combination", "tests/test_reverse_composite.py::TestReverseIsing::test_correct_initial_state_input", "tests/test_reverse_composite.py::TestReverseIsing::test_correct_initial_state_used", "tests/test_reverse_composite.py::TestReverseIsing::test_sample_ising_advance", "tests/test_reverse_composite.py::TestReverseIsing::test_sample_ising_batch", "tests/test_reverse_composite.py::TestReverseBinary::test_sample_qubo_advance", "tests/test_reverse_composite.py::TestReverseBinary::test_sample_qubo_batch" ]
[]
Apache License 2.0
9,040
1,445
[ "dwave/system/composites/reversecomposite.py" ]
enthought__traits-futures-255
60efc6ca839fbcb235ebc886d64fa0ce4ce19fc5
2020-11-26 13:02:21
f18050d302e85669c869304832eabf25b2ff9159
mdickinson: > it would be nice to put together dev docs on the topic though, while this is all still fresh in your head Agreed. I'll open an issue, and point to the existing PR description that contained a dump of the routing stuff. mdickinson: > I'll open an issue Opened #261
diff --git a/traits_futures/message_router.py b/traits_futures/message_router.py index 9ca7d3f..b2e4db4 100644 --- a/traits_futures/message_router.py +++ b/traits_futures/message_router.py @@ -69,13 +69,15 @@ class MessageRouter(HasStrictTraits): """ Prepare router for routing. """ - pass + self._pingee = Pingee(on_ping=self._route_message) + self._pingee.connect() def disconnect(self): """ Undo any connections made by the ``connect`` call. """ - pass + self._pingee.disconnect() + self._pingee = None # Private traits ########################################################## @@ -110,6 +112,3 @@ class MessageRouter(HasStrictTraits): def __connection_ids_default(self): return itertools.count() - - def __pingee_default(self): - return Pingee(on_ping=self._route_message) diff --git a/traits_futures/null/pinger.py b/traits_futures/null/pinger.py index 59a1e15..8da4aed 100644 --- a/traits_futures/null/pinger.py +++ b/traits_futures/null/pinger.py @@ -22,26 +22,45 @@ class Pingee: """ Receiver for pings. + Whenever a ping is received from a linked Pingee, the receiver + calls the given fixed parameterless callable. + + The ping receiver must be connected (using the ``connect``) method + before use, and should call ``disconnect`` when it's no longer + expected to receive pings. + Parameters ---------- on_ping : callable - Zero-argument callable that's executed on the main thread as a - result of each ping. + Zero-argument callable that's called on the main thread + every time a ping is received. """ def __init__(self, on_ping): - self._event_loop = asyncio.get_event_loop() self._on_ping = on_ping + def connect(self): + """ + Prepare Pingee to receive pings. + """ + self._event_loop = asyncio.get_event_loop() + + def disconnect(self): + """ + Undo any connections made in the connect method. + """ + del self._event_loop + class Pinger: """ - Ping emitter, which can emit pings in a thread-safe manner. + Ping emitter, which can send pings to a receiver in a thread-safe manner. Parameters ---------- pingee : Pingee - The corresponding ping receiver. + The target receiver for the pings. The receiver must already be + connected. """ def __init__(self, pingee): diff --git a/traits_futures/qt/pinger.py b/traits_futures/qt/pinger.py index b510eeb..ef435a3 100644 --- a/traits_futures/qt/pinger.py +++ b/traits_futures/qt/pinger.py @@ -30,11 +30,18 @@ class Pingee(QObject): """ Receiver for pings. + Whenever a ping is received from a linked Pingee, the receiver + calls the given fixed parameterless callable. + + The ping receiver must be connected (using the ``connect``) method + before use, and should call ``disconnect`` when it's no longer + expected to receive pings. + Parameters ---------- on_ping : callable - Zero-argument callable that's executed on the main thread as a - result of each ping. + Zero-argument callable that's called on the main thread + every time a ping is received. """ def __init__(self, on_ping): @@ -45,15 +52,28 @@ class Pingee(QObject): def _execute_ping_callback(self): self._on_ping() + def connect(self): + """ + Prepare Pingee to receive pings. + """ + pass + + def disconnect(self): + """ + Undo any connections made in the connect method. + """ + pass + class Pinger: """ - Ping emitter, which can emit pings in a thread-safe manner. + Ping emitter, which can send pings to a receiver in a thread-safe manner. Parameters ---------- pingee : Pingee - The corresponding ping receiver. + The target receiver for the pings. The receiver must already be + connected. 
""" def __init__(self, pingee): diff --git a/traits_futures/wx/pinger.py b/traits_futures/wx/pinger.py index f1ed51b..df65e74 100644 --- a/traits_futures/wx/pinger.py +++ b/traits_futures/wx/pinger.py @@ -24,12 +24,13 @@ _PingEvent, _PingEventBinder = wx.lib.newevent.NewEvent() class Pinger: """ - Ping emitter, which can emit pings in a thread-safe manner. + Ping emitter, which can send pings to a receiver in a thread-safe manner. Parameters ---------- pingee : Pingee - The corresponding ping receiver. + The target receiver for the pings. The receiver must already be + connected. """ def __init__(self, pingee): @@ -60,6 +61,13 @@ class Pingee(wx.EvtHandler): """ Receiver for pings. + Whenever a ping is received from a linked Pingee, the receiver + calls the given fixed parameterless callable. + + The ping receiver must be connected (using the ``connect``) method + before use, and should call ``disconnect`` when it's no longer + expected to receive pings. + Parameters ---------- on_ping : callable @@ -69,11 +77,16 @@ class Pingee(wx.EvtHandler): def __init__(self, on_ping): wx.EvtHandler.__init__(self) - self._on_ping = on_ping - self.Bind(_PingEventBinder, self._on_ping_event) + self._on_ping = lambda event: on_ping() - def _on_ping_event(self, event): + def connect(self): + """ + Prepare Pingee to receive pings. + """ + self.Bind(_PingEventBinder, handler=self._on_ping) + + def disconnect(self): """ - Handler for events of type _PING_EVENT_TYPE. + Undo any connections made in the connect method. """ - self._on_ping() + self.Unbind(_PingEventBinder, handler=self._on_ping)
Call Unbind in the wxPython Pingee implementation The wxPython `Pingee` implementation introduced in #246 has a `Bind` call without a matching `Unbind` call. While we don't have any evidence that this actually causes any problems, in general we probably want to provide an API for the `Pingee` to undo anything it's had to set up. Toolkits can then make use of this or not, as necessary.
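The fix (see the patch above) gives `Pingee` explicit `connect()`/`disconnect()` methods so each toolkit can pair whatever setup it does with a matching teardown (`Bind`/`Unbind` in the wx case). The following toolkit-free sketch only shows that symmetry; `FakeEventSource` and `SketchPingee` are names invented for this illustration, and nothing here is the actual wx, Qt, or asyncio implementation.

```python
# A toolkit-free sketch of the connect()/disconnect() pairing the issue asks
# for. The "binding" is just registering a callback in a list, so the
# symmetry between setup and teardown is easy to see.


class FakeEventSource:
    """Stand-in for a toolkit event source (wx.EvtHandler, QObject, ...)."""

    def __init__(self):
        self.handlers = []

    def bind(self, handler):
        self.handlers.append(handler)

    def unbind(self, handler):
        self.handlers.remove(handler)

    def emit(self):
        for handler in list(self.handlers):
            handler()


class SketchPingee:
    """Receiver that undoes in disconnect() exactly what connect() set up."""

    def __init__(self, source, on_ping):
        self._source = source
        self._on_ping = on_ping

    def connect(self):
        self._source.bind(self._on_ping)

    def disconnect(self):
        self._source.unbind(self._on_ping)


if __name__ == "__main__":
    source = FakeEventSource()
    pingee = SketchPingee(source, lambda: print("ping"))
    pingee.connect()
    source.emit()       # prints "ping"
    pingee.disconnect()
    source.emit()       # prints nothing; the handler was unbound
```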
enthought/traits-futures
diff --git a/traits_futures/tests/test_pinger.py b/traits_futures/tests/test_pinger.py index 3036953..91ec622 100644 --- a/traits_futures/tests/test_pinger.py +++ b/traits_futures/tests/test_pinger.py @@ -102,14 +102,24 @@ class PingListener(HasStrictTraits): #: Total number of pings received. ping_count = Int(0) + def __enter__(self): + self.connect() + return self + + def __exit__(self, *exc_info): + self.disconnect() + + def connect(self): + self.pingee = Pingee(on_ping=lambda: setattr(self, "ping", True)) + self.pingee.connect() + + def disconnect(self): + self.pingee.disconnect() + self.pingee = None + def _ping_fired(self): self.ping_count += 1 - def _pingee_default(self): - return Pingee( - on_ping=lambda: setattr(self, "ping", True), - ) - class MultipleListeners(HasStrictTraits): """ @@ -137,8 +147,10 @@ class TestPinger(GuiTestAssistant, unittest.TestCase): def setUp(self): GuiTestAssistant.setUp(self) self.listener = PingListener() + self.listener.connect() def tearDown(self): + self.listener.disconnect() del self.listener GuiTestAssistant.tearDown(self) @@ -178,18 +190,17 @@ class TestPinger(GuiTestAssistant, unittest.TestCase): self.assertEqual(self.listener.ping_count, 15) def test_multiple_pingees(self): - listener1 = PingListener() - listener2 = PingListener() - listeners = MultipleListeners(listeners=[listener1, listener2]) - - with BackgroundPinger(listener1.pingee) as pinger1: - with BackgroundPinger(listener2.pingee) as pinger2: - pinger1.ping(3) - pinger2.ping(4) - - self.run_until( - listeners, "ping", lambda obj: obj.ping_count >= 7 - ) + with PingListener() as listener1: + with PingListener() as listener2: + listeners = MultipleListeners(listeners=[listener1, listener2]) + with BackgroundPinger(listener1.pingee) as pinger1: + with BackgroundPinger(listener2.pingee) as pinger2: + pinger1.ping(3) + pinger2.ping(4) + + self.run_until( + listeners, "ping", lambda obj: obj.ping_count >= 7 + ) self.assertEqual(listener1.ping_count, 3) self.assertEqual(listener2.ping_count, 4)
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 1 }, "num_modified_files": 4 }
0.2
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "click", "setuptools", "pytest" ], "pre_install": [], "python": "3.9", "reqs_path": [ "requirements/base.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
click==8.1.8 exceptiongroup==1.2.2 importlib_metadata==8.6.1 iniconfig==2.1.0 packaging==24.2 pluggy==1.5.0 pyface==8.0.0 pytest==8.3.5 tomli==2.2.1 traits==7.0.2 -e git+https://github.com/enthought/traits-futures.git@60efc6ca839fbcb235ebc886d64fa0ce4ce19fc5#egg=traits_futures zipp==3.21.0
name: traits-futures channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - click==8.1.8 - exceptiongroup==1.2.2 - importlib-metadata==8.6.1 - iniconfig==2.1.0 - packaging==24.2 - pluggy==1.5.0 - pyface==8.0.0 - pytest==8.3.5 - tomli==2.2.1 - traits==7.0.2 - zipp==3.21.0 prefix: /opt/conda/envs/traits-futures
[ "traits_futures/tests/test_pinger.py::TestPinger::test_background_threads_finish_before_event_loop_starts", "traits_futures/tests/test_pinger.py::TestPinger::test_multiple_background_pingers", "traits_futures/tests/test_pinger.py::TestPinger::test_multiple_background_pings", "traits_futures/tests/test_pinger.py::TestPinger::test_multiple_pingees", "traits_futures/tests/test_pinger.py::TestPinger::test_single_background_ping" ]
[]
[]
[]
BSD 3-Clause "New" or "Revised" License
9,043
1,559
[ "traits_futures/message_router.py", "traits_futures/null/pinger.py", "traits_futures/qt/pinger.py", "traits_futures/wx/pinger.py" ]
sqlfluff__sqlfluff-590
443b132fb3df3d2ef2b2711f0d3a69023c44aaa5
2020-11-26 22:59:31
b6074640b62be3d0d9f9a0e4c5f211420412ad03
alanmcruickshank: I like the thinking around this re-write - but it changes the intent of this rule a bit more. The original requires a newline to recognize an indent; this new logic just looks for long patches of whitespace (regardless of whether they're at the start of a line).

What I'm not sure about is whether this is a bad thing. The potential issues would be:
- Handling indents (but these are only really possible with spaces *anyway* so probably not an issue).
- Long patches of whitespace *after* code, which is probably there for layout reasons.

I think the former is probably fine, but I think the latter could behave strangely in cases like this:

```
SELECT
    a,          -- Some comment
    longer_col  -- A lined up comment
FROM spam
```

In this example we have extended patches of whitespace *before and after* `a,`. The reason I'm not sure whether this is a problem is because I wonder if the same fixing logic actually applies fine 🤷 .

@NiallRees - could you add in a couple of test cases like this with a mixture of tabs and spaces in the before and after slots (and different configs) to make sure we don't behave strangely in these cases now that we're not looking for an explicit newline. If it behaves nicely - then I think this is actually a **feature** rather than a bug and we should document it as such! 😄

NiallRees:
> SELECT
>     a,          -- Some comment
>     longer_col  -- A lined up comment
> FROM spam

Just done some testing with indents after text. I hadn't quite appreciated what text editors did with variable tab size to align comments. I think we're fine though for pre-text indents. This is the current result with indented comments after text:

![image](https://user-images.githubusercontent.com/23722609/101277789-c1c9d800-37ae-11eb-90c5-fe0da616c7e5.png)

I think the options are:
1. Revert to the previous behaviour of only trying to convert tabs to spaces which are directly after a newline.
2. Introduce a more complex, text-editor-style understanding of tabs, taking into account the number of characters from the newline to determine how many space characters a tab should be converted to if it appears after other text.

I'm tempted to go with 1 for now as it's still an improvement over the previous rule (the rule is triggered when the 'wrong' indent is used, and will convert from tabs to spaces and vice versa), and it is 'safe' in terms of not messing up formatting.

alanmcruickshank: I'm definitely fine with 1 for now. 2 could be fun, but I think it's a lot less important than the other. Using aligned comments after things is a lot less common than indentation in general (perhaps obviously 🤷 ).
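The "option 2" discussed above — working out how many space characters a tab is worth from its distance to the start of the line — is essentially what editors do with tab stops, and Python models it with `str.expandtabs`. A minimal, self-contained comparison follows; the tab size of 4 is an assumption for the example, not a lookup of any sqlfluff config.

```python
# Why tabs *after* text are awkward to fix naively: an editor expands a tab
# to the next tab stop, so the number of spaces it is "worth" depends on the
# column where it starts. str.expandtabs models that; a blind substitution
# does not.

tab_size = 4
line = "    a,\t-- Some comment"

naive = line.replace("\t", " " * tab_size)   # what a blind substitution does
editor_view = line.expandtabs(tab_size)      # what the editor actually shows

print(repr(naive))        # '    a,    -- Some comment'  (always 4 spaces)
print(repr(editor_view))  # '    a,  -- Some comment'    (2 spaces to the tab stop)
```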
diff --git a/src/sqlfluff/core/rules/std.py b/src/sqlfluff/core/rules/std.py index 8fe667776..75fedcdcc 100644 --- a/src/sqlfluff/core/rules/std.py +++ b/src/sqlfluff/core/rules/std.py @@ -689,51 +689,79 @@ class Rule_L003(BaseCrawler): return LintResult(memory=memory) +@std_rule_set.document_fix_compatible +@std_rule_set.document_configuration @std_rule_set.register class Rule_L004(BaseCrawler): - """Mixed Tab and Space indentation found in file. + """Incorrect indentation type. + + Note 1: spaces are only fixed to tabs if the number of spaces in the + indent is an integer multiple of the tab_space_size config. + Note 2: fixes are only applied to indents at the start of a line. Indents + after other text on the same line are not fixed. | **Anti-pattern** - | The • character represents a space and the → character represents a tab. - | In this example, the second line is indented with spaces and the third one with tab. + | Using tabs instead of spaces when indent_unit config set to spaces (default). .. code-block:: - SELECT + select ••••a, → b - FROM foo + from foo | **Best practice** | Change the line to use spaces only. .. code-block:: - SELECT + select ••••a, ••••b - FROM foo + from foo """ - def _eval(self, segment, raw_stack, memory, **kwargs): - """Mixed Tab and Space indentation found in file. + config_keywords = ["indent_unit", "tab_space_size"] - We use the `memory` feature here to keep track of - what we've seen in the past. - - """ - indents_seen = memory.get("indents_seen", set()) - if segment.is_type("whitespace"): - if len(raw_stack) == 0 or raw_stack[-1].is_type("newline"): - indents_here = set(segment.raw) - indents_union = indents_here | indents_seen - memory["indents_seen"] = indents_union - if len(indents_union) > 1: - # We are seeing an indent we haven't seen before and we've seen others before - return LintResult(anchor=segment, memory=memory) - else: - return LintResult(memory=memory) - return LintResult(memory=memory) + # TODO fix indents after text: https://github.com/sqlfluff/sqlfluff/pull/590#issuecomment-739484190 + def _eval(self, segment, raw_stack, **kwargs): + """Incorrect indentation found in file.""" + tab = "\t" + space = " " + correct_indent = ( + space * self.tab_space_size if self.indent_unit == "space" else tab + ) + wrong_indent = ( + tab if self.indent_unit == "space" else space * self.tab_space_size + ) + if segment.is_type("whitespace") and wrong_indent in segment.raw: + description = "Incorrect indentation type found in file." + edit_indent = segment.raw.replace(wrong_indent, correct_indent) + # Ensure that the number of space indents is a multiple of tab_space_size + # before attempting to convert spaces to tabs to avoid mixed indents + # unless we are converted tabs to spaces (indent_unit = space) + if ( + ( + self.indent_unit == "space" + or segment.raw.count(space) % self.tab_space_size == 0 + ) + # Only attempt a fix at the start of a newline for now + and (len(raw_stack) == 0 or raw_stack[-1].is_type("newline")) + ): + fixes = [ + LintFix( + "edit", + segment, + self.make_whitespace( + raw=edit_indent, + pos_marker=segment.pos_marker, + ), + ) + ] + else: + description += " No fix available as number of spaces in indent is not a multiple of tab_space_size." + fixes = [] + return LintResult(anchor=segment, fixes=fixes, description=description) @std_rule_set.document_fix_compatible
Enhance L004 to enforce one indentation style across all files

Currently L004 throws an error if there is inconsistent indentation in a file (mixed spaces and tabs).

```
L: 121 | P: 1 | L004 | Mixed Tab and Space indentation found in file.
```

How would we feel about changing this to just enforce either tabs or spaces across all files? I personally would much prefer it as an anti-tabber; the logic would be simpler and a fix would be easy to implement. The indentation preference could just be set in the config.
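The patch above implements exactly this: `indent_unit` and `tab_space_size` pick the "correct" indent, and whitespace at the start of a line that uses the other unit is rewritten. Below is a stripped-down, standalone sketch of that replacement idea using plain string handling only; the hard-coded config values are assumptions for the example, and none of the BaseCrawler/LintFix machinery of the real rule is shown.

```python
# Stripped-down version of the replacement idea from the patch: choose the
# correct indent from config, then rewrite leading indents that use the
# other unit. Config values are hard-coded here purely for illustration.

indent_unit = "space"   # or "tab"
tab_space_size = 4

correct = " " * tab_space_size if indent_unit == "space" else "\t"
wrong = "\t" if indent_unit == "space" else " " * tab_space_size


def fix_line(line):
    """Rewrite the leading indent of one line to use the configured unit."""
    stripped = line.lstrip(" \t")
    indent = line[: len(line) - len(stripped)]
    if wrong in indent:
        indent = indent.replace(wrong, correct)
    return indent + stripped


if __name__ == "__main__":
    print(fix_line("\tselect 1"))    # '    select 1'
    print(fix_line("  \tselect 1"))  # mixed indent: the tab is expanded too
```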
sqlfluff/sqlfluff
diff --git a/test/core/rules/test_cases/L004.yml b/test/core/rules/test_cases/L004.yml index b22617b32..f3c8fb0ec 100644 --- a/test/core/rules/test_cases/L004.yml +++ b/test/core/rules/test_cases/L004.yml @@ -1,10 +1,106 @@ rule: L004 -test_1: - pass_str: " \nSELECT 1" +spaces_pass_default: + pass_str: " \nSELECT 1" -test_2: - pass_str: "\t\tSELECT 1\n" -test_3: - fail_str: " \n \t \n SELECT 1" +spaces_fail: + fail_str: " \nSELECT 1" + fix_str: "\t\nSELECT 1" + + configs: + rules: + indent_unit: tab + + +spaces_fail_custom_tab_space_size: + fail_str: " \nSELECT 1" + fix_str: "\t\t\nSELECT 1" + + configs: + rules: + indent_unit: tab + tab_space_size: 2 + + +tabs_fail_default: + fail_str: "\t\tSELECT 1\n" + fix_str: " SELECT 1\n" + + +tabs_fail_default_set_tab_space_size: + fail_str: "\t\tSELECT 1\n" + fix_str: " SELECT 1\n" + + configs: + rules: + tab_space_size: 2 + + +tabs_pass: + pass_str: "\tSELECT 1" + + configs: + rules: + indent_unit: tab + + +mixed_indent_fail_default_tab_space_size: + fail_str: |2 + select 1 + fix_str: |2 + select 1 + + +mixed_indent_fail_custom_tab_space_size: + fail_str: |2 + select 1 + fix_str: |2 + select 1 + + configs: + rules: + tab_space_size: 2 + + +indented_comments: + pass_str: | + SELECT + a, -- Some comment + longer_col -- A lined up comment + FROM spam + +indented_comments_default_config: + fail_str: | + SELECT + a, -- Some comment + longer_col -- A lined up comment + FROM spam + + # The rule will only fix the indent before the select targets. + fix_str: | + SELECT + a, -- Some comment + longer_col -- A lined up comment + FROM spam + + +indented_comments_tab_config: + fail_str: | + SELECT + a, -- Some comment + longer_col -- A lined up comment + FROM spam + + # The rule will only fix the indent before the select targets. + fix_str: | + SELECT + a, -- Some comment + longer_col -- A lined up comment + FROM spam + + configs: + rules: + indent_unit: tab + +# TODO fix indents after text: https://github.com/sqlfluff/sqlfluff/issues/624
{ "commit_name": "merge_commit", "failed_lite_validators": [], "has_test_patch": true, "is_lite": true, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 2 }, "num_modified_files": 1 }
0.4
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": null, "pre_install": null, "python": "3.9", "reqs_path": [ "requirements.txt", "requirements_dev.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
appdirs==1.4.4 bench-it==1.0.1 black==25.1.0 cached-property==2.0.1 click==8.1.8 colorama==0.4.6 configparser==7.2.0 coverage==7.8.0 dataclasses==0.6 diff-cover==2.6.1 doc8==1.1.2 docutils==0.21.2 exceptiongroup==1.2.2 flake8==7.2.0 flake8-black==0.3.6 flake8-docstrings==1.7.0 importlib_metadata==8.6.1 inflect==7.5.0 iniconfig==2.1.0 Jinja2==3.1.6 jinja2-pluralize==0.3.0 MarkupSafe==3.0.2 mccabe==0.7.0 more-itertools==10.6.0 mypy==1.15.0 mypy-extensions==1.0.0 oyaml==1.0 packaging==24.2 pathspec==0.12.1 pbr==6.1.1 platformdirs==4.3.7 pluggy==1.5.0 pycodestyle==2.13.0 pydocstyle==6.3.0 pyflakes==3.3.1 Pygments==2.19.1 pytest==8.3.5 pytest-cov==6.0.0 PyYAML==6.0.2 restructuredtext_lint==1.4.0 six==1.17.0 snowballstemmer==2.2.0 -e git+https://github.com/sqlfluff/sqlfluff.git@443b132fb3df3d2ef2b2711f0d3a69023c44aaa5#egg=sqlfluff stevedore==5.4.1 tomli==2.2.1 typeguard==4.4.2 typing_extensions==4.13.0 zipp==3.21.0
name: sqlfluff channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - appdirs==1.4.4 - bench-it==1.0.1 - black==25.1.0 - cached-property==2.0.1 - click==8.1.8 - colorama==0.4.6 - configparser==7.2.0 - coverage==7.8.0 - dataclasses==0.6 - diff-cover==2.6.1 - doc8==1.1.2 - docutils==0.21.2 - exceptiongroup==1.2.2 - flake8==7.2.0 - flake8-black==0.3.6 - flake8-docstrings==1.7.0 - importlib-metadata==8.6.1 - inflect==7.5.0 - iniconfig==2.1.0 - jinja2==3.1.6 - jinja2-pluralize==0.3.0 - markupsafe==3.0.2 - mccabe==0.7.0 - more-itertools==10.6.0 - mypy==1.15.0 - mypy-extensions==1.0.0 - oyaml==1.0 - packaging==24.2 - pathspec==0.12.1 - pbr==6.1.1 - platformdirs==4.3.7 - pluggy==1.5.0 - pycodestyle==2.13.0 - pydocstyle==6.3.0 - pyflakes==3.3.1 - pygments==2.19.1 - pytest==8.3.5 - pytest-cov==6.0.0 - pyyaml==6.0.2 - restructuredtext-lint==1.4.0 - six==1.17.0 - snowballstemmer==2.2.0 - stevedore==5.4.1 - tomli==2.2.1 - typeguard==4.4.2 - typing-extensions==4.13.0 - zipp==3.21.0 prefix: /opt/conda/envs/sqlfluff
[ "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L004_spaces_fail]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L004_spaces_fail_custom_tab_space_size]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L004_tabs_fail_default]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L004_tabs_fail_default_set_tab_space_size]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L004_mixed_indent_fail_default_tab_space_size]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L004_mixed_indent_fail_custom_tab_space_size]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L004_indented_comments_default_config]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L004_indented_comments_tab_config]" ]
[ "test/core/rules/std_test.py::test__rules__std_file_dbt[L021-models/my_new_project/select_distinct_group_by.sql-violations0]", "test/core/templaters/dbt_test.py::test__templater_dbt_profiles_dir_expanded", "test/core/templaters/dbt_test.py::test__templater_dbt_templating_result[use_dbt_utils.sql]", "test/core/templaters/dbt_test.py::test__templater_dbt_templating_result[macro_in_macro.sql]", "test/core/templaters/dbt_test.py::test__templater_dbt_templating_result[use_headers.sql]", "test/core/templaters/dbt_test.py::test__templater_dbt_templating_result[use_var.sql]", "test/core/templaters/dbt_test.py::test__templater_dbt_templating_absolute_path", "test/core/templaters/dbt_test.py::test__templater_dbt_handle_exceptions[compiler_error.sql-dbt", "test/core/templaters/dbt_test.py::test__templater_dbt_handle_exceptions[exception_connect_database.sql-dbt", "test/diff_quality_plugin_test.py::test_diff_quality_plugin[test/fixtures/linter/diffquality/parse_error.sql-expected_violations_lines2]" ]
[ "test/api/classes_test.py::test__api__lexer", "test/api/classes_test.py::test__api__parser", "test/api/classes_test.py::test__api__linter_lint", "test/api/classes_test.py::test__api__linter_fix", "test/api/simple_test.py::test__api__lint_string", "test/api/simple_test.py::test__api__lint_file", "test/api/simple_test.py::test__api__lint_string_specific", "test/api/simple_test.py::test__api__fix_string", "test/api/simple_test.py::test__api__fix_string_specific", "test/api/simple_test.py::test__api__parse_string", "test/cli/commands_test.py::test__cli__command_directed", "test/cli/commands_test.py::test__cli__command_dialect", "test/cli/commands_test.py::test__cli__command_lint_stdin[command0]", "test/cli/commands_test.py::test__cli__command_lint_stdin[command1]", "test/cli/commands_test.py::test__cli__command_lint_stdin[command2]", "test/cli/commands_test.py::test__cli__command_lint_stdin[command3]", "test/cli/commands_test.py::test__cli__command_lint_parse[command0]", "test/cli/commands_test.py::test__cli__command_lint_parse[command1]", "test/cli/commands_test.py::test__cli__command_lint_parse[command2]", "test/cli/commands_test.py::test__cli__command_lint_parse[command3]", "test/cli/commands_test.py::test__cli__command_lint_parse[command4]", "test/cli/commands_test.py::test__cli__command_lint_parse[command5]", "test/cli/commands_test.py::test__cli__command_lint_parse[command6]", "test/cli/commands_test.py::test__cli__command_lint_parse[command7]", "test/cli/commands_test.py::test__cli__command_lint_parse[command8]", "test/cli/commands_test.py::test__cli__command_lint_parse[command9]", "test/cli/commands_test.py::test__cli__command_lint_parse[command10]", "test/cli/commands_test.py::test__cli__command_lint_parse[command11]", "test/cli/commands_test.py::test__cli__command_lint_parse[command12]", "test/cli/commands_test.py::test__cli__command_lint_parse[command13]", "test/cli/commands_test.py::test__cli__command_lint_parse[command14]", "test/cli/commands_test.py::test__cli__command_lint_parse[command15]", "test/cli/commands_test.py::test__cli__command_lint_parse[command16]", "test/cli/commands_test.py::test__cli__command_lint_parse[command17]", "test/cli/commands_test.py::test__cli__command_lint_parse[command18]", "test/cli/commands_test.py::test__cli__command_lint_parse[command19]", "test/cli/commands_test.py::test__cli__command_lint_parse[command20]", "test/cli/commands_test.py::test__cli__command_lint_parse[command21]", "test/cli/commands_test.py::test__cli__command_lint_parse[command22]", "test/cli/commands_test.py::test__cli__command_lint_warning_explicit_file_ignored", "test/cli/commands_test.py::test__cli__command_lint_skip_ignore_files", "test/cli/commands_test.py::test__cli__command_versioning", "test/cli/commands_test.py::test__cli__command_version", "test/cli/commands_test.py::test__cli__command_rules", "test/cli/commands_test.py::test__cli__command_dialects", "test/cli/commands_test.py::test__cli__command__fix[L001-test/fixtures/linter/indentation_errors.sql]", "test/cli/commands_test.py::test__cli__command__fix[L008-test/fixtures/linter/whitespace_errors.sql]", "test/cli/commands_test.py::test__cli__command__fix[L008-test/fixtures/linter/indentation_errors.sql]", "test/cli/commands_test.py::test__cli__command__fix[L003-test/fixtures/linter/indentation_error_hard.sql]", "test/cli/commands_test.py::test__cli__command_fix_stdin[select", "test/cli/commands_test.py::test__cli__command_fix_stdin[", "test/cli/commands_test.py::test__cli__command_fix_stdin[SELECT", 
"test/cli/commands_test.py::test__cli__command_fix_stdin_safety", "test/cli/commands_test.py::test__cli__command__fix_no_force[L001-test/fixtures/linter/indentation_errors.sql-y-0]", "test/cli/commands_test.py::test__cli__command__fix_no_force[L001-test/fixtures/linter/indentation_errors.sql-n-65]", "test/cli/commands_test.py::test__cli__command_parse_serialize_from_stdin[yaml]", "test/cli/commands_test.py::test__cli__command_parse_serialize_from_stdin[json]", "test/cli/commands_test.py::test__cli__command_lint_serialize_from_stdin[select", "test/cli/commands_test.py::test__cli__command_lint_serialize_from_stdin[SElect", "test/cli/commands_test.py::test__cli__command_fail_nice_not_found[command0]", "test/cli/commands_test.py::test__cli__command_fail_nice_not_found[command1]", "test/cli/commands_test.py::test__cli__command_lint_serialize_multiple_files[yaml]", "test/cli/commands_test.py::test__cli__command_lint_serialize_multiple_files[json]", "test/cli/commands_test.py::test___main___help", "test/cli/formatters_test.py::test__cli__formatters__filename_nocol", "test/cli/formatters_test.py::test__cli__formatters__violation", "test/cli/helpers_test.py::test__cli__helpers__colorize", "test/cli/helpers_test.py::test__cli__helpers__cli_table", "test/cli/helpers_test.py::test__cli__helpers__wrap_elem[abc-5-res0]", "test/cli/helpers_test.py::test__cli__helpers__wrap_elem[how", "test/cli/helpers_test.py::test__cli__helpers__wrap_elem[A", "test/cli/helpers_test.py::test__cli__helpers__wrap_field_a", "test/cli/helpers_test.py::test__cli__helpers__wrap_field_b", "test/cli/helpers_test.py::test__cli__helpers__wrap_field_c", "test/cli/helpers_test.py::test__cli__helpers__pad_line", "test/core/config_test.py::test__config__nested_combine", "test/core/config_test.py::test__config__dict_diff", "test/core/config_test.py::test__config__load_file_dir", "test/core/config_test.py::test__config__load_file_f", "test/core/config_test.py::test__config__load_nested", "test/core/config_test.py::test__config__iter_config_paths_right_order", "test/core/config_test.py::test__config__find_sqlfluffignore_in_same_directory", "test/core/config_test.py::test__config__nested_config_tests", "test/core/dialects/ansi_test.py::test__dialect__ansi__file_lex[a", "test/core/dialects/ansi_test.py::test__dialect__ansi__file_lex[b.c-res1]", "test/core/dialects/ansi_test.py::test__dialect__ansi__file_lex[abc", "test/core/dialects/ansi_test.py::test__dialect__ansi_specific_segment_parses[SelectKeywordSegment-select]", "test/core/dialects/ansi_test.py::test__dialect__ansi_specific_segment_parses[NakedIdentifierSegment-online_sales]", "test/core/dialects/ansi_test.py::test__dialect__ansi_specific_segment_parses[BareFunctionSegment-current_timestamp]", "test/core/dialects/ansi_test.py::test__dialect__ansi_specific_segment_parses[FunctionSegment-current_timestamp()]", "test/core/dialects/ansi_test.py::test__dialect__ansi_specific_segment_parses[NumericLiteralSegment-1000.0]", "test/core/dialects/ansi_test.py::test__dialect__ansi_specific_segment_parses[ExpressionSegment-online_sales", "test/core/dialects/ansi_test.py::test__dialect__ansi_specific_segment_parses[IntervalExpressionSegment-INTERVAL", "test/core/dialects/ansi_test.py::test__dialect__ansi_specific_segment_parses[ExpressionSegment-CASE", "test/core/dialects/ansi_test.py::test__dialect__ansi_specific_segment_parses[ExpressionSegment-CAST(ROUND(online_sales", "test/core/dialects/ansi_test.py::test__dialect__ansi_specific_segment_parses[ExpressionSegment-name", 
"test/core/dialects/ansi_test.py::test__dialect__ansi_specific_segment_parses[SelectTargetElementSegment-MIN", "test/core/dialects/ansi_test.py::test__dialect__ansi_specific_segment_parses[ExpressionSegment-DATE_ADD(CURRENT_DATE('America/New_York'),", "test/core/dialects/ansi_test.py::test__dialect__ansi_specific_segment_parses[ExpressionSegment-my_array[1]]", "test/core/dialects/ansi_test.py::test__dialect__ansi_specific_segment_parses[ExpressionSegment-my_array[OFFSET(1)]]", "test/core/dialects/ansi_test.py::test__dialect__ansi_specific_segment_parses[ExpressionSegment-my_array[5:8]]", "test/core/dialects/ansi_test.py::test__dialect__ansi_specific_segment_parses[ExpressionSegment-4", "test/core/dialects/ansi_test.py::test__dialect__ansi_specific_segment_parses[ExpressionSegment-bits[OFFSET(0)]", "test/core/dialects/ansi_test.py::test__dialect__ansi_specific_segment_parses[SelectTargetElementSegment-(count_18_24", "test/core/dialects/ansi_test.py::test__dialect__ansi_specific_segment_parses[ExpressionSegment-count_18_24", "test/core/dialects/ansi_test.py::test__dialect__ansi_specific_segment_parses[SelectStatementSegment-SELECT", "test/core/dialects/ansi_test.py::test__dialect__ansi_specific_segment_parses[SelectTargetElementSegment-t.val/t.id]", "test/core/dialects/ansi_test.py::test__dialect__ansi_specific_segment_parses[SelectTargetElementSegment-CAST(num", "test/core/dialects/ansi_test.py::test__dialect__ansi_specific_segment_parses[SelectTargetElementSegment-a.*]", "test/core/dialects/ansi_test.py::test__dialect__ansi_specific_segment_parses[SelectTargetElementSegment-a.b.*]", "test/core/dialects/ansi_test.py::test__dialect__ansi_specific_segment_parses[SelectTargetElementSegment-a.b.c.*]", "test/core/dialects/ansi_test.py::test__dialect__ansi_specific_segment_parses[ObjectReferenceSegment-a..c.*]", "test/core/dialects/ansi_test.py::test__dialect__ansi_specific_segment_parses[SelectTargetElementSegment--some_variable]", "test/core/dialects/ansi_test.py::test__dialect__ansi_specific_segment_parses[SelectTargetElementSegment--", "test/core/dialects/ansi_test.py::test__dialect__ansi_specific_segment_parses[ExpressionSegment-concat(left(uaid,", "test/core/dialects/ansi_test.py::test__dialect__ansi_specific_segment_parses[ExpressionSegment-c", "test/core/dialects/ansi_test.py::test__dialect__ansi_specific_segment_parses[ExpressionSegment-NULL::INT]", "test/core/dialects/ansi_test.py::test__dialect__ansi_specific_segment_parses[SelectTargetElementSegment-NULL::INT", "test/core/dialects/ansi_test.py::test__dialect__ansi_specific_segment_not_match[ObjectReferenceSegment-\\n", "test/core/dialects/ansi_test.py::test__dialect__ansi_specific_segment_not_parse[SELECT", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[teradata-update_from.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[teradata-select_stmt_cast.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[teradata-select_stmt.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[teradata-create_table_stmt_3.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[teradata-create_table_stmt_2.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[teradata-create_table_stmt.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[teradata-collect_stats.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[teradata-bteq_stmt.sql]", 
"test/core/dialects/dialects_test.py::test__dialect__base_file_parse[snowflake-snowflake_window_function_ignore_nulls.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[snowflake-snowflake_string_literal.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[snowflake-snowflake_semi_structured_3.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[snowflake-snowflake_semi_structured_2.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[snowflake-snowflake_semi_structured.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[snowflake-snowflake_qualify.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[snowflake-snowflake_pivot.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[snowflake-snowflake_json_underscore_key.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[snowflake-snowflake_col_position.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[postgres-postgres_within_group.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[mysql-alter_table.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[bigquery-string_literals.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[bigquery-select_with_offset.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[bigquery-select_replace_2.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[bigquery-select_replace.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[bigquery-select_quoting.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[bigquery-select_multi_except.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[bigquery-select_ml_weights.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[bigquery-select_ml_predict_with_select.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[bigquery-select_for_system_time_2.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[bigquery-select_for_system_time.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[bigquery-select_except.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[bigquery-select_example.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[bigquery-select_datetime.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[bigquery-interval_function.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-update.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-table_expression.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-shorthand_cast.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_with_b.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_with_a.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_where_in_unnest.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_v.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_u.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_trailing_comma_column_list.sql]", 
"test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_t.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_simple_j.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_simple_i.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_simple_h.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_simple_g.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_simple_f.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_simple_e.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_simple_d.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_simple_c.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_simple_b.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_simple_a.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_s.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_right.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_r.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_q.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_p.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_o.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_n.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_m.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_l.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_j.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_h.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_g.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_function_in_group_by.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_f.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_e.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_d.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_case_b.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_case_a.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_c.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_b.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-select_a.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-rollback_work_and_no_chain.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-rollback_work.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-rollback_and_no_chain.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-rollback.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-revoke_select_on_table_a_from_group_b.sql]", 
"test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-multi_statement_c.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-multi_statement_b.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-multi_statement_a.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-modulo.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-insert_a.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-grant_update_on_all_tables_in_schema_a_to_public.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-grant_select_update_insert_on_mytable_to_public.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-grant_select_on_mytable_to_public_with_grant_option.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-grant_select_on_mytable_to_public.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-grant_select_col1_col2_update_col1_on_mytable_to_public.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-grant_all_privileges_on_mytable_to_role.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-grant_all_on_table_mytable_to_role.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-grant_all_on_mytable_to_role.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-functions_b.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-functions_a.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-escaped_quotes.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-escape.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-empty_file.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-drop_view_a_restrict.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-drop_view_a_cascade.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-drop_view_a.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-drop_table_if_exists_a.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-drop_table_a_restrict.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-drop_table_a_cascade.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-drop_table_a.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-drop_model.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-delete_from.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-create_view_a.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-create_table_varchar.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-create_table_table_comment.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-create_table_column_comment.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-create_table_auto_increment.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-create_table_as.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-create_table_a_pk_unique_fk_constraints.sql]", 
"test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-create_table_a_column_constraints.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-create_table_a_c1_c2.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-create_model_options.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-commit_work_and_no_chain.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-commit_work.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-commit_and_no_chain.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-commit.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_file_parse[ansi-arithmetic_a.sql]", "test/core/dialects/dialects_test.py::test__dialect__base_parse_struct[snowflake-snowflake_semi_structured.sql-True-snowflake_semi_structured.yml]", "test/core/dialects/dialects_test.py::test__dialect__base_parse_struct[snowflake-snowflake_pivot.sql-True-snowflake_pivot.yml]", "test/core/dialects/dialects_test.py::test__dialect__base_parse_struct[bigquery-select_replace_2.sql-True-select_replace_2.yml]", "test/core/dialects/dialects_test.py::test__dialect__base_parse_struct[ansi-select_with_a.sql-True-select_with_a.yml]", "test/core/dialects/dialects_test.py::test__dialect__base_parse_struct[ansi-select_simple_j.sql-True-select_simple_j.yml]", "test/core/dialects/dialects_test.py::test__dialect__base_parse_struct[ansi-select_simple_e.sql-True-select_simple_e.yml]", "test/core/dialects/dialects_test.py::test__dialect__base_parse_struct[ansi-select_simple_e.sql-False-select_simple_e_nc.yml]", "test/core/dialects/dialects_test.py::test__dialect__base_parse_struct[ansi-select_simple_b.sql-True-select_simple_b.yml]", "test/core/dialects/dialects_test.py::test__dialect__base_parse_struct[ansi-select_simple_a.sql-True-select_simple_a.yml]", "test/core/dialects/dialects_test.py::test__dialect__base_parse_struct[ansi-select_e.sql-True-select_e.yml]", "test/core/dialects/dialects_test.py::test__dialect__base_parse_struct[ansi-select_a.sql-True-select_a.yml]", "test/core/dialects/dialects_test.py::test__dialect__base_parse_struct[ansi-multi_statement_a.sql-False-multi_statement_a_nc.yml]", "test/core/dialects/postgres_test.py::test_epoch_datetime_unit[SELECT", "test/core/dialects/snowflake_test.py::test_snowflake_queries[UseStatementSegment-USE", "test/core/linter_test.py::test__linter__path_from_paths__dir", "test/core/linter_test.py::test__linter__path_from_paths__file", "test/core/linter_test.py::test__linter__path_from_paths__not_exist", "test/core/linter_test.py::test__linter__path_from_paths__not_exist_ignore", "test/core/linter_test.py::test__linter__path_from_paths__explicit_ignore", "test/core/linter_test.py::test__linter__path_from_paths__dot", "test/core/linter_test.py::test__linter__path_from_paths__ignore[test/fixtures/linter/sqlfluffignore]", "test/core/linter_test.py::test__linter__path_from_paths__ignore[test/fixtures/linter/sqlfluffignore/]", "test/core/linter_test.py::test__linter__path_from_paths__ignore[test/fixtures/linter/sqlfluffignore/.]", "test/core/linter_test.py::test__linter__lint_string_vs_file[test/fixtures/linter/indentation_errors.sql]", "test/core/linter_test.py::test__linter__lint_string_vs_file[test/fixtures/linter/whitespace_errors.sql]", "test/core/linter_test.py::test__linter__get_violations_filter_rules[None-7]", 
"test/core/linter_test.py::test__linter__get_violations_filter_rules[L010-2]", "test/core/linter_test.py::test__linter__get_violations_filter_rules[rules2-2]", "test/core/linter_test.py::test__linter__linting_result__sum_dicts", "test/core/linter_test.py::test__linter__linting_result__combine_dicts", "test/core/linter_test.py::test__linter__linting_result_check_tuples_by_path[False-list]", "test/core/linter_test.py::test__linter__linting_result_check_tuples_by_path[True-dict]", "test/core/linter_test.py::test__linter__linting_result_get_violations", "test/core/linter_test.py::test__linter__raises_malformed_noqa", "test/core/linter_test.py::test__linter__empty_file", "test/core/parser/grammar_test.py::test__parser__grammar__base__longest_trimmed_match__basic[seg_list_slice0-matcher_keywords0-False-result_slice0]", "test/core/parser/grammar_test.py::test__parser__grammar__base__longest_trimmed_match__basic[seg_list_slice1-matcher_keywords1-True-result_slice1]", "test/core/parser/grammar_test.py::test__parser__grammar__base__longest_trimmed_match__basic[seg_list_slice2-matcher_keywords2-False-None]", "test/core/parser/grammar_test.py::test__parser__grammar__base__longest_trimmed_match__basic[seg_list_slice3-matcher_keywords3-True-result_slice3]", "test/core/parser/grammar_test.py::test__parser__grammar__base__longest_trimmed_match__adv", "test/core/parser/grammar_test.py::test__parser__grammar__base__look_ahead_match[seg_list_slice0-matcher_keywords0-result_slice0-bar-None]", "test/core/parser/grammar_test.py::test__parser__grammar__base__look_ahead_match[seg_list_slice1-matcher_keywords1-result_slice1-foo-pre_match_slice1]", "test/core/parser/grammar_test.py::test__parser__grammar__base__ephemeral_segment", "test/core/parser/grammar_test.py::test__parser__grammar__base__bracket_sensitive_look_ahead_match", "test/core/parser/grammar_test.py::test__parser__grammar_oneof[True]", "test/core/parser/grammar_test.py::test__parser__grammar_oneof[False]", "test/core/parser/grammar_test.py::test__parser__grammar_oneof_exclude", "test/core/parser/grammar_test.py::test__parser__grammar_startswith_a[baar-False]", "test/core/parser/grammar_test.py::test__parser__grammar_startswith_a[bar-True]", "test/core/parser/grammar_test.py::test__parser__grammar_startswith_b[False-3]", "test/core/parser/grammar_test.py::test__parser__grammar_startswith_b[True-4]", "test/core/parser/grammar_test.py::test__parser__grammar_sequence", "test/core/parser/grammar_test.py::test__parser__grammar_sequence_nested", "test/core/parser/grammar_test.py::test__parser__grammar_delimited[token_list0-None-True-False-5]", "test/core/parser/grammar_test.py::test__parser__grammar_delimited[token_list1-None-True-False-6]", "test/core/parser/grammar_test.py::test__parser__grammar_delimited[token_list2-None-True-False-0]", "test/core/parser/grammar_test.py::test__parser__grammar_delimited[token_list3-None-True-True-3]", "test/core/parser/grammar_test.py::test__parser__grammar_delimited[token_list4-0-True-False-5]", "test/core/parser/grammar_test.py::test__parser__grammar_delimited[token_list5-0-False-False-1]", "test/core/parser/grammar_test.py::test__parser__grammar_delimited[token_list6-1-True-False-5]", "test/core/parser/grammar_test.py::test__parser__grammar_delimited[token_list7-1-False-False-0]", "test/core/parser/grammar_test.py::test__parser__grammar_delimited[token_list8-None-True-False-3]", "test/core/parser/grammar_test.py::test__parser__grammar_delimited[token_list9-None-False-False-3]", 
"test/core/parser/grammar_test.py::test__parser__grammar_delimited[token_list10-1-True-False-3]", "test/core/parser/grammar_test.py::test__parser__grammar_delimited[token_list11-1-False-False-3]", "test/core/parser/grammar_test.py::test__parser__grammar_greedyuntil[foo-False-1]", "test/core/parser/grammar_test.py::test__parser__grammar_greedyuntil[bar-False-0]", "test/core/parser/grammar_test.py::test__parser__grammar_greedyuntil[baar-False-3]", "test/core/parser/grammar_test.py::test__parser__grammar_greedyuntil[baar-True-5]", "test/core/parser/grammar_test.py::test__parser__grammar_greedyuntil_bracketed", "test/core/parser/grammar_test.py::test__parser__grammar_anything", "test/core/parser/grammar_test.py::test__parser__grammar_nothing", "test/core/parser/grammar_test.py::test__parser__grammar_noncode", "test/core/parser/helpers_test.py::test__parser__helper_trim_non_code_segments[token_list0-0-3-0]", "test/core/parser/helpers_test.py::test__parser__helper_trim_non_code_segments[token_list1-0-3-0]", "test/core/parser/helpers_test.py::test__parser__helper_trim_non_code_segments[token_list2-0-0-0]", "test/core/parser/helpers_test.py::test__parser__helper_trim_non_code_segments[token_list3-3-3-3]", "test/core/parser/lexer_test.py::test__parser__lexer_obj[a", "test/core/parser/lexer_test.py::test__parser__lexer_obj[b.c-res1]", "test/core/parser/lexer_test.py::test__parser__lexer_obj[abc", "test/core/parser/lexer_test.py::test__parser__lexer_obj[abc'\\n", "test/core/parser/lexer_test.py::test__parser__lexer_obj[*-+bd/-res8]", "test/core/parser/lexer_test.py::test__parser__lexer_obj[2+4", "test/core/parser/lexer_test.py::test__parser__lexer_obj[when", "test/core/parser/lexer_test.py::test__parser__lexer_singleton[.fsaljk-.]", "test/core/parser/lexer_test.py::test__parser__lexer_singleton[fsaljk-None]", "test/core/parser/lexer_test.py::test__parser__lexer_regex[fsaljk-f-f0]", "test/core/parser/lexer_test.py::test__parser__lexer_regex[fsaljk-f-f1]", "test/core/parser/lexer_test.py::test__parser__lexer_regex[fsaljk-[fas]*-fsa]", "test/core/parser/lexer_test.py::test__parser__lexer_regex[", "test/core/parser/lexer_test.py::test__parser__lexer_regex['something", "test/core/parser/lexer_test.py::test__parser__lexer_regex['", "test/core/parser/lexer_test.py::test__parser__lexer_multimatcher", "test/core/parser/lexer_test.py::test__parser__lexer_fail", "test/core/parser/lexer_test.py::test__parser__lexer_fail_via_parse", "test/core/parser/markers_test.py::test__markers__common_marker", "test/core/parser/markers_test.py::test__markers__common_marker_format", "test/core/parser/markers_test.py::test__markers__enriched_marker_format", "test/core/parser/match_test.py::test__parser__match_construct[<lambda>0-from_unmatched]", "test/core/parser/match_test.py::test__parser__match_construct[<lambda>0-from_matched]", "test/core/parser/match_test.py::test__parser__match_construct[<lambda>1-from_unmatched]", "test/core/parser/match_test.py::test__parser__match_construct[<lambda>1-from_matched]", "test/core/parser/match_test.py::test__parser__match_construct[<lambda>2-from_unmatched]", "test/core/parser/match_test.py::test__parser__match_construct[<lambda>2-from_matched]", "test/core/parser/match_test.py::test__parser__match_construct[<lambda>3-from_unmatched]", "test/core/parser/match_test.py::test__parser__match_construct[<lambda>3-from_matched]", "test/core/parser/match_test.py::test__parser__match_construct_from_empty", "test/core/parser/match_test.py::test__parser__match_add[<lambda>0]", 
"test/core/parser/match_test.py::test__parser__match_add[<lambda>1]", "test/core/parser/match_test.py::test__parser__match_add[<lambda>2]", "test/core/parser/match_test.py::test__parser__match_add[<lambda>3]", "test/core/parser/match_test.py::test__parser__match_add_raises[string]", "test/core/parser/match_test.py::test__parser__match_add_raises[fail_case1]", "test/core/parser/match_test.py::test__parser__match_add_raises[fail_case2]", "test/core/parser/match_test.py::test__parser__match_add_raises[fail_case3]", "test/core/parser/parse_test.py::test__parser__parse_match", "test/core/parser/parse_test.py::test__parser__parse_parse", "test/core/parser/parse_test.py::test__parser__parse_expand", "test/core/parser/segments_base_test.py::test__parser__base_segments_raw_init", "test/core/parser/segments_base_test.py::test__parser__base_segments_type", "test/core/parser/segments_base_test.py::test__parser__base_segments_raw", "test/core/parser/segments_base_test.py::test__parser__base_segments_base", "test/core/parser/segments_base_test.py::test__parser__base_segments_raw_compare", "test/core/parser/segments_base_test.py::test__parser__base_segments_base_compare", "test/core/parser/segments_common_test.py::test__parser__core_keyword", "test/core/parser/segments_common_test.py::test__parser__core_ephemeral_segment", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L001_test_1]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L002_test_1]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L003_test_1]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L003_test_2]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L003_test_3]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L003_test_4]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L003_test_5]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L003_test_6]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L003_test_7]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L003_test_8]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L003_test_9]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L003_test_10]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L003_test_11]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L003_test_12]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L004_spaces_pass_default]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L004_tabs_pass]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L004_indented_comments]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L005_test_1]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L006_test_1]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L006_test_2]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L006_test_3]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L006_test_4]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L006_test_5]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L006_test_6]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L008_test_1]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L008_test_2]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L008_test_3]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L010_test_1]", 
"test/core/rules/rule_test_cases_test.py::test__rule_test_case[L010_test_2]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L010_test_3]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L010_test_4]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L010_test_5]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L011_test_1]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L012_issue_561]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L013_test_1]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L013_test_2]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L013_test_3]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L013_test_4]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L013_test_5]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L013_test_6]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L013_test_7]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L014_test_1]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L014_test_2]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L014_test_3]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L014_test_4]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L014_test_5]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L015_test_1]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L015_test_2]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L015_test_3]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L015_test_4]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L016_test_1]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L016_test_2]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L016_test_3]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L016_test_4]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L016_test_5]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L016_test_6]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L018_test_1]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L018_test_2]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L018_test_3]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L018_test_4]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L018_test_5]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L019_leading_comma_violations]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L019_leading_commas_allowed]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L019_leading_comma_violations_in_with_statement]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L019_leading_commas_allowed_in_with_statement]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L019_trailing_comma_violations]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L019_trailing_commas_allowed]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L019_trailing_comma_fixing_removes_extra_whitespace]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L019_leading_comma_fixing_flows_around_comments]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L020_test_1]", 
"test/core/rules/rule_test_cases_test.py::test__rule_test_case[L021_test_1]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L021_test_2]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L022_test_1]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L022_test_2]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L022_test_3]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L022_test_4]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L022_test_5]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L022_test_6]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L022_test_7]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L022_test_8]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L023_test_1]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L024_test_1]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L024_test_2]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L025_test_1]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L025_test_2]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L025_test_3]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L026_test_1]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L026_test_2]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L026_test_3]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L026_test_4]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L026_test_5]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L028_test_1]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L028_test_2]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L028_test_3]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L028_test_4]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L028_test_5]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L028_test_6]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L029_test_1]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L029_test_2]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L029_test_3]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L029_test_4]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L029_test_5]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L030_test_1]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L030_test_2]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L031_test_1]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L031_test_2]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L031_test_3]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L031_test_4]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L031_alias_single_char_identifiers]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L031_alias_with_wildcard_identifier]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L031_select_from_values]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L031_select_from_table_generator]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L032_test_1]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L032_test_2]", 
"test/core/rules/rule_test_cases_test.py::test__rule_test_case[L032_test_3]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L033_test_1]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L033_test_2]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L033_test_3]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L033_test_4]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L033_test_5]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L033_test_6]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L033_test_7]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L034_test_1]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L034_test_2]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L034_test_3]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L034_test_4]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L034_test_5]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L034_test_6]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L036_test_single_select_target_and_no_new_line_between_select_and_select_target]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L036_test_single_select_target_and_new_line_after_select_target]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L036_test_multiple_select_targets_on_new_lines_and_new_line_after_select]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L036_test_single_select_target_and_new_line_between_select_and_select_target]", "test/core/rules/rule_test_cases_test.py::test__rule_test_case[L036_test_mulitple_select_targets_all_on_the_same_line]", "test/core/rules/std_fix_auto_test.py::test__std_fix_auto[snowflake-001_semi_structured]", "test/core/rules/std_fix_auto_test.py::test__std_fix_auto[bigquery-004_templating]", "test/core/rules/std_fix_auto_test.py::test__std_fix_auto[bigquery-003_templating]", "test/core/rules/std_fix_auto_test.py::test__std_fix_auto[bigquery-002_templating]", "test/core/rules/std_fix_auto_test.py::test__std_fix_auto[bigquery-001_templating]", "test/core/rules/std_fix_auto_test.py::test__std_fix_auto[ansi-012_templating]", "test/core/rules/std_fix_auto_test.py::test__std_fix_auto[ansi-011_indentation]", "test/core/rules/std_fix_auto_test.py::test__std_fix_auto[ansi-010_CTEs_and_newlines]", "test/core/rules/std_fix_auto_test.py::test__std_fix_auto[ansi-009_keyword_capitalisation]", "test/core/rules/std_fix_auto_test.py::test__std_fix_auto[ansi-008_with_clause]", "test/core/rules/std_fix_auto_test.py::test__std_fix_auto[ansi-007_with_clause]", "test/core/rules/std_fix_auto_test.py::test__std_fix_auto[ansi-006_indentation]", "test/core/rules/std_fix_auto_test.py::test__std_fix_auto[ansi-005_function_spacing]", "test/core/rules/std_fix_auto_test.py::test__std_fix_auto[ansi-004_indentation]", "test/core/rules/std_fix_auto_test.py::test__std_fix_auto[ansi-003_long_line]", "test/core/rules/std_fix_auto_test.py::test__std_fix_auto[ansi-002_indentation]", "test/core/rules/std_fix_auto_test.py::test__std_fix_auto[ansi-001_long_line]", "test/core/rules/std_roundtrip_test.py::test__cli__command__fix[L001-test/fixtures/linter/indentation_errors.sql]", "test/core/rules/std_roundtrip_test.py::test__cli__command__fix[L008-test/fixtures/linter/whitespace_errors.sql]", "test/core/rules/std_roundtrip_test.py::test__cli__command__fix[L008-test/fixtures/linter/indentation_errors.sql]", 
"test/core/rules/std_roundtrip_test.py::test__cli__command__fix[L010-test/fixtures/linter/whitespace_errors.sql]", "test/core/rules/std_roundtrip_test.py::test__cli__command__fix[L011-test/fixtures/parser/ansi/select_simple_i.sql]", "test/core/rules/std_roundtrip_test.py::test__cli__command__fix[L012-test/fixtures/parser/ansi/select_simple_i.sql]", "test/core/rules/std_roundtrip_test.py::test__cli__command__fix_templated[L010]", "test/core/rules/std_roundtrip_test.py::test__cli__command__fix_templated[L001]", "test/core/rules/std_test.py::test__rules__user_rules", "test/core/rules/std_test.py::test__rules__runaway_fail_catch", "test/core/rules/std_test.py::test__rules__std_file[L001-test/fixtures/linter/indentation_errors.sql-violations0]", "test/core/rules/std_test.py::test__rules__std_file[L002-test/fixtures/linter/indentation_errors.sql-violations1]", "test/core/rules/std_test.py::test__rules__std_file[L003-test/fixtures/linter/indentation_errors.sql-violations2]", "test/core/rules/std_test.py::test__rules__std_file[L004-test/fixtures/linter/indentation_errors.sql-violations3]", "test/core/rules/std_test.py::test__rules__std_file[L005-test/fixtures/linter/whitespace_errors.sql-violations4]", "test/core/rules/std_test.py::test__rules__std_file[L019-test/fixtures/linter/whitespace_errors.sql-violations5]", "test/core/rules/std_test.py::test__rules__std_file[L008-test/fixtures/linter/whitespace_errors.sql-violations6]", "test/core/rules/std_test.py::test__rules__std_file[L006-test/fixtures/linter/operator_errors.sql-violations7]", "test/core/rules/std_test.py::test__rules__std_file[L007-test/fixtures/linter/operator_errors.sql-violations8]", "test/core/rules/std_test.py::test__rules__std_file[L006-test/fixtures/linter/operator_errors_negative.sql-violations9]", "test/core/rules/std_test.py::test__rules__std_file[L003-test/fixtures/linter/indentation_error_hard.sql-violations10]", "test/core/rules/std_test.py::test__rules__std_file[L003-test/fixtures/linter/indentation_error_contained.sql-violations11]", "test/core/rules/std_test.py::test__rules__std_file[L016-test/fixtures/linter/block_comment_errors.sql-violations12]", "test/core/rules/std_test.py::test__rules__std_file[L016-test/fixtures/linter/block_comment_errors_2.sql-violations13]", "test/core/rules/std_test.py::test__rules__std_file[L027-test/fixtures/linter/column_references.sql-violations14]", "test/core/rules/std_test.py::test__rules__std_file[L027-test/fixtures/linter/column_references_bare_function.sql-violations15]", "test/core/rules/std_test.py::test__rules__std_file[L026-test/fixtures/linter/column_references.sql-violations16]", "test/core/rules/std_test.py::test__rules__std_file[L025-test/fixtures/linter/column_references.sql-violations17]", "test/core/rules/std_test.py::test__rules__std_file[L021-test/fixtures/linter/select_distinct_group_by.sql-violations18]", "test/core/rules/std_test.py::test__rules__std_file[L006-test/fixtures/linter/operator_errors_ignore.sql-violations19]", "test/core/rules/std_test.py::test__rules__std_file[L031-test/fixtures/linter/aliases_in_join_error.sql-violations20]", "test/core/rules/std_test.py::test__rules__std_L003_process_raw_stack", "test/core/rules/std_test.py::test_improper_configs_are_rejected[rule_config_dict0]", "test/core/rules/std_test.py::test_improper_configs_are_rejected[rule_config_dict1]", "test/core/rules/std_test.py::test_improper_configs_are_rejected[rule_config_dict2]", "test/core/rules/std_test.py::test_improper_configs_are_rejected[rule_config_dict3]", 
"test/core/rules/std_test.py::test_improper_configs_are_rejected[rule_config_dict4]", "test/core/rules/std_test.py::test_improper_configs_are_rejected[rule_config_dict5]", "test/core/rules/std_test.py::test_improper_configs_are_rejected[rule_config_dict6]", "test/core/rules/std_test.py::test_improper_configs_are_rejected[rule_config_dict7]", "test/core/rules/std_test.py::test_improper_configs_are_rejected[rule_config_dict8]", "test/core/rules/std_test.py::test_improper_configs_are_rejected[rule_config_dict9]", "test/core/rules/std_test.py::test_rules_cannot_be_instantiated_without_declared_configs", "test/core/rules/std_test.py::test_rules_configs_are_dynamically_documented", "test/core/rules/std_test.py::test_rule_exception_is_caught_to_validation", "test/core/string_helpers_test.py::test__parser__helper_findall[--positions0]", "test/core/string_helpers_test.py::test__parser__helper_findall[a-a-positions1]", "test/core/string_helpers_test.py::test__parser__helper_findall[foobar-o-positions2]", "test/core/string_helpers_test.py::test__parser__helper_findall[bar", "test/core/templaters/base_test.py::test__indices_of_newlines[-positions0]", "test/core/templaters/base_test.py::test__indices_of_newlines[foo-positions1]", "test/core/templaters/base_test.py::test__indices_of_newlines[foo\\nbar-positions2]", "test/core/templaters/base_test.py::test__indices_of_newlines[\\nfoo\\n\\nbar\\nfoo\\n\\nbar\\n-positions3]", "test/core/templaters/base_test.py::test__templater_selection", "test/core/templaters/base_test.py::test__templater_raw", "test/core/templaters/base_test.py::test__templated_file_get_line_pos_of_char_pos[01234\\n6789{{foo}}fo\\nbarss-01234\\n6789x\\nfo\\nbarfss-file_slices0-0-1-1]", "test/core/templaters/base_test.py::test__templated_file_get_line_pos_of_char_pos[01234\\n6789{{foo}}fo\\nbarss-01234\\n6789x\\nfo\\nbarfss-file_slices1-20-3-1]", "test/core/templaters/base_test.py::test__templated_file_get_line_pos_of_char_pos[01234\\n6789{{foo}}fo\\nbarss-01234\\n6789x\\nfo\\nbarfss-file_slices2-24-3-5]", "test/core/templaters/base_test.py::test__templated_file_find_slice_indices_of_templated_pos[100-True-file_slices0-10-11]", "test/core/templaters/base_test.py::test__templated_file_find_slice_indices_of_templated_pos[13-True-file_slices1-0-3]", "test/core/templaters/base_test.py::test__templated_file_find_slice_indices_of_templated_pos[28-True-file_slices2-2-5]", "test/core/templaters/base_test.py::test__templated_file_find_slice_indices_of_templated_pos[12-True-file_slices3-1-3]", "test/core/templaters/base_test.py::test__templated_file_find_slice_indices_of_templated_pos[20-True-file_slices4-2-3]", "test/core/templaters/base_test.py::test__templated_file_find_slice_indices_of_templated_pos[13-False-file_slices5-0-1]", "test/core/templaters/base_test.py::test__templated_file_templated_slice_to_source_slice[in_slice0-out_slice0-True-file_slices0-raw_slices0]", "test/core/templaters/base_test.py::test__templated_file_templated_slice_to_source_slice[in_slice1-out_slice1-True-file_slices1-raw_slices1]", "test/core/templaters/base_test.py::test__templated_file_templated_slice_to_source_slice[in_slice2-out_slice2-True-file_slices2-raw_slices2]", "test/core/templaters/base_test.py::test__templated_file_templated_slice_to_source_slice[in_slice3-out_slice3-False-file_slices3-raw_slices3]", "test/core/templaters/base_test.py::test__templated_file_templated_slice_to_source_slice[in_slice4-out_slice4-False-file_slices4-raw_slices4]", 
"test/core/templaters/base_test.py::test__templated_file_templated_slice_to_source_slice[in_slice5-out_slice5-True-file_slices5-raw_slices5]", "test/core/templaters/base_test.py::test__templated_file_templated_slice_to_source_slice[in_slice6-out_slice6-True-file_slices6-raw_slices6]", "test/core/templaters/base_test.py::test__templated_file_templated_slice_to_source_slice[in_slice7-out_slice7-True-file_slices7-raw_slices7]", "test/core/templaters/base_test.py::test__templated_file_templated_slice_to_source_slice[in_slice8-out_slice8-True-file_slices8-raw_slices8]", "test/core/templaters/base_test.py::test__templated_file_templated_slice_to_source_slice[in_slice9-out_slice9-True-file_slices9-raw_slices9]", "test/core/templaters/base_test.py::test__templated_file_templated_slice_to_source_slice[in_slice10-out_slice10-True-file_slices10-raw_slices10]", "test/core/templaters/base_test.py::test__templated_file_templated_slice_to_source_slice[in_slice11-out_slice11-False-file_slices11-raw_slices11]", "test/core/templaters/base_test.py::test__templated_file_source_only_slices", "test/core/templaters/dbt_test.py::test__templater_dbt_missing", "test/core/templaters/jinja_test.py::test__templater_jinja", "test/core/templaters/jinja_test.py::test__templater_jinja_error_variable", "test/core/templaters/jinja_test.py::test__templater_jinja_error_syntax", "test/core/templaters/jinja_test.py::test__templater_jinja_error_catatrophic", "test/core/templaters/jinja_test.py::test__templater_full[jinja_a/jinja-True]", "test/core/templaters/jinja_test.py::test__templater_full[jinja_b/jinja-False]", "test/core/templaters/jinja_test.py::test__templater_full[jinja_c_dbt/dbt_builtins-True]", "test/core/templaters/jinja_test.py::test__templater_full[jinja_e/jinja-True]", "test/core/templaters/jinja_test.py::test__templater_full[jinja_f/jinja-True]", "test/core/templaters/jinja_test.py::test__templater_full[jinja_g_macros/jinja-True]", "test/core/templaters/jinja_test.py::test__templater_jinja_slice_template[-result0]", "test/core/templaters/jinja_test.py::test__templater_jinja_slice_template[foo-result1]", "test/core/templaters/jinja_test.py::test__templater_jinja_slice_template[foo", "test/core/templaters/jinja_test.py::test__templater_jinja_slice_template[SELECT", "test/core/templaters/jinja_test.py::test__templater_jinja_slice_file[--result0]", "test/core/templaters/jinja_test.py::test__templater_jinja_slice_file[foo-foo-result1]", "test/core/templaters/jinja_test.py::test__templater_jinja_slice_file[SELECT", "test/core/templaters/jinja_test.py::test__templater_jinja_slice_file[{{", "test/core/templaters/jinja_test.py::test__templater_jinja_slice_file[SELECT\\n", "test/core/templaters/python_test.py::test__templater_python", "test/core/templaters/python_test.py::test__templater_python_error", "test/core/templaters/python_test.py::test__templater_python_intermediate__trim[int_slice0-foo-head_test0-tail_test0-int_test0]", "test/core/templaters/python_test.py::test__templater_python_intermediate__trim[int_slice1-foo-head_test1-tail_test1-int_test1]", "test/core/templaters/python_test.py::test__templater_python_intermediate__trim[int_slice2-foo1bar-head_test2-tail_test2-int_test2]", "test/core/templaters/python_test.py::test__templater_python_intermediate__trim[int_slice3-foofoofoobarfoofoobarbar-head_test3-tail_test3-int_test3]", "test/core/templaters/python_test.py::test__templater_python_substring_occurances[-substrings0-positions0]", 
"test/core/templaters/python_test.py::test__templater_python_substring_occurances[a-substrings1-positions1]", "test/core/templaters/python_test.py::test__templater_python_substring_occurances[foobar-substrings2-positions2]", "test/core/templaters/python_test.py::test__templater_python_substring_occurances[bar", "test/core/templaters/python_test.py::test__templater_python_sorted_occurance_tuples[test0-result0]", "test/core/templaters/python_test.py::test__templater_python_sorted_occurance_tuples[test1-result1]", "test/core/templaters/python_test.py::test__templater_python_sorted_occurance_tuples[test2-result2]", "test/core/templaters/python_test.py::test__templater_python_slice_template[-result0]", "test/core/templaters/python_test.py::test__templater_python_slice_template[foo-result1]", "test/core/templaters/python_test.py::test__templater_python_slice_template[foo", "test/core/templaters/python_test.py::test__templater_python_split_invariants[raw_sliced0-literals0-raw_occurances0-templated_occurances0-0-result0]", "test/core/templaters/python_test.py::test__templater_python_split_invariants[raw_sliced1-literals1-raw_occurances1-templated_occurances1-3-result1]", "test/core/templaters/python_test.py::test__templater_python_split_uniques_coalesce_rest[split_file0-raw_occurances0-templated_occurances0--result0]", "test/core/templaters/python_test.py::test__templater_python_split_uniques_coalesce_rest[split_file1-raw_occurances1-templated_occurances1-foo-result1]", "test/core/templaters/python_test.py::test__templater_python_split_uniques_coalesce_rest[split_file2-raw_occurances2-templated_occurances2-SELECT", "test/core/templaters/python_test.py::test__templater_python_slice_file[--result0]", "test/core/templaters/python_test.py::test__templater_python_slice_file[foo-foo-result1]", "test/core/templaters/python_test.py::test__templater_python_slice_file[SELECT", "test/diff_quality_plugin_test.py::test_diff_quality_plugin[test/fixtures/linter/indentation_errors.sql-expected_violations_lines0]", "test/diff_quality_plugin_test.py::test_diff_quality_plugin[test/fixtures/linter/parse_error.sql-expected_violations_lines1]" ]
[]
MIT License
9,044
1,030
[ "src/sqlfluff/core/rules/std.py" ]
m3dev__gokart-147
5a10506e5ef762d384fec1651e9cb56daa276336
2020-11-29 01:30:47
7056c10cf4577d7008b5744c5cf127da5b65c849
diff --git a/gokart/redis_lock.py b/gokart/redis_lock.py index d371b67..a287fd0 100644 --- a/gokart/redis_lock.py +++ b/gokart/redis_lock.py @@ -11,11 +11,11 @@ logger = getLogger(__name__) class RedisParams(NamedTuple): - redis_host: str = None - redis_port: str = None - redis_timeout: int = None - redis_key: str = None - should_redis_lock: bool = 180 + redis_host: str + redis_port: str + redis_timeout: int + redis_key: str + should_redis_lock: bool class RedisClient: diff --git a/gokart/target.py b/gokart/target.py index dc2cad1..c9d890c 100644 --- a/gokart/target.py +++ b/gokart/target.py @@ -77,7 +77,7 @@ class SingleFileTarget(TargetOnKart): self, target: luigi.target.FileSystemTarget, processor: FileProcessor, - redis_params: RedisParams = RedisParams(), + redis_params: RedisParams, ) -> None: self._target = target self._processor = processor @@ -115,7 +115,7 @@ class ModelTarget(TargetOnKart): temporary_directory: str, load_function, save_function, - redis_params: RedisParams = RedisParams(), + redis_params: RedisParams, ) -> None: self._zip_client = make_zip_client(file_path, temporary_directory) self._temporary_directory = temporary_directory diff --git a/gokart/task.py b/gokart/task.py index 4aa99b2..15b3b62 100644 --- a/gokart/task.py +++ b/gokart/task.py @@ -61,8 +61,7 @@ class TaskOnKart(luigi.Task): self._rerun_state = self.rerun def output(self): - file_path = self.__module__.replace(".", "/") - return self.make_target(os.path.join(file_path, f"{type(self).__name__}.pkl")) + return self.make_target() def requires(self): tasks = self.make_task_instance_dictionary() @@ -131,8 +130,10 @@ class TaskOnKart(luigi.Task): return cls(**new_k) - def make_target(self, relative_file_path: str, use_unique_id: bool = True, processor: Optional[FileProcessor] = None) -> TargetOnKart: - file_path = os.path.join(self.workspace_directory, relative_file_path) + def make_target(self, relative_file_path: str = None, use_unique_id: bool = True, processor: Optional[FileProcessor] = None) -> TargetOnKart: + formatted_relative_file_path = relative_file_path if relative_file_path is not None else os.path.join(self.__module__.replace(".", "/"), + f"{type(self).__name__}.pkl") + file_path = os.path.join(self.workspace_directory, formatted_relative_file_path) unique_id = self.make_unique_id() if use_unique_id else None return gokart.target.make_target(file_path=file_path, unique_id=unique_id, @@ -141,8 +142,10 @@ class TaskOnKart(luigi.Task): redis_port=self.redis_port, redis_timeout=self.redis_timeout) - def make_large_data_frame_target(self, relative_file_path: str, use_unique_id: bool = True, max_byte=int(2**26)) -> TargetOnKart: - file_path = os.path.join(self.workspace_directory, relative_file_path) + def make_large_data_frame_target(self, relative_file_path: str = None, use_unique_id: bool = True, max_byte=int(2**26)) -> TargetOnKart: + formatted_relative_file_path = relative_file_path if relative_file_path is not None else os.path.join(self.__module__.replace(".", "/"), + f"{type(self).__name__}.zip") + file_path = os.path.join(self.workspace_directory, formatted_relative_file_path) unique_id = self.make_unique_id() if use_unique_id else None return gokart.target.make_model_target(file_path=file_path, temporary_directory=self.local_temporary_directory,
Default path for make_large_data_frame_target
In the same manner as `output()`, it might be great if `make_large_data_frame_target()` could produce a default path, e.g.:
```
file_path = self.__module__.replace('.', '/')
return self.make_large_data_frame_target(os.path.join(file_path, f'{type(self).__name__}.zip'))
```
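As a rough illustration of the behaviour requested above (and which the patch for this record adds), the sketch below shows a task relying on the derived default path instead of spelling one out. The task name and the DataFrame contents are made up for the example; only `TaskOnKart`, `make_large_data_frame_target()` and `dump()` come from gokart itself.

```python
import gokart
import pandas as pd


class DumpLargeFrame(gokart.TaskOnKart):
    """Hypothetical task, used only to illustrate the requested default path."""

    def output(self):
        # Previously a relative path had to be passed explicitly, e.g.
        #   self.make_large_data_frame_target('my_module/DumpLargeFrame.zip')
        # With the change, omitting the argument derives the path from the
        # module and class name, just like make_target() / output() do.
        return self.make_large_data_frame_target()

    def run(self):
        self.dump(pd.DataFrame({'a': [1, 2, 3]}))
```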
m3dev/gokart
diff --git a/test/test_task_on_kart.py b/test/test_task_on_kart.py index 6b5a118..b1f4d10 100644 --- a/test/test_task_on_kart.py +++ b/test/test_task_on_kart.py @@ -153,6 +153,12 @@ class TaskTest(unittest.TestCase): self.assertIsInstance(default_target, SingleFileTarget) self.assertEqual(f'./resources/test/test_task_on_kart/_DummyTaskD_{task.task_unique_id}.pkl', default_target._target.path) + def test_default_large_dataframe_target(self): + task = _DummyTaskD() + default_large_dataframe_target = task.make_large_data_frame_target() + self.assertIsInstance(default_large_dataframe_target, ModelTarget) + self.assertEqual(f'./resources/test/test_task_on_kart/_DummyTaskD_{task.task_unique_id}.zip', default_large_dataframe_target._zip_client._file_path) + def test_make_target(self): task = _DummyTask() target = task.make_target('test.txt')
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 2, "test_score": 2 }, "num_modified_files": 3 }
0.3
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest pytest-cov pytest-xdist pytest-mock pytest-asyncio", "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": [ "requirements/base.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
aiohttp==3.8.6 aiosignal==1.2.0 APScheduler==3.10.4 async-timeout==4.0.2 asynctest==0.13.0 attrs==22.2.0 backports.zoneinfo==0.2.1 boto3==1.23.10 botocore==1.26.10 cachetools==4.2.4 certifi==2021.5.30 charset-normalizer==2.0.12 coverage==6.2 docutils==0.18.1 execnet==1.9.0 frozenlist==1.2.0 -e git+https://github.com/m3dev/gokart.git@5a10506e5ef762d384fec1651e9cb56daa276336#egg=gokart google-api-core==2.8.2 google-api-python-client==2.52.0 google-auth==2.22.0 google-auth-httplib2==0.2.0 googleapis-common-protos==1.56.3 httplib2==0.22.0 idna==3.10 idna-ssl==1.1.0 importlib-metadata==4.8.3 importlib-resources==5.4.0 iniconfig==1.1.1 jmespath==0.10.0 lockfile==0.12.2 luigi==3.6.0 multidict==5.2.0 numpy==1.19.5 packaging==21.3 pandas==1.1.5 pluggy==1.0.0 protobuf==3.19.6 py==1.11.0 pyarrow==6.0.1 pyasn1==0.5.1 pyasn1-modules==0.3.0 pyparsing==3.1.4 pytest==7.0.1 pytest-asyncio==0.16.0 pytest-cov==4.0.0 pytest-mock==3.6.1 pytest-xdist==3.0.2 python-daemon==2.3.2 python-dateutil==2.9.0.post0 pytz==2025.2 pytz-deprecation-shim==0.1.0.post0 redis==4.3.6 requests==2.27.1 rsa==4.9 s3transfer==0.5.2 six==1.17.0 slackclient==2.9.4 tenacity==8.2.2 tomli==1.2.3 tornado==6.1 tqdm==4.64.1 typing_extensions==4.1.1 tzdata==2025.2 tzlocal==4.2 uritemplate==4.1.1 urllib3==1.26.20 yarl==1.7.2 zipp==3.6.0
name: gokart channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - aiohttp==3.8.6 - aiosignal==1.2.0 - apscheduler==3.10.4 - async-timeout==4.0.2 - asynctest==0.13.0 - attrs==22.2.0 - backports-zoneinfo==0.2.1 - boto3==1.23.10 - botocore==1.26.10 - cachetools==4.2.4 - charset-normalizer==2.0.12 - coverage==6.2 - docutils==0.18.1 - execnet==1.9.0 - frozenlist==1.2.0 - google-api-core==2.8.2 - google-api-python-client==2.52.0 - google-auth==2.22.0 - google-auth-httplib2==0.2.0 - googleapis-common-protos==1.56.3 - httplib2==0.22.0 - idna==3.10 - idna-ssl==1.1.0 - importlib-metadata==4.8.3 - importlib-resources==5.4.0 - iniconfig==1.1.1 - jmespath==0.10.0 - lockfile==0.12.2 - luigi==3.6.0 - multidict==5.2.0 - numpy==1.19.5 - packaging==21.3 - pandas==1.1.5 - pluggy==1.0.0 - protobuf==3.19.6 - py==1.11.0 - pyarrow==6.0.1 - pyasn1==0.5.1 - pyasn1-modules==0.3.0 - pyparsing==3.1.4 - pytest==7.0.1 - pytest-asyncio==0.16.0 - pytest-cov==4.0.0 - pytest-mock==3.6.1 - pytest-xdist==3.0.2 - python-daemon==2.3.2 - python-dateutil==2.9.0.post0 - pytz==2025.2 - pytz-deprecation-shim==0.1.0.post0 - redis==4.3.6 - requests==2.27.1 - rsa==4.9 - s3transfer==0.5.2 - six==1.17.0 - slackclient==2.9.4 - tenacity==8.2.2 - tomli==1.2.3 - tornado==6.1 - tqdm==4.64.1 - typing-extensions==4.1.1 - tzdata==2025.2 - tzlocal==4.2 - uritemplate==4.1.1 - urllib3==1.26.20 - yarl==1.7.2 - zipp==3.6.0 prefix: /opt/conda/envs/gokart
[ "test/test_task_on_kart.py::TaskTest::test_default_large_dataframe_target" ]
[]
[ "test/test_task_on_kart.py::TaskTest::test_add_cofigureation_evaluation_order", "test/test_task_on_kart.py::TaskTest::test_add_configuration", "test/test_task_on_kart.py::TaskTest::test_compare_targets_of_different_tasks", "test/test_task_on_kart.py::TaskTest::test_complete_when_input_and_output_equal", "test/test_task_on_kart.py::TaskTest::test_complete_when_modification_time_equals_output", "test/test_task_on_kart.py::TaskTest::test_complete_with_modified_input", "test/test_task_on_kart.py::TaskTest::test_complete_with_rerun_flag", "test/test_task_on_kart.py::TaskTest::test_complete_with_uncompleted_input", "test/test_task_on_kart.py::TaskTest::test_complete_without_dependency", "test/test_task_on_kart.py::TaskTest::test_default_requires", "test/test_task_on_kart.py::TaskTest::test_default_target", "test/test_task_on_kart.py::TaskTest::test_dump", "test/test_task_on_kart.py::TaskTest::test_load_data_frame_drop_columns", "test/test_task_on_kart.py::TaskTest::test_load_data_frame_empty_input", "test/test_task_on_kart.py::TaskTest::test_load_dictionary_at_once", "test/test_task_on_kart.py::TaskTest::test_load_generator_with_single_target", "test/test_task_on_kart.py::TaskTest::test_load_index_only_dataframe", "test/test_task_on_kart.py::TaskTest::test_load_list_of_list_pandas", "test/test_task_on_kart.py::TaskTest::test_load_tuple", "test/test_task_on_kart.py::TaskTest::test_load_with_keyword", "test/test_task_on_kart.py::TaskTest::test_load_with_single_target", "test/test_task_on_kart.py::TaskTest::test_make_model_target", "test/test_task_on_kart.py::TaskTest::test_make_target", "test/test_task_on_kart.py::TaskTest::test_make_target_with_processor", "test/test_task_on_kart.py::TaskTest::test_make_target_without_id", "test/test_task_on_kart.py::TaskTest::test_repr", "test/test_task_on_kart.py::TaskTest::test_significant_flag", "test/test_task_on_kart.py::TaskTest::test_use_rerun_with_inherits" ]
[]
MIT License
9,059
1,004
[ "gokart/redis_lock.py", "gokart/target.py", "gokart/task.py" ]
googleapis__python-storage-333
6ef1de2bd1a9cfab765c736cb849970b2587398c
2020-12-01 18:06:13
6ef1de2bd1a9cfab765c736cb849970b2587398c
andrewsg: Thanks!
diff --git a/google/cloud/storage/blob.py b/google/cloud/storage/blob.py index 4c868b4..ef7b88b 100644 --- a/google/cloud/storage/blob.py +++ b/google/cloud/storage/blob.py @@ -122,6 +122,8 @@ _CHUNKED_DOWNLOAD_CHECKSUM_MESSAGE = ( _DEFAULT_CHUNKSIZE = 104857600 # 1024 * 1024 B * 100 = 100 MB _MAX_MULTIPART_SIZE = 8388608 # 8 MB +_logger = logging.getLogger(__name__) + class Blob(_PropertyMixin): """A wrapper around Cloud Storage's concept of an ``Object``. @@ -923,7 +925,7 @@ class Blob(_PropertyMixin): if checksum: msg = _CHUNKED_DOWNLOAD_CHECKSUM_MESSAGE.format(checksum) - logging.info(msg) + _logger.info(msg) if raw_download: klass = RawChunkedDownload
logging.info() triggers basicConfig()

When I use the `download_as_string()` method or similar, the `logging.root` logger gets activated by the call to `logging.info()` at https://github.com/googleapis/python-storage/blob/63ff23387f5873c609490be8e58d69ba34a10a5e/google/cloud/storage/blob.py#L926. As a result, messages from other loggers are emitted through the root logger, which is unexpected when an app requires a single logger with a strict format.

#### Environment details

- OS type and version: ubuntu 20.04
- Python version: `3.8.6`
- pip version: `20.2.4`
- `google-cloud-storage` version: `1.33.0`

#### Steps to reproduce

1. set up your own logger for a module
2. call `logger.info("test")` (nothing should happen)
3. run `Client.from_service_account_json().get_bucket(...).blob(..., chunk_size=1024*1024).download_as_string()`
4. call `logger.info("test")` one more time
5. a log line in the basic format appears on stdout (unexpected)

#### How to fix it

Use your own logger (e.g. `logging.getLogger(__name__)`) instead of the root logger.
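The fix the reporter suggests (and the one the patch for this record applies) is the standard module-level logger pattern. The sketch below is illustrative rather than the library's actual code: the function name and log message are made up, but it shows why `logging.getLogger(__name__).info(...)` does not have the side effect that the module-level `logging.info(...)` has.

```python
import logging

# A module-level logger. Messages sent through it propagate to whatever
# handlers the application has configured; if none are configured they go
# to the "last resort" handler, but no handler is ever installed on the
# root logger as a side effect.
_logger = logging.getLogger(__name__)


def download_chunked(checksum="md5"):
    """Made-up stand-in for the chunked-download path in blob.py."""
    if checksum:
        # Before the fix this was logging.info(msg): that module-level
        # helper calls logging.basicConfig() when the root logger has no
        # handlers, which is what surprised the reporter.
        _logger.info("Chunked downloads cannot validate a %s checksum.", checksum)
    # ... the actual download is omitted in this sketch ...
```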
googleapis/python-storage
diff --git a/tests/unit/test_blob.py b/tests/unit/test_blob.py index dd7f13a..fc6eda6 100644 --- a/tests/unit/test_blob.py +++ b/tests/unit/test_blob.py @@ -1115,7 +1115,7 @@ class Test_Blob(unittest.TestCase): def test__do_download_w_chunks_w_checksum(self): from google.cloud.storage import blob as blob_module - with mock.patch("logging.info") as patch: + with mock.patch.object(blob_module._logger, "info") as patch: self._do_download_helper_w_chunks( w_range=False, raw_download=False, checksum="md5" ) @@ -1124,7 +1124,9 @@ class Test_Blob(unittest.TestCase): ) def test__do_download_w_chunks_wo_checksum(self): - with mock.patch("logging.info") as patch: + from google.cloud.storage import blob as blob_module + + with mock.patch.object(blob_module._logger, "info") as patch: self._do_download_helper_w_chunks( w_range=False, raw_download=False, checksum=None )
{ "commit_name": "merge_commit", "failed_lite_validators": [], "has_test_patch": true, "is_lite": true, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 0 }, "num_modified_files": 1 }
1.33
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "mock" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "requirements/base.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
cachetools==4.2.4 certifi==2025.1.31 charset-normalizer==3.4.1 coverage==7.8.0 exceptiongroup==1.2.2 google-api-core==2.10.2 google-auth==1.35.0 google-cloud-core==1.7.3 -e git+https://github.com/googleapis/python-storage.git@6ef1de2bd1a9cfab765c736cb849970b2587398c#egg=google_cloud_storage google-crc32c==1.7.1 google-resumable-media==1.3.3 googleapis-common-protos==1.69.2 idna==3.10 iniconfig==2.1.0 mock==5.2.0 packaging==24.2 pluggy==1.5.0 protobuf==4.25.6 pyasn1==0.6.1 pyasn1_modules==0.4.2 pytest==8.3.5 pytest-cov==6.0.0 requests==2.32.3 rsa==4.9 six==1.17.0 tomli==2.2.1 urllib3==2.3.0
name: python-storage channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - cachetools==4.2.4 - certifi==2025.1.31 - charset-normalizer==3.4.1 - coverage==7.8.0 - exceptiongroup==1.2.2 - google-api-core==2.10.2 - google-auth==1.35.0 - google-cloud-core==1.7.3 - google-crc32c==1.7.1 - google-resumable-media==1.3.3 - googleapis-common-protos==1.69.2 - idna==3.10 - iniconfig==2.1.0 - mock==5.2.0 - packaging==24.2 - pluggy==1.5.0 - protobuf==4.25.6 - pyasn1==0.6.1 - pyasn1-modules==0.4.2 - pytest==8.3.5 - pytest-cov==6.0.0 - requests==2.32.3 - rsa==4.9 - six==1.17.0 - tomli==2.2.1 - urllib3==2.3.0 prefix: /opt/conda/envs/python-storage
[ "tests/unit/test_blob.py::Test_Blob::test__do_download_w_chunks_w_checksum", "tests/unit/test_blob.py::Test_Blob::test__do_download_w_chunks_wo_checksum" ]
[]
[ "tests/unit/test_blob.py::Test_Blob::test__do_download_w_chunks_w_custom_timeout", "tests/unit/test_blob.py::Test_Blob::test__do_download_w_chunks_w_range_w_raw", "tests/unit/test_blob.py::Test_Blob::test__do_download_w_chunks_w_range_wo_raw", "tests/unit/test_blob.py::Test_Blob::test__do_download_w_chunks_wo_range_w_raw", "tests/unit/test_blob.py::Test_Blob::test__do_download_w_chunks_wo_range_wo_raw", "tests/unit/test_blob.py::Test_Blob::test__do_download_wo_chunks_w_custom_timeout", "tests/unit/test_blob.py::Test_Blob::test__do_download_wo_chunks_w_range_w_raw", "tests/unit/test_blob.py::Test_Blob::test__do_download_wo_chunks_w_range_wo_raw", "tests/unit/test_blob.py::Test_Blob::test__do_download_wo_chunks_wo_range_w_raw", "tests/unit/test_blob.py::Test_Blob::test__do_download_wo_chunks_wo_range_wo_raw", "tests/unit/test_blob.py::Test_Blob::test__do_multipart_upload_bad_size", "tests/unit/test_blob.py::Test_Blob::test__do_multipart_upload_no_size", "tests/unit/test_blob.py::Test_Blob::test__do_multipart_upload_with_client", "tests/unit/test_blob.py::Test_Blob::test__do_multipart_upload_with_custom_timeout", "tests/unit/test_blob.py::Test_Blob::test__do_multipart_upload_with_generation_match", "tests/unit/test_blob.py::Test_Blob::test__do_multipart_upload_with_generation_not_match", "tests/unit/test_blob.py::Test_Blob::test__do_multipart_upload_with_kms", "tests/unit/test_blob.py::Test_Blob::test__do_multipart_upload_with_kms_with_version", "tests/unit/test_blob.py::Test_Blob::test__do_multipart_upload_with_metadata", "tests/unit/test_blob.py::Test_Blob::test__do_multipart_upload_with_retry", "tests/unit/test_blob.py::Test_Blob::test__do_multipart_upload_with_size", "tests/unit/test_blob.py::Test_Blob::test__do_multipart_upload_with_user_project", "tests/unit/test_blob.py::Test_Blob::test__do_resumable_upload_no_size", "tests/unit/test_blob.py::Test_Blob::test__do_resumable_upload_with_custom_timeout", "tests/unit/test_blob.py::Test_Blob::test__do_resumable_upload_with_data_corruption", "tests/unit/test_blob.py::Test_Blob::test__do_resumable_upload_with_predefined_acl", "tests/unit/test_blob.py::Test_Blob::test__do_resumable_upload_with_retry", "tests/unit/test_blob.py::Test_Blob::test__do_resumable_upload_with_size", "tests/unit/test_blob.py::Test_Blob::test__do_upload_uses_multipart", "tests/unit/test_blob.py::Test_Blob::test__do_upload_uses_multipart_w_custom_timeout", "tests/unit/test_blob.py::Test_Blob::test__do_upload_uses_resumable", "tests/unit/test_blob.py::Test_Blob::test__do_upload_uses_resumable_w_custom_timeout", "tests/unit/test_blob.py::Test_Blob::test__do_upload_with_retry", "tests/unit/test_blob.py::Test_Blob::test__encryption_headers_w_encryption_key", "tests/unit/test_blob.py::Test_Blob::test__encryption_headers_wo_encryption_key", "tests/unit/test_blob.py::Test_Blob::test__get_content_type_default", "tests/unit/test_blob.py::Test_Blob::test__get_content_type_explicit", "tests/unit/test_blob.py::Test_Blob::test__get_content_type_from_blob", "tests/unit/test_blob.py::Test_Blob::test__get_content_type_from_filename", "tests/unit/test_blob.py::Test_Blob::test__get_download_url_on_the_fly", "tests/unit/test_blob.py::Test_Blob::test__get_download_url_on_the_fly_with_generation", "tests/unit/test_blob.py::Test_Blob::test__get_download_url_on_the_fly_with_kms_key_name", "tests/unit/test_blob.py::Test_Blob::test__get_download_url_on_the_fly_with_user_project", "tests/unit/test_blob.py::Test_Blob::test__get_download_url_with_generation_match", 
"tests/unit/test_blob.py::Test_Blob::test__get_download_url_with_media_link", "tests/unit/test_blob.py::Test_Blob::test__get_download_url_with_media_link_w_user_project", "tests/unit/test_blob.py::Test_Blob::test__get_transport", "tests/unit/test_blob.py::Test_Blob::test__get_upload_arguments", "tests/unit/test_blob.py::Test_Blob::test__get_writable_metadata_no_changes", "tests/unit/test_blob.py::Test_Blob::test__get_writable_metadata_unwritable_field", "tests/unit/test_blob.py::Test_Blob::test__get_writable_metadata_with_changes", "tests/unit/test_blob.py::Test_Blob::test__initiate_resumable_upload_no_size", "tests/unit/test_blob.py::Test_Blob::test__initiate_resumable_upload_with_chunk_size", "tests/unit/test_blob.py::Test_Blob::test__initiate_resumable_upload_with_client", "tests/unit/test_blob.py::Test_Blob::test__initiate_resumable_upload_with_custom_timeout", "tests/unit/test_blob.py::Test_Blob::test__initiate_resumable_upload_with_extra_headers", "tests/unit/test_blob.py::Test_Blob::test__initiate_resumable_upload_with_generation_match", "tests/unit/test_blob.py::Test_Blob::test__initiate_resumable_upload_with_generation_not_match", "tests/unit/test_blob.py::Test_Blob::test__initiate_resumable_upload_with_kms", "tests/unit/test_blob.py::Test_Blob::test__initiate_resumable_upload_with_kms_with_version", "tests/unit/test_blob.py::Test_Blob::test__initiate_resumable_upload_with_metadata", "tests/unit/test_blob.py::Test_Blob::test__initiate_resumable_upload_with_predefined_acl", "tests/unit/test_blob.py::Test_Blob::test__initiate_resumable_upload_with_retry", "tests/unit/test_blob.py::Test_Blob::test__initiate_resumable_upload_with_size", "tests/unit/test_blob.py::Test_Blob::test__initiate_resumable_upload_with_user_project", "tests/unit/test_blob.py::Test_Blob::test__initiate_resumable_upload_without_chunk_size", "tests/unit/test_blob.py::Test_Blob::test__query_params_default", "tests/unit/test_blob.py::Test_Blob::test__query_params_w_generation", "tests/unit/test_blob.py::Test_Blob::test__query_params_w_user_project", "tests/unit/test_blob.py::Test_Blob::test__set_metadata_to_none", "tests/unit/test_blob.py::Test_Blob::test__set_properties_w_kms_key_name", "tests/unit/test_blob.py::Test_Blob::test__set_properties_wo_kms_key_name", "tests/unit/test_blob.py::Test_Blob::test_acl_property", "tests/unit/test_blob.py::Test_Blob::test_bucket_readonly_property", "tests/unit/test_blob.py::Test_Blob::test_cache_control_getter", "tests/unit/test_blob.py::Test_Blob::test_cache_control_setter", "tests/unit/test_blob.py::Test_Blob::test_chunk_size_ctor", "tests/unit/test_blob.py::Test_Blob::test_chunk_size_getter", "tests/unit/test_blob.py::Test_Blob::test_chunk_size_setter", "tests/unit/test_blob.py::Test_Blob::test_chunk_size_setter_bad_value", "tests/unit/test_blob.py::Test_Blob::test_client", "tests/unit/test_blob.py::Test_Blob::test_component_count", "tests/unit/test_blob.py::Test_Blob::test_component_count_string_val", "tests/unit/test_blob.py::Test_Blob::test_component_count_unset", "tests/unit/test_blob.py::Test_Blob::test_compose_minimal_w_user_project", "tests/unit/test_blob.py::Test_Blob::test_compose_w_additional_property_changes", "tests/unit/test_blob.py::Test_Blob::test_compose_w_generation_match", "tests/unit/test_blob.py::Test_Blob::test_compose_w_generation_match_bad_length", "tests/unit/test_blob.py::Test_Blob::test_compose_w_generation_match_nones", "tests/unit/test_blob.py::Test_Blob::test_compose_wo_content_type_set", 
"tests/unit/test_blob.py::Test_Blob::test_content_disposition_getter", "tests/unit/test_blob.py::Test_Blob::test_content_disposition_setter", "tests/unit/test_blob.py::Test_Blob::test_content_encoding_getter", "tests/unit/test_blob.py::Test_Blob::test_content_encoding_setter", "tests/unit/test_blob.py::Test_Blob::test_content_language_getter", "tests/unit/test_blob.py::Test_Blob::test_content_language_setter", "tests/unit/test_blob.py::Test_Blob::test_content_type_getter", "tests/unit/test_blob.py::Test_Blob::test_content_type_setter", "tests/unit/test_blob.py::Test_Blob::test_crc32c_getter", "tests/unit/test_blob.py::Test_Blob::test_crc32c_setter", "tests/unit/test_blob.py::Test_Blob::test_create_resumable_upload_session", "tests/unit/test_blob.py::Test_Blob::test_create_resumable_upload_session_with_custom_timeout", "tests/unit/test_blob.py::Test_Blob::test_create_resumable_upload_session_with_failure", "tests/unit/test_blob.py::Test_Blob::test_create_resumable_upload_session_with_origin", "tests/unit/test_blob.py::Test_Blob::test_ctor_w_encryption_key", "tests/unit/test_blob.py::Test_Blob::test_ctor_w_kms_key_name", "tests/unit/test_blob.py::Test_Blob::test_ctor_w_kms_key_name_and_encryption_key", "tests/unit/test_blob.py::Test_Blob::test_ctor_with_encoded_unicode", "tests/unit/test_blob.py::Test_Blob::test_ctor_with_generation", "tests/unit/test_blob.py::Test_Blob::test_ctor_wo_encryption_key", "tests/unit/test_blob.py::Test_Blob::test_custom_time_getter", "tests/unit/test_blob.py::Test_Blob::test_custom_time_setter", "tests/unit/test_blob.py::Test_Blob::test_custom_time_setter_none_value", "tests/unit/test_blob.py::Test_Blob::test_custom_time_unset", "tests/unit/test_blob.py::Test_Blob::test_delete_w_generation", "tests/unit/test_blob.py::Test_Blob::test_delete_w_generation_match", "tests/unit/test_blob.py::Test_Blob::test_delete_wo_generation", "tests/unit/test_blob.py::Test_Blob::test_download_as_byte_w_custom_timeout", "tests/unit/test_blob.py::Test_Blob::test_download_as_bytes_w_generation_match", "tests/unit/test_blob.py::Test_Blob::test_download_as_bytes_w_raw", "tests/unit/test_blob.py::Test_Blob::test_download_as_bytes_wo_raw", "tests/unit/test_blob.py::Test_Blob::test_download_as_string", "tests/unit/test_blob.py::Test_Blob::test_download_as_string_w_hash_response_header_none", "tests/unit/test_blob.py::Test_Blob::test_download_as_string_w_response_headers", "tests/unit/test_blob.py::Test_Blob::test_download_as_string_w_response_headers_not_match", "tests/unit/test_blob.py::Test_Blob::test_download_as_text_w_client", "tests/unit/test_blob.py::Test_Blob::test_download_as_text_w_custom_timeout", "tests/unit/test_blob.py::Test_Blob::test_download_as_text_w_encoding", "tests/unit/test_blob.py::Test_Blob::test_download_as_text_w_end", "tests/unit/test_blob.py::Test_Blob::test_download_as_text_w_if_generation_match", "tests/unit/test_blob.py::Test_Blob::test_download_as_text_w_if_generation_not_match", "tests/unit/test_blob.py::Test_Blob::test_download_as_text_w_if_metageneration_match", "tests/unit/test_blob.py::Test_Blob::test_download_as_text_w_if_metageneration_not_match", "tests/unit/test_blob.py::Test_Blob::test_download_as_text_w_no_charset", "tests/unit/test_blob.py::Test_Blob::test_download_as_text_w_non_ascii_w_explicit_encoding", "tests/unit/test_blob.py::Test_Blob::test_download_as_text_w_non_ascii_wo_explicit_encoding_w_charset", "tests/unit/test_blob.py::Test_Blob::test_download_as_text_w_raw", "tests/unit/test_blob.py::Test_Blob::test_download_as_text_w_start", 
"tests/unit/test_blob.py::Test_Blob::test_download_as_text_wo_raw", "tests/unit/test_blob.py::Test_Blob::test_download_to_file_w_chunks_w_raw", "tests/unit/test_blob.py::Test_Blob::test_download_to_file_w_chunks_wo_raw", "tests/unit/test_blob.py::Test_Blob::test_download_to_file_w_custom_timeout", "tests/unit/test_blob.py::Test_Blob::test_download_to_file_w_generation_match", "tests/unit/test_blob.py::Test_Blob::test_download_to_file_with_failure", "tests/unit/test_blob.py::Test_Blob::test_download_to_file_wo_chunks_w_raw", "tests/unit/test_blob.py::Test_Blob::test_download_to_file_wo_chunks_wo_raw", "tests/unit/test_blob.py::Test_Blob::test_download_to_file_wo_media_link", "tests/unit/test_blob.py::Test_Blob::test_download_to_filename_corrupted", "tests/unit/test_blob.py::Test_Blob::test_download_to_filename_w_custom_timeout", "tests/unit/test_blob.py::Test_Blob::test_download_to_filename_w_generation_match", "tests/unit/test_blob.py::Test_Blob::test_download_to_filename_w_key", "tests/unit/test_blob.py::Test_Blob::test_download_to_filename_w_updated_w_raw", "tests/unit/test_blob.py::Test_Blob::test_download_to_filename_w_updated_wo_raw", "tests/unit/test_blob.py::Test_Blob::test_download_to_filename_wo_updated_w_raw", "tests/unit/test_blob.py::Test_Blob::test_download_to_filename_wo_updated_wo_raw", "tests/unit/test_blob.py::Test_Blob::test_etag", "tests/unit/test_blob.py::Test_Blob::test_event_based_hold_getter_false", "tests/unit/test_blob.py::Test_Blob::test_event_based_hold_getter_missing", "tests/unit/test_blob.py::Test_Blob::test_event_based_hold_getter_true", "tests/unit/test_blob.py::Test_Blob::test_event_based_hold_setter", "tests/unit/test_blob.py::Test_Blob::test_exists_hit_w_generation", "tests/unit/test_blob.py::Test_Blob::test_exists_hit_w_user_project", "tests/unit/test_blob.py::Test_Blob::test_exists_miss", "tests/unit/test_blob.py::Test_Blob::test_exists_w_generation_match", "tests/unit/test_blob.py::Test_Blob::test_from_string_w_domain_name_bucket", "tests/unit/test_blob.py::Test_Blob::test_from_string_w_invalid_uri", "tests/unit/test_blob.py::Test_Blob::test_from_string_w_valid_uri", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_no_version_passed_warning", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v2_w_content_md5", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v2_w_content_type", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v2_w_credentials", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v2_w_csek", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v2_w_csek_and_headers", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v2_w_defaults", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v2_w_endpoint", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v2_w_expiration", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v2_w_generation", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v2_w_headers", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v2_w_lowercase_method", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v2_w_method", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v2_w_non_ascii_name", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v2_w_response_disposition", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v2_w_response_type", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v2_w_slash_in_name", 
"tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v2_w_tilde_in_name", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v4_w_bucket_bound_hostname_w_bare_hostname", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v4_w_bucket_bound_hostname_w_scheme", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v4_w_content_md5", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v4_w_content_type", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v4_w_credentials", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v4_w_csek", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v4_w_csek_and_headers", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v4_w_defaults", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v4_w_endpoint", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v4_w_generation", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v4_w_headers", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v4_w_lowercase_method", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v4_w_method", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v4_w_non_ascii_name", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v4_w_response_disposition", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v4_w_response_type", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v4_w_slash_in_name", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v4_w_tilde_in_name", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_v4_w_virtual_hostname", "tests/unit/test_blob.py::Test_Blob::test_generate_signed_url_w_invalid_version", "tests/unit/test_blob.py::Test_Blob::test_generation", "tests/unit/test_blob.py::Test_Blob::test_generation_string_val", "tests/unit/test_blob.py::Test_Blob::test_generation_unset", "tests/unit/test_blob.py::Test_Blob::test_get_iam_policy", "tests/unit/test_blob.py::Test_Blob::test_get_iam_policy_w_requested_policy_version", "tests/unit/test_blob.py::Test_Blob::test_get_iam_policy_w_user_project", "tests/unit/test_blob.py::Test_Blob::test_id", "tests/unit/test_blob.py::Test_Blob::test_make_private", "tests/unit/test_blob.py::Test_Blob::test_make_public", "tests/unit/test_blob.py::Test_Blob::test_md5_hash_getter", "tests/unit/test_blob.py::Test_Blob::test_md5_hash_setter", "tests/unit/test_blob.py::Test_Blob::test_media_link", "tests/unit/test_blob.py::Test_Blob::test_metadata_getter", "tests/unit/test_blob.py::Test_Blob::test_metadata_setter", "tests/unit/test_blob.py::Test_Blob::test_metadata_setter_w_nan", "tests/unit/test_blob.py::Test_Blob::test_metageneration", "tests/unit/test_blob.py::Test_Blob::test_metageneration_string_val", "tests/unit/test_blob.py::Test_Blob::test_metageneration_unset", "tests/unit/test_blob.py::Test_Blob::test_owner", "tests/unit/test_blob.py::Test_Blob::test_path_bad_bucket", "tests/unit/test_blob.py::Test_Blob::test_path_no_name", "tests/unit/test_blob.py::Test_Blob::test_path_normal", "tests/unit/test_blob.py::Test_Blob::test_path_w_slash_in_name", "tests/unit/test_blob.py::Test_Blob::test_path_with_non_ascii", "tests/unit/test_blob.py::Test_Blob::test_public_url", "tests/unit/test_blob.py::Test_Blob::test_public_url_w_slash_in_name", "tests/unit/test_blob.py::Test_Blob::test_public_url_w_tilde_in_name", "tests/unit/test_blob.py::Test_Blob::test_public_url_with_non_ascii", 
"tests/unit/test_blob.py::Test_Blob::test_retention_expiration_time", "tests/unit/test_blob.py::Test_Blob::test_retention_expiration_time_unset", "tests/unit/test_blob.py::Test_Blob::test_rewrite_other_bucket_other_name_no_encryption_partial", "tests/unit/test_blob.py::Test_Blob::test_rewrite_response_without_resource", "tests/unit/test_blob.py::Test_Blob::test_rewrite_same_name_no_key_new_key_w_token", "tests/unit/test_blob.py::Test_Blob::test_rewrite_same_name_no_old_key_new_key_done_w_user_project", "tests/unit/test_blob.py::Test_Blob::test_rewrite_same_name_w_old_key_new_kms_key", "tests/unit/test_blob.py::Test_Blob::test_rewrite_w_generation_match", "tests/unit/test_blob.py::Test_Blob::test_rewrite_w_generations", "tests/unit/test_blob.py::Test_Blob::test_self_link", "tests/unit/test_blob.py::Test_Blob::test_set_iam_policy", "tests/unit/test_blob.py::Test_Blob::test_set_iam_policy_w_user_project", "tests/unit/test_blob.py::Test_Blob::test_size", "tests/unit/test_blob.py::Test_Blob::test_size_string_val", "tests/unit/test_blob.py::Test_Blob::test_size_unset", "tests/unit/test_blob.py::Test_Blob::test_storage_class_getter", "tests/unit/test_blob.py::Test_Blob::test_storage_class_setter", "tests/unit/test_blob.py::Test_Blob::test_temporary_hold_getter_false", "tests/unit/test_blob.py::Test_Blob::test_temporary_hold_getter_missing", "tests/unit/test_blob.py::Test_Blob::test_temporary_hold_getter_true", "tests/unit/test_blob.py::Test_Blob::test_temporary_hold_setter", "tests/unit/test_blob.py::Test_Blob::test_test_iam_permissions", "tests/unit/test_blob.py::Test_Blob::test_test_iam_permissions_w_user_project", "tests/unit/test_blob.py::Test_Blob::test_time_created", "tests/unit/test_blob.py::Test_Blob::test_time_created_unset", "tests/unit/test_blob.py::Test_Blob::test_time_deleted", "tests/unit/test_blob.py::Test_Blob::test_time_deleted_unset", "tests/unit/test_blob.py::Test_Blob::test_update_storage_class_invalid", "tests/unit/test_blob.py::Test_Blob::test_update_storage_class_large_file", "tests/unit/test_blob.py::Test_Blob::test_update_storage_class_w_encryption_key_w_user_project", "tests/unit/test_blob.py::Test_Blob::test_update_storage_class_w_generation_match", "tests/unit/test_blob.py::Test_Blob::test_update_storage_class_with_custom_timeout", "tests/unit/test_blob.py::Test_Blob::test_update_storage_class_wo_encryption_key", "tests/unit/test_blob.py::Test_Blob::test_updated", "tests/unit/test_blob.py::Test_Blob::test_updated_unset", "tests/unit/test_blob.py::Test_Blob::test_upload_from_file_failure", "tests/unit/test_blob.py::Test_Blob::test_upload_from_file_success", "tests/unit/test_blob.py::Test_Blob::test_upload_from_file_with_custom_timeout", "tests/unit/test_blob.py::Test_Blob::test_upload_from_file_with_retries", "tests/unit/test_blob.py::Test_Blob::test_upload_from_file_with_rewind", "tests/unit/test_blob.py::Test_Blob::test_upload_from_filename", "tests/unit/test_blob.py::Test_Blob::test_upload_from_filename_w_custom_timeout", "tests/unit/test_blob.py::Test_Blob::test_upload_from_string_w_bytes", "tests/unit/test_blob.py::Test_Blob::test_upload_from_string_w_custom_timeout", "tests/unit/test_blob.py::Test_Blob::test_upload_from_string_w_text", "tests/unit/test_blob.py::Test_Blob::test_user_project", "tests/unit/test_blob.py::Test__quote::test_bad_type", "tests/unit/test_blob.py::Test__quote::test_bytes", "tests/unit/test_blob.py::Test__quote::test_unicode", "tests/unit/test_blob.py::Test__quote::test_w_slash_default", 
"tests/unit/test_blob.py::Test__quote::test_w_slash_w_safe", "tests/unit/test_blob.py::Test__quote::test_w_tilde", "tests/unit/test_blob.py::Test__maybe_rewind::test_default", "tests/unit/test_blob.py::Test__maybe_rewind::test_do_not_rewind", "tests/unit/test_blob.py::Test__maybe_rewind::test_do_rewind", "tests/unit/test_blob.py::Test__raise_from_invalid_response::test_default", "tests/unit/test_blob.py::Test__raise_from_invalid_response::test_w_206_and_args", "tests/unit/test_blob.py::Test__add_query_parameters::test_w_empty_list", "tests/unit/test_blob.py::Test__add_query_parameters::test_w_existing_qs", "tests/unit/test_blob.py::Test__add_query_parameters::test_wo_existing_qs" ]
[]
Apache License 2.0
9,080
240
[ "google/cloud/storage/blob.py" ]
TDAmeritrade__stumpy-288
4adf60adba3782043a007c01c5785496548ea308
2020-12-02 09:39:07
4adf60adba3782043a007c01c5785496548ea308
seanlaw: @DanBenHa Thank you for the PR! I couldn't find much information on when negative values may occur except for one article that simply set those values to zero before taking the square root. I am wondering if there are situations where setting the negative values to zero is wrong? Otherwise, everything else looks good and this should be ready to merge. DanBenHa: > I am wondering if there are situations where setting the negative values to zero is wrong? Good question. I don't know. But that would imply calculating a complex value for the distance of two real-valued vectors. Would that make sense? codecov-io: # [Codecov](https://codecov.io/gh/TDAmeritrade/stumpy/pull/288?src=pr&el=h1) Report > Merging [#288](https://codecov.io/gh/TDAmeritrade/stumpy/pull/288?src=pr&el=desc) (593098f) into [master](https://codecov.io/gh/TDAmeritrade/stumpy/commit/4adf60adba3782043a007c01c5785496548ea308?el=desc) (4adf60a) will **decrease** coverage by `0.23%`. > The diff coverage is `100.00%`. [![Impacted file tree graph](https://codecov.io/gh/TDAmeritrade/stumpy/pull/288/graphs/tree.svg?width=650&height=150&src=pr&token=u0DooAbGji)](https://codecov.io/gh/TDAmeritrade/stumpy/pull/288?src=pr&el=tree) ```diff @@ Coverage Diff @@ ## master #288 +/- ## ========================================== - Coverage 99.82% 99.58% -0.24% ========================================== Files 19 19 Lines 1732 1700 -32 ========================================== - Hits 1729 1693 -36 - Misses 3 7 +4 ``` | [Impacted Files](https://codecov.io/gh/TDAmeritrade/stumpy/pull/288?src=pr&el=tree) | Coverage Δ | | |---|---|---| | [stumpy/core.py](https://codecov.io/gh/TDAmeritrade/stumpy/pull/288/diff?src=pr&el=tree#diff-c3R1bXB5L2NvcmUucHk=) | `100.00% <100.00%> (ø)` | | | [stumpy/ostinato.py](https://codecov.io/gh/TDAmeritrade/stumpy/pull/288/diff?src=pr&el=tree#diff-c3R1bXB5L29zdGluYXRvLnB5) | `89.55% <0.00%> (-5.98%)` | :arrow_down: | | [stumpy/aamp.py](https://codecov.io/gh/TDAmeritrade/stumpy/pull/288/diff?src=pr&el=tree#diff-c3R1bXB5L2FhbXAucHk=) | `100.00% <0.00%> (ø)` | | | [stumpy/aampi.py](https://codecov.io/gh/TDAmeritrade/stumpy/pull/288/diff?src=pr&el=tree#diff-c3R1bXB5L2FhbXBpLnB5) | `100.00% <0.00%> (ø)` | | | [stumpy/floss.py](https://codecov.io/gh/TDAmeritrade/stumpy/pull/288/diff?src=pr&el=tree#diff-c3R1bXB5L2Zsb3NzLnB5) | `100.00% <0.00%> (ø)` | | | [stumpy/stump.py](https://codecov.io/gh/TDAmeritrade/stumpy/pull/288/diff?src=pr&el=tree#diff-c3R1bXB5L3N0dW1wLnB5) | `100.00% <0.00%> (ø)` | | | [stumpy/mstump.py](https://codecov.io/gh/TDAmeritrade/stumpy/pull/288/diff?src=pr&el=tree#diff-c3R1bXB5L21zdHVtcC5weQ==) | `100.00% <0.00%> (ø)` | | | [stumpy/scrump.py](https://codecov.io/gh/TDAmeritrade/stumpy/pull/288/diff?src=pr&el=tree#diff-c3R1bXB5L3NjcnVtcC5weQ==) | `100.00% <0.00%> (ø)` | | | [stumpy/stumpi.py](https://codecov.io/gh/TDAmeritrade/stumpy/pull/288/diff?src=pr&el=tree#diff-c3R1bXB5L3N0dW1waS5weQ==) | `100.00% <0.00%> (ø)` | | | ... and [2 more](https://codecov.io/gh/TDAmeritrade/stumpy/pull/288/diff?src=pr&el=tree-more) | | ------ [Continue to review full report at Codecov](https://codecov.io/gh/TDAmeritrade/stumpy/pull/288?src=pr&el=continue). > **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta) > `Δ = absolute <relative> (impact)`, `ø = not affected`, `? = missing data` > Powered by [Codecov](https://codecov.io/gh/TDAmeritrade/stumpy/pull/288?src=pr&el=footer). Last update [4adf60a...593098f](https://codecov.io/gh/TDAmeritrade/stumpy/pull/288?src=pr&el=lastupdated). 
Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments). seanlaw: > Good question. I don't know. But that would imply calculating a complex value for the distance of two real-valued vectors. Would that make sense? Since negative values imply some sort of deficiency in the numerical precision, I think what I'm really asking is if there might be situations where the amplitude of the negative value may be indicative of something that is a lot worse (while a small, near zero amplitude is due to numerical precision). Just thinking out loud here though :) DanBenHa: > Since negative values imply some sort of deficiency in the numerical precision, I think what I'm really asking is if there might be situations where the amplitude of the negative value may be indicative of something that is a lot worse (while a small, near zero amplitude is due to numerical precision). Just thinking out loud here though :) I see. I guess it could be indicative of problems in the computation of `Q_squared`, `T_squared` and `QT`, so primarily in the functions `rolling_is_finite`, `sliding_dot_product` and `rolling_window`. seanlaw: Okay, I spent most of my morning looking for an ideal solution and, while there were many discussions, there were no obvious solutions
diff --git a/stumpy/core.py b/stumpy/core.py index dba5e87..96bb7d3 100644 --- a/stumpy/core.py +++ b/stumpy/core.py @@ -861,7 +861,9 @@ def _mass_absolute(Q_squared, T_squared, QT): `See Mueen's Absolute Algorithm for Similarity Search \ <https://www.cs.unm.edu/~mueen/MASS_absolute.m>`__ """ - return np.sqrt(Q_squared + T_squared - 2 * QT) + D = Q_squared + T_squared - 2 * QT + D[D < 0] = 0.0 + return np.sqrt(D) def mass_absolute(Q, T):
mass_absolute returns NaN for some negative time-series I found this while playing with the PAMAP dataset. Haven't been able to reproduce this with random numbers, so here's the relevant data and code. ```python Q = np.array([-13.09, -14.1 , -15.08, -16.31, -17.13, -17.5 , -18.07, -18.07, -17.48, -16.24, -14.88, -13.56, -12.65, -11.93, -11.48, -11.06, -10.83, -10.67, -10.59, -10.81, -10.92, -11.15, -11.37, -11.53, -11.19, -11.08, -10.48, -10.14, -9.92, -9.99, -10.11, -9.92, -9.7 , -9.47, -9.06, -9.01, -8.79, -8.67, -8.33, -8. , -8.26, -8. , -7.54, -7.32, -7.13, -7.24, -7.43, -7.93, -8.8 , -9.71]) print(stumpy.core.mass_absolute(Q, Q)) ``` I get `nan`, but of course it should be `0`. The problem is some float imprecision in `_mass_absolute` that leads to negative values that can't be properly `np.sqrt`ed. Easy to fix, of course. A test for this edge case should be added as well.
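As an editorial aside, the clamping approach taken in the patch above can be illustrated in isolation. This is a minimal sketch, not stumpy's actual implementation: the helper name `mass_absolute_clamped` and the toy inputs are invented for the example. The key point is simply that any negative values left over from floating-point cancellation in `Q_squared + T_squared - 2 * QT` are set to zero before the square root is taken.

```python
import numpy as np

def mass_absolute_clamped(Q_squared, T_squared, QT):
    # Illustrative helper mirroring the fix in the patch above (not stumpy's API).
    # In stumpy, Q_squared, T_squared and QT are computed along different code
    # paths (rolling sums and a sliding dot product), so round-off can leave D
    # marginally below zero even when the true distance is exactly zero, and
    # np.sqrt of a negative float yields nan.
    D = Q_squared + T_squared - 2 * QT
    D[D < 0] = 0.0  # clamp round-off noise so the square root stays real-valued
    return np.sqrt(D)

# Toy self-comparison: the exact distance of Q against itself is 0.
Q = np.array([-13.09, -14.1, -15.08, -16.31, -17.13])
Q_squared = np.array([np.dot(Q, Q)])
T_squared = np.array([np.dot(Q, Q)])
QT = np.array([np.dot(Q, Q)])
print(mass_absolute_clamped(Q_squared, T_squared, QT))  # -> [0.]
```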
TDAmeritrade/stumpy
diff --git a/tests/test_core.py b/tests/test_core.py index 482ad79..3361beb 100644 --- a/tests/test_core.py +++ b/tests/test_core.py @@ -430,6 +430,66 @@ def test_mass_absolute_T_inf(Q, T): npt.assert_almost_equal(ref, comp) +def test_mass_absolute_sqrt_input_negative(): + Q = np.array( + [ + -13.09, + -14.1, + -15.08, + -16.31, + -17.13, + -17.5, + -18.07, + -18.07, + -17.48, + -16.24, + -14.88, + -13.56, + -12.65, + -11.93, + -11.48, + -11.06, + -10.83, + -10.67, + -10.59, + -10.81, + -10.92, + -11.15, + -11.37, + -11.53, + -11.19, + -11.08, + -10.48, + -10.14, + -9.92, + -9.99, + -10.11, + -9.92, + -9.7, + -9.47, + -9.06, + -9.01, + -8.79, + -8.67, + -8.33, + -8.0, + -8.26, + -8.0, + -7.54, + -7.32, + -7.13, + -7.24, + -7.43, + -7.93, + -8.8, + -9.71, + ] + ) + ref = 0 + comp = core.mass_absolute(Q, Q) + npt.assert_almost_equal(ref, comp) + + def test_apply_exclusion_zone(): T = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=float) ref = np.empty(T.shape)
{ "commit_name": "merge_commit", "failed_lite_validators": [], "has_test_patch": true, "is_lite": true, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 0 }, "num_modified_files": 1 }
1.5
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[ci]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "pytest-xdist", "pytest-mock", "pytest-asyncio" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
black==25.1.0 certifi==2025.1.31 charset-normalizer==3.4.1 click==8.1.8 cloudpickle==3.1.1 codecov==2.1.13 coverage==7.8.0 dask==2024.8.0 distributed==2024.8.0 exceptiongroup==1.2.2 execnet==2.1.1 flake8==7.2.0 flake8-docstrings==1.7.0 fsspec==2025.3.2 idna==3.10 importlib_metadata==8.6.1 iniconfig==2.1.0 Jinja2==3.1.6 llvmlite==0.43.0 locket==1.0.0 MarkupSafe==3.0.2 mccabe==0.7.0 msgpack==1.1.0 mypy-extensions==1.0.0 numba==0.60.0 numpy==2.0.2 packaging==24.2 pandas==2.2.3 partd==1.4.2 pathspec==0.12.1 platformdirs==4.3.7 pluggy==1.5.0 psutil==7.0.0 pycodestyle==2.13.0 pydocstyle==6.3.0 pyflakes==3.3.2 pytest==8.3.5 pytest-asyncio==0.26.0 pytest-cov==6.0.0 pytest-mock==3.14.0 pytest-xdist==3.6.1 python-dateutil==2.9.0.post0 pytz==2025.2 PyYAML==6.0.2 requests==2.32.3 scipy==1.13.1 six==1.17.0 snowballstemmer==2.2.0 sortedcontainers==2.4.0 -e git+https://github.com/TDAmeritrade/stumpy.git@4adf60adba3782043a007c01c5785496548ea308#egg=stumpy tbb==2022.1.0 tblib==3.1.0 tcmlib==1.3.0 tomli==2.2.1 toolz==1.0.0 tornado==6.4.2 typing_extensions==4.13.0 tzdata==2025.2 urllib3==2.3.0 zict==3.0.0 zipp==3.21.0
name: stumpy channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - black==25.1.0 - certifi==2025.1.31 - charset-normalizer==3.4.1 - click==8.1.8 - cloudpickle==3.1.1 - codecov==2.1.13 - coverage==7.8.0 - dask==2024.8.0 - distributed==2024.8.0 - exceptiongroup==1.2.2 - execnet==2.1.1 - flake8==7.2.0 - flake8-docstrings==1.7.0 - fsspec==2025.3.2 - idna==3.10 - importlib-metadata==8.6.1 - iniconfig==2.1.0 - jinja2==3.1.6 - llvmlite==0.43.0 - locket==1.0.0 - markupsafe==3.0.2 - mccabe==0.7.0 - msgpack==1.1.0 - mypy-extensions==1.0.0 - numba==0.60.0 - numpy==2.0.2 - packaging==24.2 - pandas==2.2.3 - partd==1.4.2 - pathspec==0.12.1 - platformdirs==4.3.7 - pluggy==1.5.0 - psutil==7.0.0 - pycodestyle==2.13.0 - pydocstyle==6.3.0 - pyflakes==3.3.2 - pytest==8.3.5 - pytest-asyncio==0.26.0 - pytest-cov==6.0.0 - pytest-mock==3.14.0 - pytest-xdist==3.6.1 - python-dateutil==2.9.0.post0 - pytz==2025.2 - pyyaml==6.0.2 - requests==2.32.3 - scipy==1.13.1 - six==1.17.0 - snowballstemmer==2.2.0 - sortedcontainers==2.4.0 - tbb==2022.1.0 - tblib==3.1.0 - tcmlib==1.3.0 - tomli==2.2.1 - toolz==1.0.0 - tornado==6.4.2 - typing-extensions==4.13.0 - tzdata==2025.2 - urllib3==2.3.0 - zict==3.0.0 - zipp==3.21.0 prefix: /opt/conda/envs/stumpy
[ "tests/test_core.py::test_mass_absolute_sqrt_input_negative" ]
[]
[ "tests/test_core.py::test_check_dtype_float32", "tests/test_core.py::test_check_dtype_float64", "tests/test_core.py::test_check_window_size", "tests/test_core.py::test_sliding_dot_product[Q0-T0]", "tests/test_core.py::test_sliding_dot_product[Q1-T1]", "tests/test_core.py::test_sliding_dot_product[Q2-T2]", "tests/test_core.py::test_welford_nanvar", "tests/test_core.py::test_welford_nanvar_catastrophic_cancellation", "tests/test_core.py::test_welford_nanvar_nan", "tests/test_core.py::test_welford_nanstd", "tests/test_core.py::test_compute_mean_std[Q0-T0]", "tests/test_core.py::test_compute_mean_std[Q1-T1]", "tests/test_core.py::test_compute_mean_std[Q2-T2]", "tests/test_core.py::test_compute_mean_std_chunked[Q0-T0]", "tests/test_core.py::test_compute_mean_std_chunked[Q1-T1]", "tests/test_core.py::test_compute_mean_std_chunked[Q2-T2]", "tests/test_core.py::test_compute_mean_std_chunked_many[Q0-T0]", "tests/test_core.py::test_compute_mean_std_chunked_many[Q1-T1]", "tests/test_core.py::test_compute_mean_std_chunked_many[Q2-T2]", "tests/test_core.py::test_compute_mean_std_multidimensional[Q0-T0]", "tests/test_core.py::test_compute_mean_std_multidimensional[Q1-T1]", "tests/test_core.py::test_compute_mean_std_multidimensional[Q2-T2]", "tests/test_core.py::test_compute_mean_std_multidimensional_chunked[Q0-T0]", "tests/test_core.py::test_compute_mean_std_multidimensional_chunked[Q1-T1]", "tests/test_core.py::test_compute_mean_std_multidimensional_chunked[Q2-T2]", "tests/test_core.py::test_compute_mean_std_multidimensional_chunked_many[Q0-T0]", "tests/test_core.py::test_compute_mean_std_multidimensional_chunked_many[Q1-T1]", "tests/test_core.py::test_compute_mean_std_multidimensional_chunked_many[Q2-T2]", "tests/test_core.py::test_calculate_squared_distance_profile[Q0-T0]", "tests/test_core.py::test_calculate_squared_distance_profile[Q1-T1]", "tests/test_core.py::test_calculate_squared_distance_profile[Q2-T2]", "tests/test_core.py::test_calculate_distance_profile[Q0-T0]", "tests/test_core.py::test_calculate_distance_profile[Q1-T1]", "tests/test_core.py::test_calculate_distance_profile[Q2-T2]", "tests/test_core.py::test_mueen_calculate_distance_profile[Q0-T0]", "tests/test_core.py::test_mueen_calculate_distance_profile[Q1-T1]", "tests/test_core.py::test_mueen_calculate_distance_profile[Q2-T2]", "tests/test_core.py::test_mass[Q0-T0]", "tests/test_core.py::test_mass[Q1-T1]", "tests/test_core.py::test_mass[Q2-T2]", "tests/test_core.py::test_mass_Q_nan[Q0-T0]", "tests/test_core.py::test_mass_Q_nan[Q1-T1]", "tests/test_core.py::test_mass_Q_nan[Q2-T2]", "tests/test_core.py::test_mass_Q_inf[Q0-T0]", "tests/test_core.py::test_mass_Q_inf[Q1-T1]", "tests/test_core.py::test_mass_Q_inf[Q2-T2]", "tests/test_core.py::test_mass_T_nan[Q0-T0]", "tests/test_core.py::test_mass_T_nan[Q1-T1]", "tests/test_core.py::test_mass_T_nan[Q2-T2]", "tests/test_core.py::test_mass_T_inf[Q0-T0]", "tests/test_core.py::test_mass_T_inf[Q1-T1]", "tests/test_core.py::test_mass_T_inf[Q2-T2]", "tests/test_core.py::test_mass_asbolute[Q0-T0]", "tests/test_core.py::test_mass_asbolute[Q1-T1]", "tests/test_core.py::test_mass_asbolute[Q2-T2]", "tests/test_core.py::test_mass_absolute_Q_nan[Q0-T0]", "tests/test_core.py::test_mass_absolute_Q_nan[Q1-T1]", "tests/test_core.py::test_mass_absolute_Q_nan[Q2-T2]", "tests/test_core.py::test_mass_absolute_Q_inf[Q0-T0]", "tests/test_core.py::test_mass_absolute_Q_inf[Q1-T1]", "tests/test_core.py::test_mass_absolute_Q_inf[Q2-T2]", "tests/test_core.py::test_mass_absolute_T_nan[Q0-T0]", 
"tests/test_core.py::test_mass_absolute_T_nan[Q1-T1]", "tests/test_core.py::test_mass_absolute_T_nan[Q2-T2]", "tests/test_core.py::test_mass_absolute_T_inf[Q0-T0]", "tests/test_core.py::test_mass_absolute_T_inf[Q1-T1]", "tests/test_core.py::test_mass_absolute_T_inf[Q2-T2]", "tests/test_core.py::test_apply_exclusion_zone", "tests/test_core.py::test_apply_exclusion_zone_multidimensional", "tests/test_core.py::test_preprocess", "tests/test_core.py::test_preprocess_non_normalized", "tests/test_core.py::test_preprocess_diagonal", "tests/test_core.py::test_replace_distance", "tests/test_core.py::test_array_to_temp_file", "tests/test_core.py::test_count_diagonal_ndist", "tests/test_core.py::test_get_array_ranges", "tests/test_core.py::test_get_array_ranges_exhausted", "tests/test_core.py::test_get_array_ranges_exhausted_truncated", "tests/test_core.py::test_rolling_isfinite" ]
[]
3-Clause BSD license
9,086
171
[ "stumpy/core.py" ]
bridgecrewio__checkov-711
e6ba23cbd7b570111bd8ca485371dceeba90d318
2020-12-02 11:45:01
25b466be980e420e64519fbf74c93a35a0c94203
diff --git a/checkov/kubernetes/checks/Seccomp.py b/checkov/kubernetes/checks/Seccomp.py index f1991102c..fdd9392b4 100644 --- a/checkov/kubernetes/checks/Seccomp.py +++ b/checkov/kubernetes/checks/Seccomp.py @@ -1,3 +1,4 @@ +import dpath from checkov.common.models.enums import CheckCategories, CheckResult from checkov.kubernetes.base_spec_check import BaseK8Check @@ -12,6 +13,7 @@ class Seccomp(BaseK8Check): # Location: Pod.metadata.annotations.seccomp.security.alpha.kubernetes.io/pod # Location: CronJob.spec.jobTemplate.spec.template.metadata.annotations.seccomp.security.alpha.kubernetes.io/pod # Location: *.spec.template.metadata.annotations.seccomp.security.alpha.kubernetes.io/pod + # Location: *.spec.securityContext.seccompProfile.type supported_kind = ['Pod', 'Deployment', 'DaemonSet', 'StatefulSet', 'ReplicaSet', 'ReplicationController', 'Job', 'CronJob'] categories = [CheckCategories.KUBERNETES] super().__init__(name=name, id=id, categories=categories, supported_entities=supported_kind) @@ -26,6 +28,10 @@ class Seccomp(BaseK8Check): metadata = {} if conf['kind'] == 'Pod': + security_profile = dpath.search(conf, 'spec/securityContext/seccompProfile/type') + if security_profile: + security_profile = dpath.get(conf, 'spec/securityContext/seccompProfile/type') + return CheckResult.PASSED if security_profile == 'RuntimeDefault' else CheckResult.FAILED if "metadata" in conf: metadata = conf["metadata"] elif conf['kind'] == 'CronJob': @@ -45,8 +51,9 @@ class Seccomp(BaseK8Check): if "annotations" in metadata and isinstance(metadata['annotations'], dict): if "seccomp.security.alpha.kubernetes.io/pod" in metadata["annotations"]: if ("docker/default" in metadata["annotations"]["seccomp.security.alpha.kubernetes.io/pod"] or - "runtime/default" in metadata["annotations"]["seccomp.security.alpha.kubernetes.io/pod"]): + "runtime/default" in metadata["annotations"]["seccomp.security.alpha.kubernetes.io/pod"]): return CheckResult.PASSED return CheckResult.FAILED + check = Seccomp() diff --git a/checkov/terraform/checks/resource/aws/ALBListenerHTTPS.py b/checkov/terraform/checks/resource/aws/ALBListenerHTTPS.py index fa84fb285..4e249902a 100644 --- a/checkov/terraform/checks/resource/aws/ALBListenerHTTPS.py +++ b/checkov/terraform/checks/resource/aws/ALBListenerHTTPS.py @@ -1,6 +1,7 @@ from checkov.common.models.enums import CheckResult, CheckCategories from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck + class ALBListenerHTTPS(BaseResourceCheck): def __init__(self): @@ -26,8 +27,12 @@ class ALBListenerHTTPS(BaseResourceCheck): default_action = conf['default_action'][0] action_type = default_action['type'] if action_type == ['redirect']: - if default_action.get('redirect') and default_action['redirect'][0]['protocol'] == ['HTTPS']: - return CheckResult.PASSED + if default_action.get('redirect'): + protocol = default_action['redirect'][0].get('protocol') + if protocol == ['HTTPS']: + return CheckResult.PASSED + elif protocol is None: + return CheckResult.UNKNOWN return CheckResult.FAILED
CKV_K8S_31 failure with RuntimeDefault configured **Describe the bug** CKV_K8S_31 failure when the `seccompProfile` type is configured as `RuntimeDefault` which is the new syntax. **To Reproduce** 1. Define security context as below. ```yaml ... securityContext: allowPrivilegeEscalation: false seccompProfile: type: RuntimeDefault ... ``` 2. Run checkov for kubernetes. **Expected behavior** CKV_K8S_31 to pass with the following configuration. ```yaml ... securityContext: allowPrivilegeEscalation: false seccompProfile: type: RuntimeDefault ... ``` **Screenshots** N/A **Desktop (please complete the following information):** - Kubernetes 1.19.0 - OS: Ubuntu 18.04 - Checkov Version: 1.0.657 **Additional context** https://kubernetes.io/docs/tutorials/clusters/seccomp/#create-pod-that-uses-the-container-runtime-default-seccomp-profile
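For illustration only, the detection logic described above can be restated as a small standalone script. This is a hedged sketch, not checkov's actual check class: the function name `seccomp_profile_passes` and the sample manifest are invented for the example, and plain dictionary lookups stand in for the dpath-based traversal used in the patch. It simply prefers the new `spec.securityContext.seccompProfile.type` field and falls back to the legacy pod annotation otherwise.

```python
import yaml  # PyYAML, already a checkov dependency

MANIFEST = """
apiVersion: v1
kind: Pod
metadata:
  name: seccomp-runtime-default
spec:
  securityContext:
    seccompProfile:
      type: RuntimeDefault
  containers:
    - name: demo
      image: busybox
"""

def seccomp_profile_passes(conf: dict) -> bool:
    # Prefer the newer securityContext field (the case reported above) ...
    profile = (
        conf.get("spec", {})
        .get("securityContext", {})
        .get("seccompProfile", {})
        .get("type")
    )
    if profile is not None:
        return profile == "RuntimeDefault"
    # ... and otherwise fall back to the legacy pod-level annotation.
    annotations = conf.get("metadata", {}).get("annotations") or {}
    legacy = annotations.get("seccomp.security.alpha.kubernetes.io/pod", "")
    return "runtime/default" in legacy or "docker/default" in legacy

print(seccomp_profile_passes(yaml.safe_load(MANIFEST)))  # -> True
```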
bridgecrewio/checkov
diff --git a/tests/kubernetes/checks/example_Seccomp/pod-seccomp-PASSED3.yaml b/tests/kubernetes/checks/example_Seccomp/pod-seccomp-PASSED3.yaml new file mode 100644 index 000000000..64c92f42e --- /dev/null +++ b/tests/kubernetes/checks/example_Seccomp/pod-seccomp-PASSED3.yaml @@ -0,0 +1,27 @@ +# Source: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +apiVersion: v1 +kind: Pod +metadata: + name: seccomp-passed +spec: + securityContext: + runAsUser: 1000 + runAsGroup: 3000 + fsGroup: 2000 + seccompProfile: + type: RuntimeDefault + volumes: + - name: sec-ctx-vol + emptyDir: {} + containers: + - name: sec-ctx-demo + image: busybox + command: [ "sh", "-c", "sleep 1h" ] + volumeMounts: + - name: sec-ctx-vol + mountPath: /data/demo + securityContext: + allowPrivilegeEscalation: false + +# kubectl exec -it seccomp-failed -- grep Seccomp /proc/1/status +### This will return 'Seccomp: 0' indicating docker running with no Seccomp diff --git a/tests/kubernetes/checks/test_Seccomp.py b/tests/kubernetes/checks/test_Seccomp.py index 742e25141..01530f432 100644 --- a/tests/kubernetes/checks/test_Seccomp.py +++ b/tests/kubernetes/checks/test_Seccomp.py @@ -16,7 +16,7 @@ class TestSeccomp(unittest.TestCase): report = runner.run(root_folder=test_files_dir,runner_filter=RunnerFilter(checks=[check.id])) summary = report.get_summary() - self.assertEqual(summary['passed'], 2) + self.assertEqual(summary['passed'], 3) self.assertEqual(summary['failed'], 1) self.assertEqual(summary['skipped'], 0) self.assertEqual(summary['parsing_errors'], 0) diff --git a/tests/terraform/checks/resource/aws/test_ALBListenerHTTPS.py b/tests/terraform/checks/resource/aws/test_ALBListenerHTTPS.py index 5364f42d5..effe3d1cf 100644 --- a/tests/terraform/checks/resource/aws/test_ALBListenerHTTPS.py +++ b/tests/terraform/checks/resource/aws/test_ALBListenerHTTPS.py @@ -1,5 +1,7 @@ import unittest +import hcl2 + from checkov.terraform.checks.resource.aws.ALBListenerHTTPS import check from checkov.common.models.enums import CheckResult @@ -43,6 +45,25 @@ class TestALBListenerHTTPS(unittest.TestCase): scan_result = check.scan_resource_conf(conf=resource_conf) self.assertEqual(CheckResult.FAILED, scan_result) + def test_success_no_protocol(self): + hcl_res = hcl2.loads(""" +resource "aws_lb_listener" "http_redirector" { + load_balancer_arn = aws_lb.redirector.arn + port = "80" + protocol = "HTTP" + default_action { + type = "redirect" + redirect { + host = "example.com" + status_code = "HTTP_302" + } + } +} + """) + resource_conf = hcl_res['resource'][0]['aws_lb_listener']['http_redirector'] + result = check.scan_resource_conf(resource_conf) + self.assertEqual(CheckResult.UNKNOWN, result) + if __name__ == '__main__': unittest.main()
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 3 }, "num_modified_files": 2 }
1.0
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y git" ], "python": "3.7", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///croot/attrs_1668696182826/work bc-python-hcl2==0.3.51 boto3==1.12.43 botocore==1.15.49 certifi @ file:///croot/certifi_1671487769961/work/certifi chardet==3.0.4 -e git+https://github.com/bridgecrewio/checkov.git@e6ba23cbd7b570111bd8ca485371dceeba90d318#egg=checkov colorama==0.4.3 deep-merge==0.0.4 docopt==0.6.2 docutils==0.15.2 dpath==1.5.0 flit_core @ file:///opt/conda/conda-bld/flit-core_1644941570762/work/source/flit_core gitdb==4.0.5 GitPython==3.1.7 idna==2.8 importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1648562407465/work importlib-resources==5.12.0 iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work jmespath==0.10.0 junit-xml==1.8 lark==1.1.9 lark-parser==0.7.8 packaging==20.4 pluggy @ file:///tmp/build/80754af9/pluggy_1648042572264/work py @ file:///opt/conda/conda-bld/py_1644396412707/work pyparsing==3.1.4 pytest==7.1.2 python-dateutil==2.9.0.post0 PyYAML==5.3.1 requests==2.22.0 s3transfer==0.3.7 semantic-version==2.8.5 six==1.15.0 smmap==3.0.5 tabulate==0.8.6 termcolor==1.1.0 tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work tqdm==4.49.0 typing_extensions @ file:///croot/typing_extensions_1669924550328/work update-checker==0.18.0 urllib3==1.25.10 zipp @ file:///croot/zipp_1672387121353/work
name: checkov channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=22.1.0=py37h06a4308_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2022.12.7=py37h06a4308_0 - flit-core=3.6.0=pyhd3eb1b0_0 - importlib-metadata=4.11.3=py37h06a4308_0 - importlib_metadata=4.11.3=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=22.3.1=py37h06a4308_0 - pluggy=1.0.0=py37h06a4308_1 - py=1.11.0=pyhd3eb1b0_0 - pytest=7.1.2=py37h06a4308_0 - python=3.7.16=h7a1cb2a_0 - readline=8.2=h5eee18b_0 - setuptools=65.6.3=py37h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tomli=2.0.1=py37h06a4308_0 - typing_extensions=4.4.0=py37h06a4308_0 - wheel=0.38.4=py37h06a4308_0 - xz=5.6.4=h5eee18b_1 - zipp=3.11.0=py37h06a4308_0 - zlib=1.2.13=h5eee18b_1 - pip: - bc-python-hcl2==0.3.51 - boto3==1.12.43 - botocore==1.15.49 - chardet==3.0.4 - colorama==0.4.3 - deep-merge==0.0.4 - docopt==0.6.2 - docutils==0.15.2 - dpath==1.5.0 - gitdb==4.0.5 - gitpython==3.1.7 - idna==2.8 - importlib-resources==5.12.0 - jmespath==0.10.0 - junit-xml==1.8 - lark==1.1.9 - lark-parser==0.7.8 - packaging==20.4 - pyparsing==3.1.4 - python-dateutil==2.9.0.post0 - pyyaml==5.3.1 - requests==2.22.0 - s3transfer==0.3.7 - semantic-version==2.8.5 - six==1.15.0 - smmap==3.0.5 - tabulate==0.8.6 - termcolor==1.1.0 - tqdm==4.49.0 - update-checker==0.18.0 - urllib3==1.25.10 prefix: /opt/conda/envs/checkov
[ "tests/kubernetes/checks/test_Seccomp.py::TestSeccomp::test_summary", "tests/terraform/checks/resource/aws/test_ALBListenerHTTPS.py::TestALBListenerHTTPS::test_success_no_protocol" ]
[]
[ "tests/terraform/checks/resource/aws/test_ALBListenerHTTPS.py::TestALBListenerHTTPS::test_failure", "tests/terraform/checks/resource/aws/test_ALBListenerHTTPS.py::TestALBListenerHTTPS::test_nlb_tcp_success", "tests/terraform/checks/resource/aws/test_ALBListenerHTTPS.py::TestALBListenerHTTPS::test_nlb_tcp_udp_success", "tests/terraform/checks/resource/aws/test_ALBListenerHTTPS.py::TestALBListenerHTTPS::test_nlb_udp_success", "tests/terraform/checks/resource/aws/test_ALBListenerHTTPS.py::TestALBListenerHTTPS::test_success", "tests/terraform/checks/resource/aws/test_ALBListenerHTTPS.py::TestALBListenerHTTPS::test_success_redirect" ]
[]
Apache License 2.0
9,088
867
[ "checkov/kubernetes/checks/Seccomp.py", "checkov/terraform/checks/resource/aws/ALBListenerHTTPS.py" ]
dstl__Stone-Soup-346
0194cff0ed89a5a0de0d018a8d8ff067087e5df2
2020-12-02 12:28:08
0194cff0ed89a5a0de0d018a8d8ff067087e5df2
diff --git a/stonesoup/models/base.py b/stonesoup/models/base.py index 85d048d5..7ed4634d 100644 --- a/stonesoup/models/base.py +++ b/stonesoup/models/base.py @@ -188,8 +188,14 @@ class GaussianModel(Model): distribution. """ + covar = self.covar(**kwargs) + + # If model has None-type covariance or contains None, it does not represent a Gaussian + if covar is None or None in covar: + raise ValueError("Cannot generate rvs from None-type covariance") + noise = multivariate_normal.rvs( - np.zeros(self.ndim), self.covar(**kwargs), num_samples) + np.zeros(self.ndim), covar, num_samples) noise = np.atleast_2d(noise) @@ -227,11 +233,17 @@ class GaussianModel(Model): The likelihood of ``state1``, given ``state2`` """ + covar = self.covar(**kwargs) + + # If model has None-type covariance or contains None, it does not represent a Gaussian + if covar is None or None in covar: + raise ValueError("Cannot generate pdf from None-type covariance") + # Calculate difference before to handle custom types (mean defaults to zero) # This is required as log pdf coverts arrays to floats likelihood = multivariate_normal.logpdf( (state1.state_vector - self.function(state2, **kwargs)).ravel(), - cov=self.covar(**kwargs) + cov=covar ) return Probability(likelihood, log_value=True)
GaussianModel rvs method generates noise from identity if covariance is None-type Would it be appropriate to handle `covar = None` as 'noiseless'? Currently, the scipy.stats.multivariate_normal.rvs method defaults the covariance to the identity matrix. Therefore, when passing in None, noise is generated.
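As an editorial illustration of the guard introduced by the patch above, here is a hedged, standalone sketch. The function `rvs_with_guard` is not part of Stone-Soup; it only shows the idea of refusing to sample when the covariance is missing, instead of letting the call silently succeed with a default covariance as described in the report (the actual patch also rejects covariances that contain None entries).

```python
import numpy as np
from scipy.stats import multivariate_normal

def rvs_with_guard(covar, ndim, num_samples=1):
    # Treat a missing covariance as an error rather than as "noiseless":
    # mirrors the ValueError added to GaussianModel.rvs in the patch above.
    if covar is None:
        raise ValueError("Cannot generate rvs from None-type covariance")
    return np.atleast_2d(multivariate_normal.rvs(np.zeros(ndim), covar, num_samples))

print(rvs_with_guard(np.diag([1.0, 10.0]), ndim=2, num_samples=3).shape)  # -> (3, 2)

try:
    rvs_with_guard(None, ndim=2)
except ValueError as err:
    print(err)  # -> Cannot generate rvs from None-type covariance
```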
dstl/Stone-Soup
diff --git a/stonesoup/models/measurement/tests/test_combined.py b/stonesoup/models/measurement/tests/test_combined.py index eb73837c..e27bd8ee 100644 --- a/stonesoup/models/measurement/tests/test_combined.py +++ b/stonesoup/models/measurement/tests/test_combined.py @@ -107,3 +107,15 @@ def test_mismatch_ndim_state(): CartesianToBearingRange(3, [0, 1], np.diag([1, 10])), CartesianToBearingRange(4, [0, 1], np.diag([1, 10])), ]) + + +def test_none_covar(): + new_model = CombinedReversibleGaussianMeasurementModel([ + CartesianToBearingRange(4, [0, 1], None), + CartesianToBearingRange(4, [0, 1], np.diag([1, 10])) + ]) + + with pytest.raises(ValueError, match="Cannot generate rvs from None-type covariance"): + new_model.rvs() + with pytest.raises(ValueError, match="Cannot generate pdf from None-type covariance"): + new_model.pdf(State([0, 0, 0, 0]), State([0, 0, 0, 0])) diff --git a/stonesoup/models/measurement/tests/test_models.py b/stonesoup/models/measurement/tests/test_models.py index e3defa13..8fc8ae28 100644 --- a/stonesoup/models/measurement/tests/test_models.py +++ b/stonesoup/models/measurement/tests/test_models.py @@ -9,6 +9,7 @@ from ..nonlinear import ( CartesianToElevationBearing, Cartesian2DToBearing, CartesianToBearingRangeRate, CartesianToElevationBearingRangeRate) from ...base import ReversibleModel +from ...measurement.linear import LinearGaussian from ....functions import jacobian as compute_jac from ....functions import pol2cart from ....functions import rotz, rotx, roty, cart2sphere @@ -79,6 +80,22 @@ def hbearing(state_vector, pos_map, translation_offset, rotation_offset): return StateVector([Elevation(theta), Bearing(phi)]) [email protected]( + "model_class", + [LinearGaussian, + CartesianToElevationBearingRange, + CartesianToBearingRange, + CartesianToElevationBearing, + Cartesian2DToBearing, + CartesianToBearingRangeRate, + CartesianToElevationBearingRangeRate] +) +def test_none_covar(model_class): + model = model_class(ndim_state=0, mapping=None, noise_covar=None) + with pytest.raises(ValueError, match="Cannot generate pdf from None-type covariance"): + model.pdf(State([0]), State([0])) + + @pytest.mark.parametrize( "h, ModelClass, state_vec, R , mapping,\ translation_offset, rotation_offset", diff --git a/stonesoup/models/transition/tests/test_time_invariant.py b/stonesoup/models/transition/tests/test_time_invariant.py index 48cd6b70..be4870c6 100644 --- a/stonesoup/models/transition/tests/test_time_invariant.py +++ b/stonesoup/models/transition/tests/test_time_invariant.py @@ -5,6 +5,7 @@ import numpy as np from ..linear import LinearGaussianTimeInvariantTransitionModel from ....types.state import State +import pytest def test_linear_gaussian(): @@ -23,3 +24,9 @@ def test_linear_gaussian(): noise=np.zeros([3, 1]))) assert isinstance(model.rvs(), np.ndarray) assert isinstance(model.pdf(State(x_2), State(x_1)), Real) + + model = LinearGaussianTimeInvariantTransitionModel(transition_matrix=F, covariance_matrix=None) + with pytest.raises(ValueError, match="Cannot generate rvs from None-type covariance"): + model.rvs() + with pytest.raises(ValueError, match="Cannot generate pdf from None-type covariance"): + model.pdf(State([0]), State([0]))
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 3 }, "num_modified_files": 1 }
0.11
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
alabaster==0.7.16 babel==2.17.0 branca==0.8.1 certifi==2025.1.31 charset-normalizer==3.4.1 contourpy==1.3.0 coverage==7.8.0 cycler==0.12.1 docutils==0.21.2 exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work flake8==7.2.0 folium==0.19.5 fonttools==4.56.0 idna==3.10 imagesize==1.4.1 importlib_metadata==8.6.1 importlib_resources==6.5.2 iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work Jinja2==3.1.6 kiwisolver==1.4.7 MarkupSafe==3.0.2 matplotlib==3.9.4 mccabe==0.7.0 numpy==2.0.2 packaging @ file:///croot/packaging_1734472117206/work pillow==11.1.0 pluggy @ file:///croot/pluggy_1733169602837/work pycodestyle==2.13.0 pyflakes==3.3.2 Pygments==2.19.1 pymap3d==3.1.0 pyparsing==3.2.3 pytest @ file:///croot/pytest_1738938843180/work pytest-cov==6.0.0 pytest-flake8==1.3.0 python-dateutil==2.9.0.post0 requests==2.32.3 ruamel.yaml==0.18.10 ruamel.yaml.clib==0.2.12 scipy==1.13.1 six==1.17.0 snowballstemmer==2.2.0 Sphinx==7.4.7 sphinx-gallery==0.19.0 sphinx-rtd-theme==3.0.2 sphinxcontrib-applehelp==2.0.0 sphinxcontrib-devhelp==2.0.0 sphinxcontrib-htmlhelp==2.1.0 sphinxcontrib-jquery==4.1 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==2.0.0 sphinxcontrib-serializinghtml==2.0.0 -e git+https://github.com/dstl/Stone-Soup.git@0194cff0ed89a5a0de0d018a8d8ff067087e5df2#egg=stonesoup tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work urllib3==2.3.0 utm==0.8.1 xyzservices==2025.1.0 zipp==3.21.0
name: Stone-Soup channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - exceptiongroup=1.2.0=py39h06a4308_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - packaging=24.2=py39h06a4308_0 - pip=25.0=py39h06a4308_0 - pluggy=1.5.0=py39h06a4308_0 - pytest=8.3.4=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tomli=2.0.1=py39h06a4308_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - alabaster==0.7.16 - babel==2.17.0 - branca==0.8.1 - certifi==2025.1.31 - charset-normalizer==3.4.1 - contourpy==1.3.0 - coverage==7.8.0 - cycler==0.12.1 - docutils==0.21.2 - flake8==7.2.0 - folium==0.19.5 - fonttools==4.56.0 - idna==3.10 - imagesize==1.4.1 - importlib-metadata==8.6.1 - importlib-resources==6.5.2 - jinja2==3.1.6 - kiwisolver==1.4.7 - markupsafe==3.0.2 - matplotlib==3.9.4 - mccabe==0.7.0 - numpy==2.0.2 - pillow==11.1.0 - pycodestyle==2.13.0 - pyflakes==3.3.2 - pygments==2.19.1 - pymap3d==3.1.0 - pyparsing==3.2.3 - pytest-cov==6.0.0 - pytest-flake8==1.3.0 - python-dateutil==2.9.0.post0 - requests==2.32.3 - ruamel-yaml==0.18.10 - ruamel-yaml-clib==0.2.12 - scipy==1.13.1 - six==1.17.0 - snowballstemmer==2.2.0 - sphinx==7.4.7 - sphinx-gallery==0.19.0 - sphinx-rtd-theme==3.0.2 - sphinxcontrib-applehelp==2.0.0 - sphinxcontrib-devhelp==2.0.0 - sphinxcontrib-htmlhelp==2.1.0 - sphinxcontrib-jquery==4.1 - sphinxcontrib-jsmath==1.0.1 - sphinxcontrib-qthelp==2.0.0 - sphinxcontrib-serializinghtml==2.0.0 - urllib3==2.3.0 - utm==0.8.1 - xyzservices==2025.1.0 - zipp==3.21.0 prefix: /opt/conda/envs/Stone-Soup
[ "stonesoup/models/measurement/tests/test_combined.py::test_none_covar", "stonesoup/models/measurement/tests/test_models.py::test_none_covar[LinearGaussian]", "stonesoup/models/measurement/tests/test_models.py::test_none_covar[CartesianToElevationBearingRange]", "stonesoup/models/measurement/tests/test_models.py::test_none_covar[CartesianToBearingRange]", "stonesoup/models/measurement/tests/test_models.py::test_none_covar[CartesianToElevationBearing]", "stonesoup/models/measurement/tests/test_models.py::test_none_covar[Cartesian2DToBearing]", "stonesoup/models/measurement/tests/test_models.py::test_none_covar[CartesianToBearingRangeRate]", "stonesoup/models/measurement/tests/test_models.py::test_none_covar[CartesianToElevationBearingRangeRate]" ]
[ "stonesoup/models/measurement/tests/test_combined.py::test_jacobian", "stonesoup/models/measurement/tests/test_combined.py::test_non_linear_and_linear", "stonesoup/models/measurement/tests/test_models.py::test_models[Bearing1]", "stonesoup/models/measurement/tests/test_models.py::test_models[Bearing2]", "stonesoup/models/measurement/tests/test_models.py::test_models[BearingElevation1]", "stonesoup/models/measurement/tests/test_models.py::test_models[BearingElevation2]", "stonesoup/models/measurement/tests/test_models.py::test_models[RangeBearingElevation1_0]", "stonesoup/models/measurement/tests/test_models.py::test_models[RangeBearingElevation1_1]", "stonesoup/models/measurement/tests/test_models.py::test_models[BearingsOnly1]", "stonesoup/models/measurement/tests/test_models.py::test_models[BearingsOnly2]", "stonesoup/models/measurement/tests/test_models.py::test_model_predictions[CartesianToElevationBearing-measure_mapping0-False-sensor_state0-target_state0-expected_measurement0]", "stonesoup/models/measurement/tests/test_models.py::test_model_predictions[CartesianToElevationBearing-measure_mapping0-False-sensor_state1-target_state1-expected_measurement1]", "stonesoup/models/measurement/tests/test_models.py::test_model_predictions[CartesianToElevationBearing-measure_mapping0-False-sensor_state2-target_state2-expected_measurement2]", "stonesoup/models/measurement/tests/test_models.py::test_model_predictions[CartesianToElevationBearing-measure_mapping0-False-sensor_state3-target_state3-expected_measurement3]", "stonesoup/models/measurement/tests/test_models.py::test_model_predictions[CartesianToElevationBearing-measure_mapping0-False-sensor_state4-target_state4-expected_measurement4]", "stonesoup/models/measurement/tests/test_models.py::test_model_predictions[CartesianToElevationBearing-measure_mapping0-False-sensor_state5-target_state5-expected_measurement5]", "stonesoup/models/measurement/tests/test_models.py::test_model_predictions[CartesianToElevationBearing-measure_mapping0-False-sensor_state6-target_state6-expected_measurement6]", "stonesoup/models/measurement/tests/test_models.py::test_model_predictions[CartesianToElevationBearing-measure_mapping0-False-sensor_state7-target_state7-expected_measurement7]", "stonesoup/models/measurement/tests/test_models.py::test_model_predictions[CartesianToElevationBearing-measure_mapping0-False-sensor_state8-target_state8-expected_measurement8]", "stonesoup/models/measurement/tests/test_models.py::test_model_predictions[CartesianToElevationBearing-measure_mapping0-False-sensor_state9-target_state9-expected_measurement9]", "stonesoup/models/measurement/tests/test_models.py::test_model_predictions[CartesianToElevationBearing-measure_mapping0-False-sensor_state10-target_state10-expected_measurement10]", "stonesoup/models/measurement/tests/test_models.py::test_model_predictions[CartesianToElevationBearing-measure_mapping0-False-sensor_state11-target_state11-expected_measurement11]", "stonesoup/models/measurement/tests/test_models.py::test_model_predictions[CartesianToElevationBearing-measure_mapping0-False-sensor_state12-target_state12-expected_measurement12]", "stonesoup/models/measurement/tests/test_models.py::test_model_predictions[CartesianToElevationBearing-measure_mapping0-False-sensor_state13-target_state13-expected_measurement13]", "stonesoup/models/measurement/tests/test_models.py::test_model_predictions[CartesianToElevationBearing-measure_mapping0-False-sensor_state14-target_state14-expected_measurement14]", 
"stonesoup/models/measurement/tests/test_models.py::test_model_predictions[CartesianToElevationBearing-measure_mapping0-False-sensor_state15-target_state15-expected_measurement15]", "stonesoup/models/measurement/tests/test_models.py::test_model_predictions[CartesianToElevationBearingRange-measure_mapping1-False-sensor_state0-target_state0-expected_measurement0]", "stonesoup/models/measurement/tests/test_models.py::test_model_predictions[CartesianToElevationBearingRange-measure_mapping1-False-sensor_state1-target_state1-expected_measurement1]", "stonesoup/models/measurement/tests/test_models.py::test_model_predictions[CartesianToElevationBearingRange-measure_mapping1-False-sensor_state2-target_state2-expected_measurement2]", "stonesoup/models/measurement/tests/test_models.py::test_model_predictions[CartesianToElevationBearingRange-measure_mapping1-False-sensor_state3-target_state3-expected_measurement3]", "stonesoup/models/measurement/tests/test_models.py::test_model_predictions[CartesianToElevationBearingRange-measure_mapping1-False-sensor_state4-target_state4-expected_measurement4]", "stonesoup/models/measurement/tests/test_models.py::test_model_predictions[CartesianToElevationBearingRange-measure_mapping1-False-sensor_state5-target_state5-expected_measurement5]", "stonesoup/models/measurement/tests/test_models.py::test_model_predictions[CartesianToElevationBearingRange-measure_mapping1-False-sensor_state6-target_state6-expected_measurement6]", "stonesoup/models/measurement/tests/test_models.py::test_model_predictions[CartesianToElevationBearingRange-measure_mapping1-False-sensor_state7-target_state7-expected_measurement7]", "stonesoup/models/measurement/tests/test_models.py::test_model_predictions[CartesianToElevationBearingRange-measure_mapping1-False-sensor_state8-target_state8-expected_measurement8]", "stonesoup/models/measurement/tests/test_models.py::test_model_predictions[CartesianToElevationBearingRange-measure_mapping1-False-sensor_state9-target_state9-expected_measurement9]", "stonesoup/models/measurement/tests/test_models.py::test_model_predictions[CartesianToElevationBearingRange-measure_mapping1-False-sensor_state10-target_state10-expected_measurement10]", "stonesoup/models/measurement/tests/test_models.py::test_model_predictions[CartesianToElevationBearingRange-measure_mapping1-False-sensor_state11-target_state11-expected_measurement11]", "stonesoup/models/measurement/tests/test_models.py::test_model_predictions[CartesianToElevationBearingRange-measure_mapping1-False-sensor_state12-target_state12-expected_measurement12]", "stonesoup/models/measurement/tests/test_models.py::test_model_predictions[CartesianToElevationBearingRange-measure_mapping1-False-sensor_state13-target_state13-expected_measurement13]", "stonesoup/models/measurement/tests/test_models.py::test_model_predictions[CartesianToElevationBearingRange-measure_mapping1-False-sensor_state14-target_state14-expected_measurement14]", "stonesoup/models/measurement/tests/test_models.py::test_model_predictions[CartesianToElevationBearingRange-measure_mapping1-False-sensor_state15-target_state15-expected_measurement15]", "stonesoup/models/measurement/tests/test_models.py::test_model_predictions[CartesianToElevationBearingRangeRate-measure_mapping2-True-sensor_state0-target_state0-expected_measurement0]", "stonesoup/models/measurement/tests/test_models.py::test_model_predictions[CartesianToElevationBearingRangeRate-measure_mapping2-True-sensor_state1-target_state1-expected_measurement1]", 
"stonesoup/models/measurement/tests/test_models.py::test_model_predictions[CartesianToElevationBearingRangeRate-measure_mapping2-True-sensor_state2-target_state2-expected_measurement2]", "stonesoup/models/measurement/tests/test_models.py::test_model_predictions[CartesianToElevationBearingRangeRate-measure_mapping2-True-sensor_state3-target_state3-expected_measurement3]", "stonesoup/models/measurement/tests/test_models.py::test_model_predictions[CartesianToElevationBearingRangeRate-measure_mapping2-True-sensor_state4-target_state4-expected_measurement4]", "stonesoup/models/measurement/tests/test_models.py::test_model_predictions[CartesianToElevationBearingRangeRate-measure_mapping2-True-sensor_state5-target_state5-expected_measurement5]", "stonesoup/models/measurement/tests/test_models.py::test_model_predictions[CartesianToElevationBearingRangeRate-measure_mapping2-True-sensor_state6-target_state6-expected_measurement6]", "stonesoup/models/measurement/tests/test_models.py::test_model_predictions[CartesianToElevationBearingRangeRate-measure_mapping2-True-sensor_state7-target_state7-expected_measurement7]", "stonesoup/models/measurement/tests/test_models.py::test_model_predictions[CartesianToElevationBearingRangeRate-measure_mapping2-True-sensor_state8-target_state8-expected_measurement8]", "stonesoup/models/measurement/tests/test_models.py::test_model_predictions[CartesianToElevationBearingRangeRate-measure_mapping2-True-sensor_state9-target_state9-expected_measurement9]", "stonesoup/models/measurement/tests/test_models.py::test_model_predictions[CartesianToElevationBearingRangeRate-measure_mapping2-True-sensor_state10-target_state10-expected_measurement10]", "stonesoup/models/measurement/tests/test_models.py::test_model_predictions[CartesianToElevationBearingRangeRate-measure_mapping2-True-sensor_state11-target_state11-expected_measurement11]", "stonesoup/models/measurement/tests/test_models.py::test_model_predictions[CartesianToElevationBearingRangeRate-measure_mapping2-True-sensor_state12-target_state12-expected_measurement12]", "stonesoup/models/measurement/tests/test_models.py::test_model_predictions[CartesianToElevationBearingRangeRate-measure_mapping2-True-sensor_state13-target_state13-expected_measurement13]", "stonesoup/models/measurement/tests/test_models.py::test_model_predictions[CartesianToElevationBearingRangeRate-measure_mapping2-True-sensor_state14-target_state14-expected_measurement14]", "stonesoup/models/measurement/tests/test_models.py::test_model_predictions[CartesianToElevationBearingRangeRate-measure_mapping2-True-sensor_state15-target_state15-expected_measurement15]", "stonesoup/models/measurement/tests/test_models.py::test_rangeratemodels[rrRB_1]", "stonesoup/models/measurement/tests/test_models.py::test_rangeratemodels[rrRB_2]", "stonesoup/models/measurement/tests/test_models.py::test_rangeratemodels[rrRBE_1]", "stonesoup/models/measurement/tests/test_models.py::test_rangeratemodels[rrRBE_2]", "stonesoup/models/measurement/tests/test_models.py::test_inverse_function", "stonesoup/models/transition/tests/test_time_invariant.py::test_linear_gaussian" ]
[ "stonesoup/models/measurement/tests/test_combined.py::test_non_linear", "stonesoup/models/measurement/tests/test_combined.py::test_covar", "stonesoup/models/measurement/tests/test_combined.py::test_inverse", "stonesoup/models/measurement/tests/test_combined.py::test_rvs", "stonesoup/models/measurement/tests/test_combined.py::test_pdf", "stonesoup/models/measurement/tests/test_combined.py::test_mismatch_ndim_state", "stonesoup/models/measurement/tests/test_models.py::test_angle_pdf" ]
[]
MIT License
9,089
385
[ "stonesoup/models/base.py" ]
eWaterCycle__era5cli-58
2c2dffd3aabe02e29461e8e8bfb6ac9ce85c4465
2020-12-02 13:33:29
afdea52f99886d7282faa21b98d15895189f0cdd
diff --git a/era5cli/cli.py b/era5cli/cli.py index a13ce9c..4dc2de2 100644 --- a/era5cli/cli.py +++ b/era5cli/cli.py @@ -5,6 +5,8 @@ import argparse import textwrap import sys +from datetime import datetime + import era5cli.inputref as ref import era5cli.info as einfo import era5cli.fetch as efetch @@ -127,6 +129,17 @@ def _build_parser(): ''') ) + common.add_argument( + "--prelimbe", action="store_true", default=False, + help=textwrap.dedent('''\ + Whether to download the preliminary back extension + (1950-1978). Providing the + "--prelimbe" argument downloads data from + the preliminary back extension. + + ''') + ) + mnth = argparse.ArgumentParser(add_help=False) mnth.add_argument( @@ -271,6 +284,32 @@ def _run_info(args): return True +def _construct_year_list(args): + if not args.endyear: + endyear = args.startyear + else: + endyear = args.endyear + + # check whether correct years have been entered + for year in (args.startyear, endyear): + if args.prelimbe: + assert 1950 <= year <= 1978, ( + 'year should be between 1950 and 1978' + ) + else: + assert 1979 <= year <= datetime.now().year, ( + 'year should be between 1979 and present' + ) + + assert endyear >= args.startyear, ( + 'endyear should be >= startyear or None') + + # make list of years to be downloaded + years = list(range(args.startyear, endyear + 1)) + + return years + + def _set_period_args(args): # set subroutine specific arguments for monthly and hourly fetch if args.command == "monthly": @@ -288,6 +327,11 @@ def _set_period_args(args): elif args.command == "hourly": synoptic = None statistics = args.statistics + if statistics: + assert args.ensemble, ( + "Statistics can only be computed over an ensemble, " + "add --ensemble or remove --statistics." + ) days = args.days hours = args.hours else: @@ -305,30 +349,26 @@ def _execute(args): # the fetching subroutines else: - # make list of years to be downloaded - if not args.endyear: - years = [args.startyear] - else: - assert (args.endyear >= args.startyear), ( - 'endyear should be >= startyear or None') - years = list(range(args.startyear, args.endyear + 1)) - + years = _construct_year_list(args) synoptic, statistics, days, hours = _set_period_args(args) # try to build and send download request - era5 = efetch.Fetch(years, - months=args.months, - days=days, - hours=hours, - variables=args.variables, - outputformat=args.format, - outputprefix=args.outputprefix, - period=args.command, - ensemble=args.ensemble, - synoptic=synoptic, - statistics=statistics, - pressurelevels=args.levels, - threads=args.threads, - merge=args.merge) + era5 = efetch.Fetch( + years, + months=args.months, + days=days, + hours=hours, + variables=args.variables, + outputformat=args.format, + outputprefix=args.outputprefix, + period=args.command, + ensemble=args.ensemble, + synoptic=synoptic, + statistics=statistics, + pressurelevels=args.levels, + threads=args.threads, + merge=args.merge, + prelimbe=args.prelimbe, + ) era5.fetch(dryrun=args.dryrun) return True diff --git a/era5cli/fetch.py b/era5cli/fetch.py index 6333e79..d67b1e6 100644 --- a/era5cli/fetch.py +++ b/era5cli/fetch.py @@ -57,13 +57,15 @@ class Fetch: Indicating if files should be downloaded. By default files will be downloaded. For a dryrun the cdsapi request will be written to stdout. + prelimbe: bool + Whether to download the preliminary back extension (1950-1978). 
""" def __init__(self, years: list, months: list, days: list, hours: list, variables: list, outputformat: str, outputprefix: str, period: str, ensemble: bool, statistics=None, synoptic=None, pressurelevels=None, - merge=False, threads=None): + merge=False, threads=None, prelimbe=False): """Initialization of Fetch class.""" self.months = era5cli.utils._zpad_months(months) """list(str): List of zero-padded strings of months @@ -107,6 +109,9 @@ class Fetch: """bool: Whether to get monthly averaged by hour of day (synoptic=True) or monthly means of daily means (synoptic=False).""" + self.prelimbe = prelimbe + """bool: Whether to select from the ERA5 preliminary back + extension which supports years from 1950 to 1978""" def fetch(self, dryrun=False): """Split calls and fetch results. @@ -195,17 +200,25 @@ class Fetch: elif not self.ensemble: producttype += "reanalysis" - if self.period == "monthly": + if self.period == "monthly" and not self.prelimbe: producttype = "monthly_averaged_" + producttype if self.synoptic: producttype += "_by_hour_of_day" - elif self.period == "hourly": - if self.ensemble and self.statistics: - producttype = [ - "ensemble_members", - "ensemble_mean", - "ensemble_spread", - ] + elif self.period == "monthly" and self.prelimbe: + if self.ensemble: + producttype = "members-" + elif not self.ensemble: + producttype = "reanalysis-" + if self.synoptic: + producttype += "synoptic-monthly-means" + elif not self.synoptic: + producttype += "monthly-means-of-daily-means" + elif self.period == "hourly" and self.ensemble and self.statistics: + producttype = [ + "ensemble_members", + "ensemble_mean", + "ensemble_spread", + ] return producttype @@ -253,6 +266,9 @@ class Fetch: if self.days: request["day"] = self.days + if self.prelimbe: + name += "-preliminary-back-extension" + return(name, request) def _exit(self):
Unexpected behavior for --statistics flag When downloading hourly data you can select the `--statistics` flag without using the `--ensemble` flag. This will result in downloading the reanalysis data instead and will ignore the `--statistics` flag. The `--statistics` flag without `--ensemble` should at least give a warning or an error, as adding this flag is meaningless when not downloading ensemble data.
eWaterCycle/era5cli
diff --git a/tests/test_cli.py b/tests/test_cli.py index 2be478d..13c1cda 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -47,6 +47,14 @@ def test_period_args(): # Period_args consists of (synoptic, statistics, days, hours) assert period_args == (True, None, None, [4, 7]) + argv = ['monthly', '--startyear', '2008', + '--variables', 'total_precipitation', + '--synoptic', '--ensemble'] + args = cli._parse_args(argv) + period_args = cli._set_period_args(args) + # Period_args consists of (synoptic, statistics, days, hours) + assert period_args == (True, None, None, range(0, 24)) + # test whether the info option does not end up in _set_period_args argv = ['info', '2Dvars'] args = cli._parse_args(argv) @@ -71,6 +79,22 @@ def test_main_fetch(fetch): with pytest.raises(AssertionError): assert cli._execute(args) + # should give an AssertionError if years are out of bounds + argv = ['hourly', '--startyear', '1950', + '--variables', 'total_precipitation', '--statistics', + '--endyear', '2007', '--ensemble'] + args = cli._parse_args(argv) + with pytest.raises(AssertionError): + assert cli._execute(args) + + # should give an AssertionError if years are out of bounds + argv = ['hourly', '--startyear', '1950', + '--variables', 'total_precipitation', '--statistics', + '--endyear', '2007', '--ensemble', '--prelimbe'] + args = cli._parse_args(argv) + with pytest.raises(AssertionError): + assert cli._execute(args) + # monthly call without endyear argv = ['monthly', '--startyear', '2008', '--variables', 'total_precipitation', '--synoptic', diff --git a/tests/test_fetch.py b/tests/test_fetch.py index 1690c26..cff9ff8 100644 --- a/tests/test_fetch.py +++ b/tests/test_fetch.py @@ -9,7 +9,8 @@ def initialize(outputformat='netcdf', merge=False, statistics=None, synoptic=None, ensemble=True, pressurelevels=None, threads=2, period='hourly', variables=['total_precipitation'], years=[2008, 2009], months=list(range(1, 13)), - days=list(range(1, 32)), hours=list(range(0, 24))): + days=list(range(1, 32)), hours=list(range(0, 24)), + prelimbe=False): """Initializer of the class.""" era5 = fetch.Fetch(years=years, months=months, @@ -24,7 +25,8 @@ def initialize(outputformat='netcdf', merge=False, statistics=None, synoptic=synoptic, pressurelevels=pressurelevels, merge=merge, - threads=threads) + threads=threads, + prelimbe=prelimbe) return era5 @@ -43,7 +45,8 @@ def test_init(): synoptic=None, pressurelevels=None, merge=False, - threads=2) + threads=2, + prelimbe=False) valid_months = ['01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12'] @@ -71,6 +74,7 @@ def test_init(): assert era5.pressure_levels is None assert not era5.merge assert era5.threads == 2 + assert not era5.prelimbe # initializing hourly variable with days=None should result in ValueError with pytest.raises(TypeError): @@ -247,10 +251,24 @@ def test_product_type(): producttype = era5._product_type() assert producttype == 'monthly_averaged_reanalysis' + era5.prelimbe = True + producttype = era5._product_type() + assert producttype == 'reanalysis-monthly-means-of-daily-means' + + era5.prelimbe = False era5.synoptic = True producttype = era5._product_type() assert producttype == 'monthly_averaged_reanalysis_by_hour_of_day' + era5.prelimbe = True + producttype = era5._product_type() + assert producttype == 'reanalysis-synoptic-monthly-means' + + era5.ensemble = True + producttype = era5._product_type() + assert producttype == 'members-synoptic-monthly-means' + + era5.prelimbe = False era5.ensemble = False era5.statistics = True 
producttype = era5._product_type() @@ -278,7 +296,6 @@ def test_build_request(): '12:00', '13:00', '14:00', '15:00', '16:00', '17:00', '18:00', '19:00', '20:00', '21:00', '22:00', '23:00'], 'format': 'netcdf'} - print(request['day']) assert request == req # monthly data @@ -298,6 +315,28 @@ def test_build_request(): 'format': 'netcdf'} assert request == req + # preliminary back extension + era5 = initialize(period='monthly', + variables=['total_precipitation'], + years=[1970], + prelimbe=True) + (name, request) = era5._build_request('total_precipitation', [1970]) + print(request) + assert name == ( + "reanalysis-era5-single-levels-monthly" + "-means-preliminary-back-extension" + ) + req = {'variable': 'total_precipitation', 'year': [1970], + 'product_type': 'members-monthly-means-of-daily-means', + 'month': ['01', '02', '03', '04', '05', '06', + '07', '08', '09', '10', '11', '12'], + 'time': ['00:00', '01:00', '02:00', '03:00', '04:00', '05:00', + '06:00', '07:00', '08:00', '09:00', '10:00', '11:00', + '12:00', '13:00', '14:00', '15:00', '16:00', '17:00', + '18:00', '19:00', '20:00', '21:00', '22:00', '23:00'], + 'format': 'netcdf'} + assert request == req + # requesting 3d variable with pressurelevels=None should give a ValueError era5 = initialize(variables=['temperature'], pressurelevels=None) with pytest.raises(ValueError):
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 3 }, "num_modified_files": 2 }
1.0
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc libhdf5-dev" ], "python": "3.9", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==25.3.0 cdsapi==0.7.5 certifi==2025.1.31 cftime==1.6.4.post1 charset-normalizer==3.4.1 datapi==0.3.0 dill==0.3.9 -e git+https://github.com/eWaterCycle/era5cli.git@2c2dffd3aabe02e29461e8e8bfb6ac9ce85c4465#egg=era5cli exceptiongroup==1.2.2 idna==3.10 iniconfig==2.1.0 multiprocess==0.70.17 multiurl==0.3.5 netCDF4==1.7.2 numpy==2.0.2 packaging==24.2 pathos==0.3.3 pluggy==1.5.0 pox==0.3.5 ppft==1.7.6.9 PTable==0.9.2 pytest==8.3.5 python-dateutil==2.9.0.post0 pytz==2025.2 requests==2.32.3 six==1.17.0 tomli==2.2.1 tqdm==4.67.1 typing_extensions==4.13.0 urllib3==2.3.0
name: era5cli channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==25.3.0 - cdsapi==0.7.5 - certifi==2025.1.31 - cftime==1.6.4.post1 - charset-normalizer==3.4.1 - datapi==0.3.0 - dill==0.3.9 - exceptiongroup==1.2.2 - idna==3.10 - iniconfig==2.1.0 - multiprocess==0.70.17 - multiurl==0.3.5 - netcdf4==1.7.2 - numpy==2.0.2 - packaging==24.2 - pathos==0.3.3 - pluggy==1.5.0 - pox==0.3.5 - ppft==1.7.6.9 - ptable==0.9.2 - pytest==8.3.5 - python-dateutil==2.9.0.post0 - pytz==2025.2 - requests==2.32.3 - six==1.17.0 - tomli==2.2.1 - tqdm==4.67.1 - typing-extensions==4.13.0 - urllib3==2.3.0 prefix: /opt/conda/envs/era5cli
[ "tests/test_cli.py::test_main_fetch", "tests/test_fetch.py::test_init", "tests/test_fetch.py::test_fetch_nodryrun", "tests/test_fetch.py::test_fetch_dryrun", "tests/test_fetch.py::test_extension", "tests/test_fetch.py::test_define_outputfilename", "tests/test_fetch.py::test_number_outputfiles", "tests/test_fetch.py::test_product_type", "tests/test_fetch.py::test_build_request" ]
[]
[ "tests/test_cli.py::test_parse_args", "tests/test_cli.py::test_period_args", "tests/test_cli.py::test_main_info" ]
[]
Apache License 2.0
9,090
1,700
[ "era5cli/cli.py", "era5cli/fetch.py" ]
elastic__elasticsearch-dsl-py-1464
3e5703f0c8f6f682c7eea3db9413470876cb254f
2020-12-03 16:05:45
fc14803338c0f003c173dec809f37154a6310012
diff --git a/elasticsearch_dsl/query.py b/elasticsearch_dsl/query.py index cedd0a7..642ce2c 100644 --- a/elasticsearch_dsl/query.py +++ b/elasticsearch_dsl/query.py @@ -499,6 +499,11 @@ class Script(Query): name = "script" +class ScriptScore(Query): + name = "script_score" + _param_defs = {"query": {"type": "query"}} + + class Type(Query): name = "type"
script_score not working Using `script_score` throws `ValueError(Q() can only accept dict with a single query...` ```json { "query": { "script_score": { "query": { "match_all": {} }, "script": { "source": """ double weight = 1; if (doc['type'].value == 'REVIEW') { weight = 1.03; } return weight * decayDateExp(params.origin, params.scale, params.offset, params.decay, doc['modified'].value); """, "params": { "origin": "2020-10-30T00:00:00", "scale": "30d", "offset": "1h", "decay": 0.8 } } } } } ``` I'm trying to convert the above but so far unsuccessful.
elastic/elasticsearch-dsl-py
diff --git a/tests/test_query.py b/tests/test_query.py index 2e58040..2c9823e 100644 --- a/tests/test_query.py +++ b/tests/test_query.py @@ -538,3 +538,18 @@ def test_function_score_from_dict(): assert isinstance(sf, function.BoostFactor) assert 6 == sf.value assert {"boost_factor": 6} == sf.to_dict() + + +def test_script_score(): + d = { + "script_score": { + "query": {"match_all": {}}, + "script": {"source": "...", "params": {}}, + } + } + q = query.Q(d) + + assert isinstance(q, query.ScriptScore) + assert isinstance(q.query, query.MatchAll) + assert q.script == {"source": "...", "params": {}} + assert q.to_dict() == d
{ "commit_name": "merge_commit", "failed_lite_validators": [], "has_test_patch": true, "is_lite": true, "llm_score": { "difficulty_score": 1, "issue_text_score": 2, "test_score": 0 }, "num_modified_files": 1 }
7.3
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[develop]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.8", "reqs_path": [ "requirements/base.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
alabaster==0.7.13 babel==2.17.0 certifi==2025.1.31 charset-normalizer==3.4.1 coverage==4.5.4 docutils==0.20.1 elasticsearch==7.17.12 -e git+https://github.com/elastic/elasticsearch-dsl-py.git@3e5703f0c8f6f682c7eea3db9413470876cb254f#egg=elasticsearch_dsl exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work idna==3.10 imagesize==1.4.1 importlib_metadata==8.5.0 iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work Jinja2==3.1.6 MarkupSafe==2.1.5 mock==5.2.0 packaging @ file:///croot/packaging_1720101850331/work pluggy @ file:///tmp/build/80754af9/pluggy_1648042571233/work Pygments==2.19.1 pytest @ file:///croot/pytest_1717793244625/work pytest-cov==2.10.1 pytest-mock==2.0.0 python-dateutil==2.9.0.post0 pytz==2025.2 requests==2.32.3 six==1.17.0 snowballstemmer==2.2.0 Sphinx==7.1.2 sphinx-rtd-theme==3.0.2 sphinxcontrib-applehelp==1.0.4 sphinxcontrib-devhelp==1.0.2 sphinxcontrib-htmlhelp==2.0.1 sphinxcontrib-jquery==4.1 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==1.0.3 sphinxcontrib-serializinghtml==1.1.5 tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work urllib3==1.26.20 zipp==3.20.2
name: elasticsearch-dsl-py channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - exceptiongroup=1.2.0=py38h06a4308_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - packaging=24.1=py38h06a4308_0 - pip=24.2=py38h06a4308_0 - pluggy=1.0.0=py38h06a4308_1 - pytest=7.4.4=py38h06a4308_0 - python=3.8.20=he870216_0 - readline=8.2=h5eee18b_0 - setuptools=75.1.0=py38h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tomli=2.0.1=py38h06a4308_0 - wheel=0.44.0=py38h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - alabaster==0.7.13 - babel==2.17.0 - certifi==2025.1.31 - charset-normalizer==3.4.1 - coverage==4.5.4 - docutils==0.20.1 - elasticsearch==7.17.12 - idna==3.10 - imagesize==1.4.1 - importlib-metadata==8.5.0 - jinja2==3.1.6 - markupsafe==2.1.5 - mock==5.2.0 - pygments==2.19.1 - pytest-cov==2.10.1 - pytest-mock==2.0.0 - python-dateutil==2.9.0.post0 - pytz==2025.2 - requests==2.32.3 - six==1.17.0 - snowballstemmer==2.2.0 - sphinx==7.1.2 - sphinx-rtd-theme==3.0.2 - sphinxcontrib-applehelp==1.0.4 - sphinxcontrib-devhelp==1.0.2 - sphinxcontrib-htmlhelp==2.0.1 - sphinxcontrib-jquery==4.1 - sphinxcontrib-jsmath==1.0.1 - sphinxcontrib-qthelp==1.0.3 - sphinxcontrib-serializinghtml==1.1.5 - urllib3==1.26.20 - zipp==3.20.2 prefix: /opt/conda/envs/elasticsearch-dsl-py
[ "tests/test_query.py::test_script_score" ]
[]
[ "tests/test_query.py::test_empty_Q_is_match_all", "tests/test_query.py::test_match_to_dict", "tests/test_query.py::test_match_to_dict_extra", "tests/test_query.py::test_fuzzy_to_dict", "tests/test_query.py::test_prefix_to_dict", "tests/test_query.py::test_term_to_dict", "tests/test_query.py::test_bool_to_dict", "tests/test_query.py::test_dismax_to_dict", "tests/test_query.py::test_bool_from_dict_issue_318", "tests/test_query.py::test_repr", "tests/test_query.py::test_query_clone", "tests/test_query.py::test_bool_converts_its_init_args_to_queries", "tests/test_query.py::test_two_queries_make_a_bool", "tests/test_query.py::test_other_and_bool_appends_other_to_must", "tests/test_query.py::test_bool_and_other_appends_other_to_must", "tests/test_query.py::test_bool_and_other_sets_min_should_match_if_needed", "tests/test_query.py::test_bool_with_different_minimum_should_match_should_not_be_combined", "tests/test_query.py::test_empty_bool_has_min_should_match_0", "tests/test_query.py::test_query_and_query_creates_bool", "tests/test_query.py::test_match_all_and_query_equals_other", "tests/test_query.py::test_not_match_all_is_match_none", "tests/test_query.py::test_not_match_none_is_match_all", "tests/test_query.py::test_invert_empty_bool_is_match_none", "tests/test_query.py::test_match_none_or_query_equals_query", "tests/test_query.py::test_match_none_and_query_equals_match_none", "tests/test_query.py::test_bool_and_bool", "tests/test_query.py::test_bool_and_bool_with_min_should_match", "tests/test_query.py::test_inverted_query_becomes_bool_with_must_not", "tests/test_query.py::test_inverted_query_with_must_not_become_should", "tests/test_query.py::test_inverted_query_with_must_and_must_not", "tests/test_query.py::test_double_invert_returns_original_query", "tests/test_query.py::test_bool_query_gets_inverted_internally", "tests/test_query.py::test_match_all_or_something_is_match_all", "tests/test_query.py::test_or_produces_bool_with_should", "tests/test_query.py::test_or_bool_doesnt_loop_infinitely_issue_37", "tests/test_query.py::test_or_bool_doesnt_loop_infinitely_issue_96", "tests/test_query.py::test_bool_will_append_another_query_with_or", "tests/test_query.py::test_bool_queries_with_only_should_get_concatenated", "tests/test_query.py::test_two_bool_queries_append_one_to_should_if_possible", "tests/test_query.py::test_queries_are_registered", "tests/test_query.py::test_defining_query_registers_it", "tests/test_query.py::test_Q_passes_query_through", "tests/test_query.py::test_Q_constructs_query_by_name", "tests/test_query.py::test_Q_translates_double_underscore_to_dots_in_param_names", "tests/test_query.py::test_Q_doesn_translate_double_underscore_to_dots_in_param_names", "tests/test_query.py::test_Q_constructs_simple_query_from_dict", "tests/test_query.py::test_Q_constructs_compound_query_from_dict", "tests/test_query.py::test_Q_raises_error_when_passed_in_dict_and_params", "tests/test_query.py::test_Q_raises_error_when_passed_in_query_and_params", "tests/test_query.py::test_Q_raises_error_on_unknown_query", "tests/test_query.py::test_match_all_and_anything_is_anything", "tests/test_query.py::test_function_score_with_functions", "tests/test_query.py::test_function_score_with_no_function_is_boost_factor", "tests/test_query.py::test_function_score_to_dict", "tests/test_query.py::test_function_score_with_single_function", "tests/test_query.py::test_function_score_from_dict" ]
[]
Apache License 2.0
9,104
125
[ "elasticsearch_dsl/query.py" ]
ASFHyP3__hyp3-sdk-51
67e33235f7dc3b98241fe34d97a4fae58873590c
2020-12-07 19:19:41
56cfb700341a0de44ee0f2f3548d5ed6c534d659
diff --git a/hyp3_sdk/hyp3.py b/hyp3_sdk/hyp3.py index 7d90095..baf69f4 100644 --- a/hyp3_sdk/hyp3.py +++ b/hyp3_sdk/hyp3.py @@ -6,6 +6,7 @@ from urllib.parse import urljoin from requests.exceptions import HTTPError, RequestException +import hyp3_sdk from hyp3_sdk.exceptions import HyP3Error, ValidationError from hyp3_sdk.jobs import Batch, Job from hyp3_sdk.util import get_authenticated_session @@ -28,6 +29,7 @@ class HyP3: """ self.url = api_url self.session = get_authenticated_session(username, password) + self.session.headers.update({'User-Agent': f'{hyp3_sdk.__name__}/{hyp3_sdk.__version__}'}) def find_jobs(self, start: Optional[datetime] = None, end: Optional[datetime] = None, status: Optional[str] = None, name: Optional[str] = None) -> Batch:
Add custom User Agent header to hyp3 api session e.g. `User-Agent: hyp3-sdk v0.1.2` so we can identify SDK-generated requests in the API access logs, separate from other requests made via `requests`.
ASFHyP3/hyp3-sdk
diff --git a/tests/test_hyp3.py b/tests/test_hyp3.py index 626ee05..9aa05e9 100644 --- a/tests/test_hyp3.py +++ b/tests/test_hyp3.py @@ -1,4 +1,3 @@ -import json from datetime import datetime, timedelta from urllib.parse import urljoin @@ -10,6 +9,18 @@ from hyp3_sdk import HyP3, Job hyp3_sdk.TESTING = True [email protected] +def test_session_headers(): + api = HyP3() + responses.add(responses.GET, urljoin(api.url, '/user'), json={'foo': 'bar'}) + + api.session.get(urljoin(api.url, '/user')) + assert responses.calls[0].request.headers['User-Agent'] == f'hyp3_sdk/{hyp3_sdk.__version__}' + + api.my_info() + assert responses.calls[1].request.headers['User-Agent'] == f'hyp3_sdk/{hyp3_sdk.__version__}' + + @responses.activate def test_find_jobs(get_mock_job): api_response_mock = { @@ -23,7 +34,7 @@ def test_find_jobs(get_mock_job): ] } api = HyP3() - responses.add(responses.GET, urljoin(api.url, '/jobs'), body=json.dumps(api_response_mock)) + responses.add(responses.GET, urljoin(api.url, '/jobs'), json=api_response_mock) response = api.find_jobs() assert len(response) == 3 @@ -32,7 +43,7 @@ def test_find_jobs(get_mock_job): def test_get_job_by_id(get_mock_job): job = get_mock_job() api = HyP3() - responses.add(responses.GET, urljoin(api.url, f'/jobs/{job.job_id}'), body=json.dumps(job.to_dict())) + responses.add(responses.GET, urljoin(api.url, f'/jobs/{job.job_id}'), json=job.to_dict()) response = api._get_job_by_id(job.job_id) assert response == job @@ -45,9 +56,9 @@ def test_watch(get_mock_job): api = HyP3() for ii in range(3): responses.add(responses.GET, urljoin(api.url, f'/jobs/{incomplete_job.job_id}'), - body=json.dumps(incomplete_job.to_dict())) + json=incomplete_job.to_dict()) responses.add(responses.GET, urljoin(api.url, f'/jobs/{incomplete_job.job_id}'), - body=json.dumps(complete_job.to_dict())) + json=complete_job.to_dict()) response = api.watch(incomplete_job, interval=0.05) assert response == complete_job responses.assert_call_count(urljoin(api.url, f'/jobs/{incomplete_job.job_id}'), 4) @@ -60,7 +71,7 @@ def test_refresh(get_mock_job): new_job.status_code = 'SUCCEEDED' api = HyP3() - responses.add(responses.GET, urljoin(api.url, f'/jobs/{job.job_id}'), body=json.dumps(new_job.to_dict())) + responses.add(responses.GET, urljoin(api.url, f'/jobs/{job.job_id}'), json=new_job.to_dict()) response = api.refresh(job) assert response == new_job @@ -74,7 +85,7 @@ def test_submit_job_dict(get_mock_job): ] } api = HyP3() - responses.add(responses.POST, urljoin(api.url, '/jobs'), body=json.dumps(api_response)) + responses.add(responses.POST, urljoin(api.url, '/jobs'), json=api_response) response = api.submit_job_dict(job.to_dict(for_resubmit=True)) assert response == job @@ -88,7 +99,7 @@ def test_submit_autorift_job(get_mock_job): ] } api = HyP3() - responses.add(responses.POST, urljoin(api.url, '/jobs'), body=json.dumps(api_response)) + responses.add(responses.POST, urljoin(api.url, '/jobs'), json=api_response) response = api.submit_autorift_job('g1', 'g2') assert response == job @@ -102,7 +113,7 @@ def test_submit_rtc_job(get_mock_job): ] } api = HyP3() - responses.add(responses.POST, urljoin(api.url, '/jobs'), body=json.dumps(api_response)) + responses.add(responses.POST, urljoin(api.url, '/jobs'), json=api_response) response = api.submit_rtc_job('g1') assert response == job @@ -116,7 +127,7 @@ def test_submit_insar_job(get_mock_job): ] } api = HyP3() - responses.add(responses.POST, urljoin(api.url, '/jobs'), body=json.dumps(api_response)) + responses.add(responses.POST, urljoin(api.url, 
'/jobs'), json=api_response) response = api.submit_insar_job('g1', 'g2') assert response == job @@ -135,7 +146,7 @@ def test_my_info(): 'user_id': 'someUser' } api = HyP3() - responses.add(responses.GET, urljoin(api.url, '/user'), body=json.dumps(api_response)) + responses.add(responses.GET, urljoin(api.url, '/user'), json=api_response) response = api.my_info() assert response == api_response @@ -154,6 +165,6 @@ def test_check_quota(): 'user_id': 'someUser' } api = HyP3() - responses.add(responses.GET, urljoin(api.url, '/user'), body=json.dumps(api_response)) + responses.add(responses.GET, urljoin(api.url, '/user'), json=api_response) response = api.check_quota() assert response == api_response['quota']['remaining']
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 0 }, "num_modified_files": 1 }
0.3
{ "env_vars": null, "env_yml_path": [ "conda-env.yml" ], "install": "python -m pip install -e .[develop]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "environment.yml", "pip_packages": null, "pre_install": null, "python": "3.9", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
annotated-types==0.7.0 Authlib==1.5.1 babel @ file:///home/conda/feedstock_root/build_artifacts/babel_1738490167835/work backrefs @ file:///home/conda/feedstock_root/build_artifacts/backrefs_1740887580136/work boto3==1.37.23 boto3-stubs==1.37.23 botocore==1.37.23 botocore-stubs==1.37.23 Brotli @ file:///home/conda/feedstock_root/build_artifacts/brotli-split_1725267488082/work certifi @ file:///home/conda/feedstock_root/build_artifacts/certifi_1739515848642/work/certifi cffi @ file:///home/conda/feedstock_root/build_artifacts/cffi_1725571112467/work charset-normalizer @ file:///home/conda/feedstock_root/build_artifacts/charset-normalizer_1735929714516/work click @ file:///home/conda/feedstock_root/build_artifacts/click_1734858813237/work colorama @ file:///home/conda/feedstock_root/build_artifacts/colorama_1733218098505/work coverage @ file:///home/conda/feedstock_root/build_artifacts/coverage_1743381224823/work cryptography==44.0.2 dparse==0.6.4 exceptiongroup @ file:///home/conda/feedstock_root/build_artifacts/exceptiongroup_1733208806608/work filelock==3.16.1 ghp-import @ file:///home/conda/feedstock_root/build_artifacts/ghp-import_1734344360713/work h2 @ file:///home/conda/feedstock_root/build_artifacts/h2_1738578511449/work hpack @ file:///home/conda/feedstock_root/build_artifacts/hpack_1737618293087/work -e git+https://github.com/ASFHyP3/hyp3-sdk.git@67e33235f7dc3b98241fe34d97a4fae58873590c#egg=hyp3_sdk hyperframe @ file:///home/conda/feedstock_root/build_artifacts/hyperframe_1737618333194/work idna @ file:///home/conda/feedstock_root/build_artifacts/idna_1733211830134/work importlib_metadata @ file:///home/conda/feedstock_root/build_artifacts/importlib-metadata_1737420181517/work iniconfig @ file:///home/conda/feedstock_root/build_artifacts/iniconfig_1733223141826/work Jinja2 @ file:///home/conda/feedstock_root/build_artifacts/jinja2_1741263328855/work jmespath==1.0.1 joblib==1.4.2 Markdown @ file:///home/conda/feedstock_root/build_artifacts/markdown_1710435156458/work markdown-it-py==3.0.0 MarkupSafe @ file:///home/conda/feedstock_root/build_artifacts/markupsafe_1733219680183/work marshmallow==3.26.1 mdurl==0.1.2 mergedeep @ file:///home/conda/feedstock_root/build_artifacts/mergedeep_1734156985434/work mkdocs @ file:///home/conda/feedstock_root/build_artifacts/mkdocs_1734344575329/work mkdocs-autorefs==1.4.1 mkdocs-get-deps @ file:///home/conda/feedstock_root/build_artifacts/mkdocs-get-deps_1734352941277/work mkdocs-material @ file:///home/conda/feedstock_root/build_artifacts/mkdocs-material_1743393634987/work mkdocs-material-extensions @ file:///home/conda/feedstock_root/build_artifacts/mkdocs-material-extensions_1734640982920/work mkdocstrings==0.29.1 mypy-boto3-dynamodb==1.37.12 mypy-boto3-s3==1.37.0 nltk==3.9.1 packaging @ file:///home/conda/feedstock_root/build_artifacts/packaging_1733203243479/work paginate @ file:///home/conda/feedstock_root/build_artifacts/paginate_1734618550153/work pathspec @ file:///home/conda/feedstock_root/build_artifacts/pathspec_1733233363808/work platformdirs @ file:///home/conda/feedstock_root/build_artifacts/bld/rattler-build_platformdirs_1742485085/work pluggy @ file:///home/conda/feedstock_root/build_artifacts/pluggy_1733222765875/work psutil==6.1.1 pycparser @ file:///home/conda/feedstock_root/build_artifacts/bld/rattler-build_pycparser_1733195786/work pydantic==2.9.2 pydantic_core==2.23.4 Pygments @ file:///home/conda/feedstock_root/build_artifacts/pygments_1736243443484/work pymdown-extensions @ 
file:///home/conda/feedstock_root/build_artifacts/pymdown-extensions_1738439084124/work PySocks @ file:///home/conda/feedstock_root/build_artifacts/pysocks_1733217236728/work pytest @ file:///home/conda/feedstock_root/build_artifacts/pytest_1740946542080/work pytest-cov @ file:///home/conda/feedstock_root/build_artifacts/pytest-cov_1733223023082/work python-dateutil @ file:///home/conda/feedstock_root/build_artifacts/python-dateutil_1733215673016/work pytz @ file:///home/conda/feedstock_root/build_artifacts/pytz_1742920838005/work PyYAML @ file:///home/conda/feedstock_root/build_artifacts/pyyaml_1737454647378/work pyyaml_env_tag @ file:///home/conda/feedstock_root/build_artifacts/pyyaml-env-tag_1734344268003/work regex==2024.11.6 requests @ file:///home/conda/feedstock_root/build_artifacts/requests_1733217035951/work responses @ file:///home/conda/feedstock_root/build_artifacts/responses_1741755837680/work rich==14.0.0 ruamel.yaml==0.18.10 ruamel.yaml.clib==0.2.12 s3pypi==2.0.1 s3transfer==0.11.4 safety==3.3.1 safety-schemas==0.0.11 setuptools-scm @ file:///home/conda/feedstock_root/build_artifacts/setuptools_scm_1742403392659/work shellingham==1.5.4 six @ file:///home/conda/feedstock_root/build_artifacts/six_1733380938961/work toml @ file:///home/conda/feedstock_root/build_artifacts/toml_1734091811753/work tomli @ file:///home/conda/feedstock_root/build_artifacts/tomli_1733256695513/work tqdm==4.67.1 typer==0.15.2 types-awscrt==0.24.2 types-PyYAML @ file:///home/conda/feedstock_root/build_artifacts/types-pyyaml_1735564787326/work types-s3transfer==0.11.4 typing_extensions @ file:///home/conda/feedstock_root/build_artifacts/bld/rattler-build_typing_extensions_1743201626/work urllib3==1.26.20 watchdog @ file:///home/conda/feedstock_root/build_artifacts/watchdog_1730492870473/work zipp @ file:///home/conda/feedstock_root/build_artifacts/zipp_1732827521216/work zstandard==0.23.0
name: hyp3-sdk channels: - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=conda_forge - _openmp_mutex=4.5=2_gnu - babel=2.17.0=pyhd8ed1ab_0 - backrefs=5.8=pyhd8ed1ab_0 - brotli-python=1.1.0=py39hf88036b_2 - bzip2=1.0.8=h4bc722e_7 - ca-certificates=2025.1.31=hbcca054_0 - certifi=2025.1.31=pyhd8ed1ab_0 - cffi=1.17.1=py39h15c3d72_0 - charset-normalizer=3.4.1=pyhd8ed1ab_0 - click=8.1.8=pyh707e725_0 - colorama=0.4.6=pyhd8ed1ab_1 - coverage=7.8.0=py39h9399b63_0 - exceptiongroup=1.2.2=pyhd8ed1ab_1 - ghp-import=2.1.0=pyhd8ed1ab_2 - h2=4.2.0=pyhd8ed1ab_0 - hpack=4.1.0=pyhd8ed1ab_0 - hyperframe=6.1.0=pyhd8ed1ab_0 - idna=3.10=pyhd8ed1ab_1 - importlib-metadata=8.6.1=pyha770c72_0 - iniconfig=2.0.0=pyhd8ed1ab_1 - jinja2=3.1.6=pyhd8ed1ab_0 - ld_impl_linux-64=2.43=h712a8e2_4 - libffi=3.4.6=h2dba641_0 - libgcc=14.2.0=h767d61c_2 - libgcc-ng=14.2.0=h69a702a_2 - libgomp=14.2.0=h767d61c_2 - liblzma=5.6.4=hb9d3cd8_0 - libnsl=2.0.1=hd590300_0 - libsqlite=3.49.1=hee588c1_2 - libstdcxx=14.2.0=h8f9b012_2 - libuuid=2.38.1=h0b41bf4_0 - libxcrypt=4.4.36=hd590300_1 - libzlib=1.3.1=hb9d3cd8_2 - markdown=3.6=pyhd8ed1ab_0 - markupsafe=3.0.2=py39h9399b63_1 - mergedeep=1.3.4=pyhd8ed1ab_1 - mkdocs=1.6.1=pyhd8ed1ab_1 - mkdocs-get-deps=0.2.0=pyhd8ed1ab_1 - mkdocs-material=9.6.10=pyhd8ed1ab_0 - mkdocs-material-extensions=1.3.1=pyhd8ed1ab_1 - ncurses=6.5=h2d0b736_3 - openssl=3.4.1=h7b32b05_0 - packaging=24.2=pyhd8ed1ab_2 - paginate=0.5.7=pyhd8ed1ab_1 - pathspec=0.12.1=pyhd8ed1ab_1 - pip=25.0.1=pyh8b19718_0 - platformdirs=4.3.7=pyh29332c3_0 - pluggy=1.5.0=pyhd8ed1ab_1 - pycparser=2.22=pyh29332c3_1 - pygments=2.19.1=pyhd8ed1ab_0 - pymdown-extensions=10.14.3=pyhd8ed1ab_0 - pysocks=1.7.1=pyha55dd90_7 - pytest=8.3.5=pyhd8ed1ab_0 - pytest-cov=6.0.0=pyhd8ed1ab_1 - python=3.9.21=h9c0c6dc_1_cpython - python-dateutil=2.9.0.post0=pyhff2d567_1 - python_abi=3.9=5_cp39 - pytz=2025.2=pyhd8ed1ab_0 - pyyaml=6.0.2=py39h9399b63_2 - pyyaml-env-tag=0.1=pyhd8ed1ab_1 - readline=8.2=h8c095d6_2 - requests=2.32.3=pyhd8ed1ab_1 - responses=0.25.7=pyhd8ed1ab_0 - setuptools=75.8.2=pyhff2d567_0 - setuptools-scm=8.2.1=pyhd8ed1ab_0 - setuptools_scm=8.2.1=hd8ed1ab_0 - six=1.17.0=pyhd8ed1ab_0 - tk=8.6.13=noxft_h4845f30_101 - toml=0.10.2=pyhd8ed1ab_1 - tomli=2.2.1=pyhd8ed1ab_1 - types-pyyaml=6.0.12.20241230=pyhd8ed1ab_0 - typing-extensions=4.13.0=h9fa5a19_1 - typing_extensions=4.13.0=pyh29332c3_1 - tzdata=2025b=h78e105d_0 - watchdog=6.0.0=py39hf3d152e_0 - wheel=0.45.1=pyhd8ed1ab_1 - yaml=0.2.5=h7f98852_2 - zipp=3.21.0=pyhd8ed1ab_1 - zstandard=0.23.0=py39h8cd3c5a_1 - pip: - annotated-types==0.7.0 - authlib==1.5.1 - boto3==1.37.23 - boto3-stubs==1.37.23 - botocore==1.37.23 - botocore-stubs==1.37.23 - cryptography==44.0.2 - dparse==0.6.4 - filelock==3.16.1 - hyp3-sdk==0.3.3.dev6+g67e3323 - jmespath==1.0.1 - joblib==1.4.2 - markdown-it-py==3.0.0 - marshmallow==3.26.1 - mdurl==0.1.2 - mkdocs-autorefs==1.4.1 - mkdocstrings==0.29.1 - mypy-boto3-dynamodb==1.37.12 - mypy-boto3-s3==1.37.0 - nltk==3.9.1 - psutil==6.1.1 - pydantic==2.9.2 - pydantic-core==2.23.4 - regex==2024.11.6 - rich==14.0.0 - ruamel-yaml==0.18.10 - ruamel-yaml-clib==0.2.12 - s3pypi==2.0.1 - s3transfer==0.11.4 - safety==3.3.1 - safety-schemas==0.0.11 - shellingham==1.5.4 - tqdm==4.67.1 - typer==0.15.2 - types-awscrt==0.24.2 - types-s3transfer==0.11.4 - urllib3==1.26.20 prefix: /opt/conda/envs/hyp3-sdk
[ "tests/test_hyp3.py::test_session_headers" ]
[]
[ "tests/test_hyp3.py::test_find_jobs", "tests/test_hyp3.py::test_get_job_by_id", "tests/test_hyp3.py::test_watch", "tests/test_hyp3.py::test_refresh", "tests/test_hyp3.py::test_submit_job_dict", "tests/test_hyp3.py::test_submit_autorift_job", "tests/test_hyp3.py::test_submit_rtc_job", "tests/test_hyp3.py::test_submit_insar_job", "tests/test_hyp3.py::test_my_info", "tests/test_hyp3.py::test_check_quota" ]
[]
BSD 3-Clause "New" or "Revised" License
9,125
243
[ "hyp3_sdk/hyp3.py" ]
nipy__heudiconv-477
d855f64d5013f8a0e41789766a094d3c3a91552f
2020-12-07 20:54:01
ee5cebbb1b79bb2f865c3634c9b52095debc76a9
diff --git a/heudiconv/convert.py b/heudiconv/convert.py index 8e2c3c6..b88c232 100644 --- a/heudiconv/convert.py +++ b/heudiconv/convert.py @@ -235,7 +235,7 @@ def prep_conversion(sid, dicoms, outdir, heuristic, converter, anon_sid, def update_complex_name(metadata, filename, suffix): """ - Insert `_rec-<magnitude|phase>` entity into filename if data are from a + Insert `_part-<mag|phase>` entity into filename if data are from a sequence with magnitude/phase part. Parameters @@ -254,7 +254,10 @@ def update_complex_name(metadata, filename, suffix): Updated filename with rec entity added in appropriate position. """ # Some scans separate magnitude/phase differently - unsupported_types = ['_bold', '_phase', + # A small note: _phase is deprecated, but this may add part-mag to + # magnitude data while leaving phase data with a separate suffix, + # depending on how one sets up their heuristic. + unsupported_types = ['_phase', '_magnitude', '_magnitude1', '_magnitude2', '_phasediff', '_phase1', '_phase2'] if any(ut in filename for ut in unsupported_types): @@ -262,7 +265,7 @@ def update_complex_name(metadata, filename, suffix): # Check to see if it is magnitude or phase part: if 'M' in metadata.get('ImageType'): - mag_or_phase = 'magnitude' + mag_or_phase = 'mag' elif 'P' in metadata.get('ImageType'): mag_or_phase = 'phase' else: @@ -272,19 +275,19 @@ def update_complex_name(metadata, filename, suffix): filetype = '_' + filename.split('_')[-1] # Insert rec label - if not ('_rec-%s' % mag_or_phase) in filename: - # If "_rec-" is specified, prepend the 'mag_or_phase' value. - if '_rec-' in filename: + if not ('_part-%s' % mag_or_phase) in filename: + # If "_part-" is specified, prepend the 'mag_or_phase' value. + if '_part-' in filename: raise BIDSError( - "Reconstruction label for images will be automatically set, " + "Part label for images will be automatically set, " "remove from heuristic" ) # Insert it **before** the following string(s), whichever appears first. - for label in ['_dir', '_run', '_mod', '_echo', '_recording', '_proc', '_space', filetype]: + for label in ['_recording', '_proc', '_space', filetype]: if (label == filetype) or (label in filename): filename = filename.replace( - label, "_rec-%s%s" % (mag_or_phase, label) + label, "_part-%s%s" % (mag_or_phase, label) ) break
Replace rec entity with part entity for complex-valued data <!-- DO NOT DELETE THIS! This template is used to facilitate issue resolution. All text in <!-> tags will not be displayed. --> ### Summary <!-- If you are having conversion troubles, please share as much relevant information as possible. This includes, but is not limited to: - log of conversion - heuristic --> Now that bids-standard/bids-specification#424 has been merged, BIDS will support the `part` entity for dissociating magnitude and phase data in its next release. Currently, heudiconv uses `rec-<magnitude|phase>` instead. To fix this, [`heudiconv.convert.update_complex_name()`](https://github.com/nipy/heudiconv/blob/d855f64d5013f8a0e41789766a094d3c3a91552f/heudiconv/convert.py#L236) needs to be updated to use `part` instead of `rec` (also `magnitude` needs to changed to `mag`). ### Platform details: Choose one: - [ ] Local environment <!-- If selected, please provide OS and python version --> - [ ] Container <!-- If selected, please provide container name and tag"--> - Heudiconv version: <!-- To check: run heudiconv with just the --version flag -->
nipy/heudiconv
diff --git a/heudiconv/tests/test_convert.py b/heudiconv/tests/test_convert.py index 7593cb0..75c2194 100644 --- a/heudiconv/tests/test_convert.py +++ b/heudiconv/tests/test_convert.py @@ -10,13 +10,13 @@ from heudiconv.bids import BIDSError def test_update_complex_name(): """Unit testing for heudiconv.convert.update_complex_name(), which updates - filenames with the rec field if appropriate. + filenames with the part field if appropriate. """ # Standard name update fn = 'sub-X_ses-Y_task-Z_run-01_sbref' metadata = {'ImageType': ['ORIGINAL', 'PRIMARY', 'P', 'MB', 'TE3', 'ND', 'MOSAIC']} suffix = 3 - out_fn_true = 'sub-X_ses-Y_task-Z_rec-phase_run-01_sbref' + out_fn_true = 'sub-X_ses-Y_task-Z_run-01_part-phase_sbref' out_fn_test = update_complex_name(metadata, fn, suffix) assert out_fn_test == out_fn_true # Catch an unsupported type and *do not* update @@ -26,12 +26,12 @@ def test_update_complex_name(): # Data type is missing from metadata so use suffix fn = 'sub-X_ses-Y_task-Z_run-01_sbref' metadata = {'ImageType': ['ORIGINAL', 'PRIMARY', 'MB', 'TE3', 'ND', 'MOSAIC']} - out_fn_true = 'sub-X_ses-Y_task-Z_rec-3_run-01_sbref' + out_fn_true = 'sub-X_ses-Y_task-Z_run-01_part-3_sbref' out_fn_test = update_complex_name(metadata, fn, suffix) assert out_fn_test == out_fn_true # Catch existing field with value that *does not match* metadata # and raise Exception - fn = 'sub-X_ses-Y_task-Z_rec-magnitude_run-01_sbref' + fn = 'sub-X_ses-Y_task-Z_run-01_part-mag_sbref' metadata = {'ImageType': ['ORIGINAL', 'PRIMARY', 'P', 'MB', 'TE3', 'ND', 'MOSAIC']} suffix = 3 with pytest.raises(BIDSError):
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 0 }, "num_modified_files": 1 }
0.8
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[all]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "tinydb", "inotify" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.7", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
annexremote==1.6.6 boto3==1.33.13 botocore==1.33.13 certifi @ file:///croot/certifi_1671487769961/work/certifi cffi==1.15.1 chardet==5.2.0 charset-normalizer==3.4.1 ci-info==0.3.0 click==8.1.8 cryptography==44.0.2 datalad==1.1.0 dcmstack==0.9.0 etelemetry==0.3.1 exceptiongroup==1.2.2 fasteners==0.19 filelock==3.12.2 -e git+https://github.com/nipy/heudiconv.git@d855f64d5013f8a0e41789766a094d3c3a91552f#egg=heudiconv humanize==4.6.0 idna==3.10 importlib-metadata==4.13.0 importlib-resources==5.12.0 iniconfig==2.0.0 inotify==0.2.10 iso8601==2.1.0 isodate==0.6.1 jaraco.classes==3.2.3 jeepney==0.9.0 jmespath==1.0.1 keyring==24.1.1 keyrings.alt==4.2.0 looseversion==1.3.0 lxml==5.3.1 mock==5.2.0 more-itertools==9.1.0 msgpack==1.0.5 networkx==2.6.3 nibabel==4.0.2 nipype==1.8.6 nose==1.3.7 numpy==1.21.6 packaging==24.0 patool==1.12 platformdirs==4.0.0 pluggy==1.2.0 prov==2.0.1 pycparser==2.21 pydicom==2.4.4 pydot==2.0.0 pylibjpeg-libjpeg==1.3.4 pyparsing==3.1.4 pytest==7.4.4 python-dateutil==2.9.0.post0 python-gitlab==3.15.0 rdflib==6.3.2 requests==2.31.0 requests-toolbelt==1.0.0 s3transfer==0.8.2 scipy==1.7.3 SecretStorage==3.3.3 simplejson==3.20.1 six==1.17.0 swebench-matterhorn @ file:///swebench_matterhorn tinydb==4.8.0 tomli==2.0.1 tqdm==4.67.1 traits==6.3.2 typing_extensions==4.7.1 urllib3==1.26.20 zipp==3.15.0
name: heudiconv channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2022.12.7=py37h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=22.3.1=py37h06a4308_0 - python=3.7.16=h7a1cb2a_0 - readline=8.2=h5eee18b_0 - setuptools=65.6.3=py37h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.38.4=py37h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - annexremote==1.6.6 - boto3==1.33.13 - botocore==1.33.13 - cffi==1.15.1 - chardet==5.2.0 - charset-normalizer==3.4.1 - ci-info==0.3.0 - click==8.1.8 - cryptography==44.0.2 - datalad==1.1.0 - dcmstack==0.9.0 - etelemetry==0.3.1 - exceptiongroup==1.2.2 - fasteners==0.19 - filelock==3.12.2 - humanize==4.6.0 - idna==3.10 - importlib-metadata==4.13.0 - importlib-resources==5.12.0 - iniconfig==2.0.0 - inotify==0.2.10 - iso8601==2.1.0 - isodate==0.6.1 - jaraco-classes==3.2.3 - jeepney==0.9.0 - jmespath==1.0.1 - keyring==24.1.1 - keyrings-alt==4.2.0 - looseversion==1.3.0 - lxml==5.3.1 - mock==5.2.0 - more-itertools==9.1.0 - msgpack==1.0.5 - networkx==2.6.3 - nibabel==4.0.2 - nipype==1.8.6 - nose==1.3.7 - numpy==1.21.6 - packaging==24.0 - patool==1.12 - platformdirs==4.0.0 - pluggy==1.2.0 - prov==2.0.1 - pycparser==2.21 - pydicom==2.4.4 - pydot==2.0.0 - pylibjpeg-libjpeg==1.3.4 - pyparsing==3.1.4 - pytest==7.4.4 - python-dateutil==2.9.0.post0 - python-gitlab==3.15.0 - rdflib==6.3.2 - requests==2.31.0 - requests-toolbelt==1.0.0 - s3transfer==0.8.2 - scipy==1.7.3 - secretstorage==3.3.3 - simplejson==3.20.1 - six==1.17.0 - swebench-matterhorn==0.0.0 - tinydb==4.8.0 - tomli==2.0.1 - tqdm==4.67.1 - traits==6.3.2 - typing-extensions==4.7.1 - urllib3==1.26.20 - zipp==3.15.0 prefix: /opt/conda/envs/heudiconv
[ "heudiconv/tests/test_convert.py::test_update_complex_name" ]
[]
[ "heudiconv/tests/test_convert.py::test_update_multiecho_name", "heudiconv/tests/test_convert.py::test_update_uncombined_name" ]
[]
Apache License 2.0
9,126
711
[ "heudiconv/convert.py" ]
boxed__mutmut-192
795b39baba4f95c109e6a8be33c7a4d4ef87df49
2020-12-11 01:52:25
795b39baba4f95c109e6a8be33c7a4d4ef87df49
diff --git a/mutmut/__init__.py b/mutmut/__init__.py index 78ad464..b77329a 100644 --- a/mutmut/__init__.py +++ b/mutmut/__init__.py @@ -910,8 +910,9 @@ def guess_paths_to_mutate(): class Progress(object): - def __init__(self, total): + def __init__(self, total, output_legend): self.total = total + self.output_legend = output_legend self.progress = 0 self.skipped = 0 self.killed_mutants = 0 @@ -920,7 +921,20 @@ class Progress(object): self.suspicious_mutants = 0 def print(self): - print_status('{}/{} 🎉 {} ⏰ {} 🤔 {} 🙁 {} 🔇 {}'.format(self.progress, self.total, self.killed_mutants, self.surviving_mutants_timeout, self.suspicious_mutants, self.surviving_mutants, self.skipped)) + print_status('{}/{} {} {} {} {} {} {} {} {} {} {}'.format( + self.progress, + self.total, + self.output_legend["killed"], + self.killed_mutants, + self.output_legend["timeout"], + self.surviving_mutants_timeout, + self.output_legend["suspicious"], + self.suspicious_mutants, + self.output_legend["survived"], + self.surviving_mutants, + self.output_legend["skipped"], + self.skipped) + ) def register(self, status): if status == BAD_SURVIVED: diff --git a/mutmut/__main__.py b/mutmut/__main__.py index 2136aee..43e6efc 100644 --- a/mutmut/__main__.py +++ b/mutmut/__main__.py @@ -100,6 +100,7 @@ DEFAULT_RUNNER = 'python -m pytest -x --assert=plain' @click.option('--untested-policy', type=click.Choice(['ignore', 'skipped', 'error', 'failure']), default='ignore') @click.option('--pre-mutation') @click.option('--post-mutation') [email protected]('--simple-output', is_flag=True, default=False, help="Swap emojis in mutmut output to plain text alternatives.") @config_from_setup_cfg( dict_synonyms='', paths_to_exclude='', @@ -113,7 +114,7 @@ def climain(command, argument, argument2, paths_to_mutate, backup, runner, tests test_time_multiplier, test_time_base, swallow_output, use_coverage, dict_synonyms, cache_only, version, suspicious_policy, untested_policy, pre_mutation, post_mutation, - use_patch_file, paths_to_exclude): + use_patch_file, paths_to_exclude, simple_output): """ commands:\n run [mutation id]\n @@ -137,14 +138,14 @@ commands:\n tests_dir, test_time_multiplier, test_time_base, swallow_output, use_coverage, dict_synonyms, cache_only, version, suspicious_policy, untested_policy, pre_mutation, - post_mutation, use_patch_file, paths_to_exclude)) + post_mutation, use_patch_file, paths_to_exclude, simple_output)) def main(command, argument, argument2, paths_to_mutate, backup, runner, tests_dir, test_time_multiplier, test_time_base, swallow_output, use_coverage, dict_synonyms, cache_only, version, suspicious_policy, untested_policy, pre_mutation, post_mutation, - use_patch_file, paths_to_exclude): + use_patch_file, paths_to_exclude, simple_output): """return exit code, after performing an mutation test run. :return: the exit code from executing the mutation tests @@ -223,6 +224,15 @@ def main(command, argument, argument2, paths_to_mutate, backup, runner, tests_di os.environ['PYTHONDONTWRITEBYTECODE'] = '1' # stop python from creating .pyc files using_testmon = '--testmon' in runner + output_legend = { + "killed": "🎉", + "timeout": "⏰", + "suspicious": "🤔", + "survived": "🙁", + "skipped": "🔇", + } + if simple_output: + output_legend = {key: key.upper() for (key, value) in output_legend.items()} print(""" - Mutation testing starting - @@ -237,12 +247,12 @@ Results are stored in .mutmut-cache. Print found mutants with `mutmut results`. Legend for output: -🎉 Killed mutants. The goal is for everything to end up in this bucket. 
-⏰ Timeout. Test suite took 10 times as long as the baseline so were killed. -🤔 Suspicious. Tests took a long time, but not long enough to be fatal. -🙁 Survived. This means your tests need to be expanded. -🔇 Skipped. Skipped. -""") +{killed} Killed mutants. The goal is for everything to end up in this bucket. +{timeout} Timeout. Test suite took 10 times as long as the baseline so were killed. +{suspicious} Suspicious. Tests took a long time, but not long enough to be fatal. +{survived} Survived. This means your tests need to be expanded. +{skipped} Skipped. Skipped. +""".format(**output_legend)) if runner is DEFAULT_RUNNER: try: import pytest @@ -309,7 +319,7 @@ Legend for output: print() print('2. Checking mutants') - progress = Progress(total=config.total) + progress = Progress(total=config.total, output_legend=output_legend) try: run_mutation_tests(config=config, progress=progress, mutations_by_file=mutations_by_file)
UTF-8 emoji icons fail to display in a Pycharm 2019.2.1 terminal in windows This was created on a windows 10 environment with Pycharm 2019.2.1. Running mutmut in the Pycharm native terminal fails to render the emojis indicating killed 🎉, suspicious 🤔, and surviving 🙁 mutants. For example: ```console (venv) >mutmut run --paths-to-mutate my_project - Mutation testing starting - These are the steps: 1. A full test suite run will be made to make sure we can run the tests successfully and we know how long it takes (to detect infinite loops for example) 2. Mutants will be generated and checked Results are stored in .mutmut-cache. Print found mutants with `mutmut results`. Legend for output: � Killed mutants. The goal is for everything to end up in this bucket. ⏰ Timeout. Test suite took 10 times as long as the baseline so were killed. � Suspicious. Tests took a long time, but not long enough to be fatal. � Survived. This means your tests needs to be expanded. mutmut cache is out of date, clearing it... 1. Running tests without mutations ... ``` Maybe we should simply adopt a ASCII text based notation?
boxed/mutmut
diff --git a/tests/test_main.py b/tests/test_main.py index 459279d..219f2b9 100644 --- a/tests/test_main.py +++ b/tests/test_main.py @@ -109,7 +109,7 @@ def test_compute_return_code(): class MockProgress(Progress): def __init__(self, killed_mutants, surviving_mutants, surviving_mutants_timeout, suspicious_mutants): - super(MockProgress, self).__init__(total=0) + super(MockProgress, self).__init__(total=0, output_legend={}) self.killed_mutants = killed_mutants self.surviving_mutants = surviving_mutants self.surviving_mutants_timeout = surviving_mutants_timeout @@ -441,3 +441,9 @@ def test_pre_and_post_mutation_hook(single_mutant_filesystem, tmpdir): assert "pre mutation stub" in result.output assert "post mutation stub" in result.output assert result.output.index("pre mutation stub") < result.output.index("post mutation stub") + + +def test_simple_output(filesystem): + result = CliRunner().invoke(climain, ['run', '--paths-to-mutate=foo.py', "--simple-output"], catch_exceptions=False) + print(repr(result.output)) + assert '14/14 KILLED 14 TIMEOUT 0 SUSPICIOUS 0 SURVIVED 0 SKIPPED 0' in repr(result.output)
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 0 }, "num_modified_files": 2 }
2.1
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "hammett", "mock", "coverage", "whatthepatch" ], "pre_install": null, "python": "3.9", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
astunparse==1.6.3 click==8.1.8 coverage==7.8.0 exceptiongroup==1.2.2 glob2==0.7 hammett==0.10.0 iniconfig==2.1.0 junit-xml==1.8 mock==5.2.0 -e git+https://github.com/boxed/mutmut.git@795b39baba4f95c109e6a8be33c7a4d4ef87df49#egg=mutmut packaging==24.2 parso==0.8.4 pluggy==1.5.0 pony==0.7.19 pytest==8.3.5 pytest-cov==6.0.0 six==1.17.0 tomli==2.2.1 whatthepatch==1.0.7
name: mutmut channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - astunparse==1.6.3 - click==8.1.8 - coverage==7.8.0 - exceptiongroup==1.2.2 - glob2==0.7 - hammett==0.10.0 - iniconfig==2.1.0 - junit-xml==1.8 - mock==5.2.0 - packaging==24.2 - parso==0.8.4 - pluggy==1.5.0 - pony==0.7.19 - pytest==8.3.5 - pytest-cov==6.0.0 - six==1.17.0 - tomli==2.2.1 - whatthepatch==1.0.7 prefix: /opt/conda/envs/mutmut
[ "tests/test_main.py::test_compute_return_code", "tests/test_main.py::test_simple_output" ]
[]
[ "tests/test_main.py::test_read_coverage_data", "tests/test_main.py::test_python_source_files[expected0-foo.py-tests_dirs0]", "tests/test_main.py::test_python_source_files[expected1-.-tests_dirs1]", "tests/test_main.py::test_python_source_files[expected2-.-tests_dirs2]", "tests/test_main.py::test_python_source_files__with_paths_to_exclude", "tests/test_main.py::test_popen_streaming_output_timeout", "tests/test_main.py::test_popen_streaming_output_stream", "tests/test_main.py::test_simple_apply", "tests/test_main.py::test_full_run_no_surviving_mutants", "tests/test_main.py::test_full_run_no_surviving_mutants_junit", "tests/test_main.py::test_full_run_one_surviving_mutant", "tests/test_main.py::test_full_run_one_surviving_mutant_junit", "tests/test_main.py::test_full_run_all_suspicious_mutant", "tests/test_main.py::test_full_run_all_suspicious_mutant_junit", "tests/test_main.py::test_use_coverage", "tests/test_main.py::test_use_patch_file", "tests/test_main.py::test_pre_and_post_mutation_hook" ]
[]
BSD 3-Clause "New" or "Revised" License
9,153
1,389
[ "mutmut/__init__.py", "mutmut/__main__.py" ]
tefra__xsdata-347
8f0cacd3640b76d282f859261d5832e27d77fbc9
2020-12-11 16:41:46
c31e7c4951671c0bc135cd831bdf39fe34f7a36a
sonarcloud[bot]: Kudos, SonarCloud Quality Gate passed! [<img src='https://sonarsource.github.io/sonarcloud-github-static-resources/v2/common/bug.png' alt='Bug' width='16' height='16' />](https://sonarcloud.io/project/issues?id=tefra_xsdata&pullRequest=347&resolved=false&types=BUG) [<img src='https://sonarsource.github.io/sonarcloud-github-static-resources/v2/checks/RatingBadge/A.png' alt='A' width='16' height='16' />](https://sonarcloud.io/project/issues?id=tefra_xsdata&pullRequest=347&resolved=false&types=BUG) [0 Bugs](https://sonarcloud.io/project/issues?id=tefra_xsdata&pullRequest=347&resolved=false&types=BUG) [<img src='https://sonarsource.github.io/sonarcloud-github-static-resources/v2/common/vulnerability.png' alt='Vulnerability' width='16' height='16' />](https://sonarcloud.io/project/issues?id=tefra_xsdata&pullRequest=347&resolved=false&types=VULNERABILITY) [<img src='https://sonarsource.github.io/sonarcloud-github-static-resources/v2/checks/RatingBadge/A.png' alt='A' width='16' height='16' />](https://sonarcloud.io/project/issues?id=tefra_xsdata&pullRequest=347&resolved=false&types=VULNERABILITY) [0 Vulnerabilities](https://sonarcloud.io/project/issues?id=tefra_xsdata&pullRequest=347&resolved=false&types=VULNERABILITY) [<img src='https://sonarsource.github.io/sonarcloud-github-static-resources/v2/common/security_hotspot.png' alt='Security Hotspot' width='16' height='16' />](https://sonarcloud.io/project/security_hotspots?id=tefra_xsdata&pullRequest=347&resolved=false&types=SECURITY_HOTSPOT) [<img src='https://sonarsource.github.io/sonarcloud-github-static-resources/v2/checks/RatingBadge/A.png' alt='A' width='16' height='16' />](https://sonarcloud.io/project/security_hotspots?id=tefra_xsdata&pullRequest=347&resolved=false&types=SECURITY_HOTSPOT) [0 Security Hotspots](https://sonarcloud.io/project/security_hotspots?id=tefra_xsdata&pullRequest=347&resolved=false&types=SECURITY_HOTSPOT) [<img src='https://sonarsource.github.io/sonarcloud-github-static-resources/v2/common/code_smell.png' alt='Code Smell' width='16' height='16' />](https://sonarcloud.io/project/issues?id=tefra_xsdata&pullRequest=347&resolved=false&types=CODE_SMELL) [<img src='https://sonarsource.github.io/sonarcloud-github-static-resources/v2/checks/RatingBadge/A.png' alt='A' width='16' height='16' />](https://sonarcloud.io/project/issues?id=tefra_xsdata&pullRequest=347&resolved=false&types=CODE_SMELL) [0 Code Smells](https://sonarcloud.io/project/issues?id=tefra_xsdata&pullRequest=347&resolved=false&types=CODE_SMELL) [<img src='https://sonarsource.github.io/sonarcloud-github-static-resources/v2/checks/CoverageChart/NoCoverageInfo.png' alt='No Coverage information' width='16' height='16' />](https://sonarcloud.io/component_measures?id=tefra_xsdata&pullRequest=347) No Coverage information [<img src='https://sonarsource.github.io/sonarcloud-github-static-resources/v2/checks/Duplications/3.png' alt='0.0%' width='16' height='16' />](https://sonarcloud.io/component_measures?id=tefra_xsdata&pullRequest=347&metric=new_duplicated_lines_density&view=list) [0.0% Duplication](https://sonarcloud.io/component_measures?id=tefra_xsdata&pullRequest=347&metric=new_duplicated_lines_density&view=list) codecov[bot]: # [Codecov](https://codecov.io/gh/tefra/xsdata/pull/347?src=pr&el=h1) Report > Merging [#347](https://codecov.io/gh/tefra/xsdata/pull/347?src=pr&el=desc) (88de6e8) into [master](https://codecov.io/gh/tefra/xsdata/commit/8f0cacd3640b76d282f859261d5832e27d77fbc9?el=desc) (8f0cacd) will **not change** coverage. 
> The diff coverage is `100.00%`. [![Impacted file tree graph](https://codecov.io/gh/tefra/xsdata/pull/347/graphs/tree.svg?width=650&height=150&src=pr&token=YzDDLtywvl)](https://codecov.io/gh/tefra/xsdata/pull/347?src=pr&el=tree) ```diff @@ Coverage Diff @@ ## master #347 +/- ## ========================================= Coverage 100.00% 100.00% ========================================= Files 71 71 Lines 5725 5730 +5 Branches 997 998 +1 ========================================= + Hits 5725 5730 +5 ``` | [Impacted Files](https://codecov.io/gh/tefra/xsdata/pull/347?src=pr&el=tree) | Coverage Δ | | |---|---|---| | [xsdata/formats/dataclass/serializers/mixins.py](https://codecov.io/gh/tefra/xsdata/pull/347/diff?src=pr&el=tree#diff-eHNkYXRhL2Zvcm1hdHMvZGF0YWNsYXNzL3NlcmlhbGl6ZXJzL21peGlucy5weQ==) | `100.00% <100.00%> (ø)` | | | [xsdata/formats/dataclass/serializers/xml.py](https://codecov.io/gh/tefra/xsdata/pull/347/diff?src=pr&el=tree#diff-eHNkYXRhL2Zvcm1hdHMvZGF0YWNsYXNzL3NlcmlhbGl6ZXJzL3htbC5weQ==) | `100.00% <100.00%> (ø)` | | | [xsdata/codegen/sanitizer.py](https://codecov.io/gh/tefra/xsdata/pull/347/diff?src=pr&el=tree#diff-eHNkYXRhL2NvZGVnZW4vc2FuaXRpemVyLnB5) | `100.00% <0.00%> (ø)` | | ------ [Continue to review full report at Codecov](https://codecov.io/gh/tefra/xsdata/pull/347?src=pr&el=continue). > **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta) > `Δ = absolute <relative> (impact)`, `ø = not affected`, `? = missing data` > Powered by [Codecov](https://codecov.io/gh/tefra/xsdata/pull/347?src=pr&el=footer). Last update [8f0cacd...88de6e8](https://codecov.io/gh/tefra/xsdata/pull/347?src=pr&el=lastupdated). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments).
diff --git a/xsdata/formats/dataclass/serializers/mixins.py b/xsdata/formats/dataclass/serializers/mixins.py index d6c862e3..76b8a597 100644 --- a/xsdata/formats/dataclass/serializers/mixins.py +++ b/xsdata/formats/dataclass/serializers/mixins.py @@ -14,6 +14,7 @@ from xml.sax.handler import ContentHandler from xsdata.exceptions import XmlWriterError from xsdata.formats.converter import converter from xsdata.formats.dataclass.serializers.config import SerializerConfig +from xsdata.models.enums import DataType from xsdata.models.enums import Namespace from xsdata.models.enums import QNames from xsdata.utils.constants import EMPTY_MAP @@ -120,7 +121,7 @@ class XmlWriter: if not self.pending_tag and check_pending: raise XmlWriterError("Empty pending tag.") - if isinstance(value, str) and value and value[0] == "{" and len(value) > 1: + if self.is_xsi_type(key, value): value = QName(value) name = split_qname(key) @@ -235,3 +236,12 @@ class XmlWriter: not qualified.""" if not self.pending_tag[0] and None in self.ns_map: self.ns_map[None] = "" + + @classmethod + def is_xsi_type(cls, key: str, value: Any) -> bool: + """Return whether the value is an xsi:type or not.""" + + if isinstance(value, str) and value.startswith("{"): + return key == QNames.XSI_TYPE or DataType.from_qname(value) is not None + + return False diff --git a/xsdata/formats/dataclass/serializers/xml.py b/xsdata/formats/dataclass/serializers/xml.py index 872130e6..147ce0ab 100644 --- a/xsdata/formats/dataclass/serializers/xml.py +++ b/xsdata/formats/dataclass/serializers/xml.py @@ -272,7 +272,7 @@ class XmlSerializer(AbstractSerializer): if value is not None and var.is_any_type: datatype = DataType.from_value(value) if datatype != DataType.STRING: - yield XmlWriterEvent.ATTR, QNames.XSI_TYPE, str(datatype) + yield XmlWriterEvent.ATTR, QNames.XSI_TYPE, QName(str(datatype)) yield XmlWriterEvent.DATA, value yield XmlWriterEvent.END, var.qname
Attribute values are parsed as qnames If an attribute value looks like a qname, but isn't, serializing breaks. Example: ```py from dataclasses import dataclass, field from xsdata.formats.dataclass.serializers import XmlSerializer from xsdata.formats.dataclass.serializers.config import SerializerConfig @dataclass class Element: attr: str = field(metadata=dict(type="Attribute", name="attr")) def test(value): element = Element(attr=value) config = SerializerConfig(pretty_print=True) serializer = XmlSerializer(config=config) print(serializer.render(element)) if __name__ == '__main__': test('{{}}') ``` When running the above script, lxml throws a `ValueError` exception: ``` Traceback (most recent call last): File "scratch.py", line 20, in <module> test('{{}}') File "scratch.py", line 16, in test print(serializer.render(element)) File "env38.win\lib\site-packages\xsdata\formats\dataclass\serializers\xml.py", line 55, in render self.write(output, obj, ns_map) File "env38.win\lib\site-packages\xsdata\formats\dataclass\serializers\xml.py", line 70, in write handler.write(events) File "env38.win\lib\site-packages\xsdata\formats\dataclass\serializers\writers\lxml.py", line 18, in write super().write(events) File "env38.win\lib\site-packages\xsdata\formats\dataclass\serializers\mixins.py", line 82, in write self.end_tag(*args) File "env38.win\lib\site-packages\xsdata\formats\dataclass\serializers\mixins.py", line 170, in end_tag self.flush_start(True) File "env38.win\lib\site-packages\xsdata\formats\dataclass\serializers\mixins.py", line 206, in flush_start self.handler.startElementNS(self.pending_tag, None, self.attrs) File "src\lxml\sax.py", line 111, in lxml.sax.ElementTreeContentHandler.startElementNS File "src\lxml\etree.pyx", line 3022, in lxml.etree.Element File "src\lxml\apihelpers.pxi", line 131, in lxml.etree._makeElement File "src\lxml\apihelpers.pxi", line 118, in lxml.etree._makeElement File "src\lxml\apihelpers.pxi", line 215, in lxml.etree._setNodeNamespaces File "src\lxml\apihelpers.pxi", line 1755, in lxml.etree._uriValidOrRaise ValueError: Invalid namespace URI '{' ``` Tested with versions 20.11.1 and 20.12
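The `is_xsi_type` guard introduced in the patch above is what stops values such as `'{{}}'` from being promoted to QNames. The sketch below is a simplified standalone version of that check — it hard-codes the xsi:type key and omits the `DataType.from_qname` branch that the real fix also allows:

```python
# Simplified guard: a "{...}"-prefixed string is only treated as a QName
# when the attribute key really is xsi:type.
XSI_TYPE = "{http://www.w3.org/2001/XMLSchema-instance}type"


def looks_like_xsi_type(key: str, value) -> bool:
    if isinstance(value, str) and value.startswith("{"):
        return key == XSI_TYPE
    return False


assert looks_like_xsi_type(XSI_TYPE, "{http://www.w3.org/2001/XMLSchema}string")
assert not looks_like_xsi_type("attr", "{{}}")  # the literal value from the report above
assert not looks_like_xsi_type(XSI_TYPE, 123)
```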
tefra/xsdata
diff --git a/tests/formats/dataclass/serializers/test_mixins.py b/tests/formats/dataclass/serializers/test_mixins.py index 70c08e23..66ad0040 100644 --- a/tests/formats/dataclass/serializers/test_mixins.py +++ b/tests/formats/dataclass/serializers/test_mixins.py @@ -172,8 +172,16 @@ class XmlWriterTests(TestCase): (None, "a"): "bar", (None, "b"): "true", (None, "c"): "{", - (None, "d"): "ns0:b", + (None, "d"): "{a}b", ("http://www.w3.org/2001/XMLSchema-instance", "type"): "xs:string", } self.assertEqual(expected, self.writer.attrs) + + def test_is_xsi_type(self): + self.assertFalse(self.writer.is_xsi_type("key", 1)) + self.assertFalse(self.writer.is_xsi_type(QNames.XSI_TYPE, 1)) + self.assertFalse(self.writer.is_xsi_type(QNames.XSI_TYPE, "a")) + self.assertTrue(self.writer.is_xsi_type(QNames.XSI_TYPE, "{b}a")) + self.assertFalse(self.writer.is_xsi_type("type", "{b}a")) + self.assertTrue(self.writer.is_xsi_type("type", str(DataType.STRING))) diff --git a/tests/formats/dataclass/serializers/test_xml.py b/tests/formats/dataclass/serializers/test_xml.py index ab5da7a2..c0f2269e 100644 --- a/tests/formats/dataclass/serializers/test_xml.py +++ b/tests/formats/dataclass/serializers/test_xml.py @@ -282,7 +282,7 @@ class XmlSerializerTests(TestCase): var = XmlElement(qname="a", name="a", types=[object]) expected = [ (XmlWriterEvent.START, "a"), - (XmlWriterEvent.ATTR, QNames.XSI_TYPE, str(DataType.INT)), + (XmlWriterEvent.ATTR, QNames.XSI_TYPE, QName(str(DataType.INT))), (XmlWriterEvent.DATA, 123), (XmlWriterEvent.END, "a"), ]
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 3 }, "num_modified_files": 2 }
20.12
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "lxml", "requests", "click", "toposort", "jinja2", "docformatter", "pytest", "pytest-cov" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
cachetools==5.5.2 certifi==2025.1.31 cfgv==3.4.0 chardet==5.2.0 charset-normalizer==3.4.1 click==8.1.8 click-default-group==1.2.4 click-log==0.4.0 codecov==2.1.13 colorama==0.4.6 coverage==7.8.0 distlib==0.3.9 docformatter==1.7.5 exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work filelock==3.18.0 identify==2.6.9 idna==3.10 iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work Jinja2==3.1.6 lxml==5.3.1 MarkupSafe==3.0.2 nodeenv==1.9.1 packaging @ file:///croot/packaging_1734472117206/work platformdirs==4.3.7 pluggy @ file:///croot/pluggy_1733169602837/work pre_commit==4.2.0 py-cpuinfo==9.0.0 pyproject-api==1.9.0 pytest @ file:///croot/pytest_1738938843180/work pytest-benchmark==5.1.0 pytest-cov==6.0.0 PyYAML==6.0.2 requests==2.32.3 tomli==2.2.1 toposort==1.10 tox==4.25.0 typing_extensions==4.13.0 untokenize==0.1.1 urllib3==2.3.0 virtualenv==20.29.3 -e git+https://github.com/tefra/xsdata.git@8f0cacd3640b76d282f859261d5832e27d77fbc9#egg=xsdata
name: xsdata channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - exceptiongroup=1.2.0=py39h06a4308_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - packaging=24.2=py39h06a4308_0 - pip=25.0=py39h06a4308_0 - pluggy=1.5.0=py39h06a4308_0 - pytest=8.3.4=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - cachetools==5.5.2 - certifi==2025.1.31 - cfgv==3.4.0 - chardet==5.2.0 - charset-normalizer==3.4.1 - click==8.1.8 - click-default-group==1.2.4 - click-log==0.4.0 - codecov==2.1.13 - colorama==0.4.6 - coverage==7.8.0 - distlib==0.3.9 - docformatter==1.7.5 - filelock==3.18.0 - identify==2.6.9 - idna==3.10 - jinja2==3.1.6 - lxml==5.3.1 - markupsafe==3.0.2 - nodeenv==1.9.1 - platformdirs==4.3.7 - pre-commit==4.2.0 - py-cpuinfo==9.0.0 - pyproject-api==1.9.0 - pytest-benchmark==5.1.0 - pytest-cov==6.0.0 - pyyaml==6.0.2 - requests==2.32.3 - tomli==2.2.1 - toposort==1.10 - tox==4.25.0 - typing-extensions==4.13.0 - untokenize==0.1.1 - urllib3==2.3.0 - virtualenv==20.29.3 prefix: /opt/conda/envs/xsdata
[ "tests/formats/dataclass/serializers/test_mixins.py::XmlWriterTests::test_add_attribute", "tests/formats/dataclass/serializers/test_mixins.py::XmlWriterTests::test_is_xsi_type" ]
[]
[ "tests/formats/dataclass/serializers/test_mixins.py::XmlWriterTests::test_write", "tests/formats/dataclass/serializers/test_mixins.py::XmlWriterTests::test_write_removes_xsi_nil_if_necessary", "tests/formats/dataclass/serializers/test_mixins.py::XmlWriterTests::test_write_resets_default_namespace_for_unqualified_elements", "tests/formats/dataclass/serializers/test_mixins.py::XmlWriterTests::test_write_with_no_namespace_schema_location", "tests/formats/dataclass/serializers/test_mixins.py::XmlWriterTests::test_write_with_schema_location", "tests/formats/dataclass/serializers/test_mixins.py::XmlWriterTests::test_write_with_unhandled_event_raises_exception", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_next_attribute", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_next_value", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_render_mixed_content", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_any_type_with_generic_object", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_any_type_with_primitive", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_any_type_with_primitive_element", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_choice_when_no_matching_choice_exists", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_choice_with_derived_dataclass", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_choice_with_derived_primitive_value", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_choice_with_generic_object", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_choice_with_raw_value", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_data", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_dataclass", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_dataclass_can_overwrite_params", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_dataclass_with_no_dataclass", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_element", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_element_with_any_type_var", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_element_with_any_type_var_ignore_xs_string", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_element_with_nillable_true", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_mixed_content", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_object_with_derived_element", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_tokens", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_value_with_list_value", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_value_with_unhandled_xml_var", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_xsi_type", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_xsi_type_with_derived_class", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_xsi_type_with_illegal_derived_class" ]
[]
MIT License
9,158
593
[ "xsdata/formats/dataclass/serializers/mixins.py", "xsdata/formats/dataclass/serializers/xml.py" ]
Turbo87__utm-62
d0ed24f2ffea848c89b31bf0fe95a3f187a8b5ca
2020-12-11 17:11:55
e45377c40c36b729618230a9320e441062c94e6a
diff --git a/setup.py b/setup.py index 4239132..f44cba9 100644 --- a/setup.py +++ b/setup.py @@ -1,8 +1,11 @@ from distutils.core import setup +from utm._version import __version__ + + setup( name='utm', - version='0.7.0', + version=__version__, author='Tobias Bieniek', author_email='[email protected]', url='https://github.com/Turbo87/utm', diff --git a/utm/__init__.py b/utm/__init__.py index 42ae0be..7dacb8d 100644 --- a/utm/__init__.py +++ b/utm/__init__.py @@ -1,2 +1,3 @@ from utm.conversion import to_latlon, from_latlon, latlon_to_zone_number, latitude_to_zone_letter, check_valid_zone from utm.error import OutOfRangeError +from utm._version import __version__ diff --git a/utm/_version.py b/utm/_version.py new file mode 100644 index 0000000..49e0fc1 --- /dev/null +++ b/utm/_version.py @@ -0,0 +1,1 @@ +__version__ = "0.7.0"
__version__ \_\_version\_\_ is a convenient way to find a module's version. ``` >>> import numpy as np >>> np.__version__ '1.15.1' >>> import utm >>> utm.__version__ Traceback (most recent call last): File "<stdin>", line 1, in <module> AttributeError: module 'utm' has no attribute '__version__' ``` utm is lacking that attribute.
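Once the patch above is applied, the new attribute can be checked the same way the added unit test does; this assumes the patched `utm` package is importable:

```python
import utm

# The attribute should be a dotted version string, e.g. "0.7.0".
assert isinstance(utm.__version__, str) and "." in utm.__version__
print(utm.__version__)
```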
Turbo87/utm
diff --git a/test/test_utm.py b/test/test_utm.py index 268a856..f89c368 100755 --- a/test/test_utm.py +++ b/test/test_utm.py @@ -352,5 +352,10 @@ class TestForcingAntiMeridian(unittest.TestCase): UTM.from_latlon(0, -179.9, 60, 'N'), -179.9) +class TestProject(unittest.TestCase): + def test_version(self): + self.assertTrue(isinstance(UTM.__version__, str) and '.' in UTM.__version__) + + if __name__ == '__main__': unittest.main()
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_added_files", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 3 }, "num_modified_files": 2 }
0.7
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "numpy>=1.16.0", "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "requirements/base.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
exceptiongroup==1.2.2 iniconfig==2.1.0 numpy==2.0.2 packaging==24.2 pluggy==1.5.0 pytest==8.3.5 tomli==2.2.1 -e git+https://github.com/Turbo87/utm.git@d0ed24f2ffea848c89b31bf0fe95a3f187a8b5ca#egg=utm
name: utm channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - exceptiongroup==1.2.2 - iniconfig==2.1.0 - numpy==2.0.2 - packaging==24.2 - pluggy==1.5.0 - pytest==8.3.5 - tomli==2.2.1 prefix: /opt/conda/envs/utm
[ "test/test_utm.py::TestProject::test_version" ]
[]
[ "test/test_utm.py::KnownValues::test_from_latlon", "test/test_utm.py::KnownValues::test_from_latlon_numpy", "test/test_utm.py::KnownValues::test_to_latlon", "test/test_utm.py::KnownValues::test_to_latlon_numpy", "test/test_utm.py::BadInput::test_from_latlon_range_checks", "test/test_utm.py::BadInput::test_to_latlon_range_checks", "test/test_utm.py::Zone32V::test_above", "test/test_utm.py::Zone32V::test_below", "test/test_utm.py::Zone32V::test_inside", "test/test_utm.py::Zone32V::test_left_of", "test/test_utm.py::Zone32V::test_right_of", "test/test_utm.py::TestRightBoundaries::test_limits", "test/test_utm.py::TestValidZones::test_invalid_zones", "test/test_utm.py::TestValidZones::test_valid_zones", "test/test_utm.py::TestForcingZones::test_force_zone", "test/test_utm.py::TestForcingAntiMeridian::test_force_east", "test/test_utm.py::TestForcingAntiMeridian::test_force_west" ]
[]
MIT License
9,159
329
[ "setup.py", "utm/__init__.py" ]
marshmallow-code__apispec-616
5762c831fccf9d8dedcdfb901d106c02addc278b
2020-12-11 23:02:05
dedac199a073af76298817f391239cf5a3ee09ef
Bangertm: @lafrech I didn't understand your comment about using the result of `field2type_and_format` before. Thanks for clarifying. Yes that is a better test. In fact `field2range` (and all the other attribute functions) already has access to the result of `field2type_and_format` via the `ret` keyword argument: https://github.com/marshmallow-code/apispec/blob/dd31b87a9f77f8665f37127789c09565334dd253/src/apispec/ext/marshmallow/field_converter.py#L160-L161 So I can just change the test to rely on the `type` attribute, which allows users to declare a custom field to be a number and makes this code generic. We could potentially do this without changing the function signature because `kwargs` is available, but I think it's better to make the use of `ret` explicit in the signature. Thoughts? lafrech: Oh, right. I read the code too fast and missed the fact that we already have `kwargs` with `ret` in them. Yes, let's use this. We just need to be sure `field2type_and_format` runs first. Maybe this deserves a comment in `self.attribute_functions` declaration. We already relied on the order since we use `ret` in `nested2properties`.
diff --git a/src/apispec/ext/marshmallow/field_converter.py b/src/apispec/ext/marshmallow/field_converter.py index 0b3798a..8583ec0 100644 --- a/src/apispec/ext/marshmallow/field_converter.py +++ b/src/apispec/ext/marshmallow/field_converter.py @@ -83,6 +83,8 @@ class FieldConverterMixin: def init_attribute_functions(self): self.attribute_functions = [ + # self.field2type_and_format should run first + # as other functions may rely on its output self.field2type_and_format, self.field2default, self.field2choices, @@ -272,7 +274,7 @@ class FieldConverterMixin: ] = True return attributes - def field2range(self, field, **kwargs): + def field2range(self, field, ret): """Return the dictionary of OpenAPI field attributes for a set of :class:`Range <marshmallow.validators.Range>` validators. @@ -289,19 +291,12 @@ class FieldConverterMixin: ) ] - attributes = {} - for validator in validators: - if validator.min is not None: - if hasattr(attributes, "minimum"): - attributes["minimum"] = max(attributes["minimum"], validator.min) - else: - attributes["minimum"] = validator.min - if validator.max is not None: - if hasattr(attributes, "maximum"): - attributes["maximum"] = min(attributes["maximum"], validator.max) - else: - attributes["maximum"] = validator.max - return attributes + min_attr, max_attr = ( + ("minimum", "maximum") + if ret.get("type") in {"number", "integer"} + else ("x-minimum", "x-maximum") + ) + return make_min_max_attributes(validators, min_attr, max_attr) def field2length(self, field, **kwargs): """Return the dictionary of OpenAPI field attributes for a set of @@ -310,8 +305,6 @@ class FieldConverterMixin: :param Field field: A marshmallow field. :rtype: dict """ - attributes = {} - validators = [ validator for validator in field.validators @@ -328,23 +321,13 @@ class FieldConverterMixin: min_attr = "minItems" if is_array else "minLength" max_attr = "maxItems" if is_array else "maxLength" - for validator in validators: - if validator.min is not None: - if hasattr(attributes, min_attr): - attributes[min_attr] = max(attributes[min_attr], validator.min) - else: - attributes[min_attr] = validator.min - if validator.max is not None: - if hasattr(attributes, max_attr): - attributes[max_attr] = min(attributes[max_attr], validator.max) - else: - attributes[max_attr] = validator.max - - for validator in validators: - if validator.equal is not None: - attributes[min_attr] = validator.equal - attributes[max_attr] = validator.equal - return attributes + equal_list = [ + validator.equal for validator in validators if validator.equal is not None + ] + if equal_list: + return {min_attr: equal_list[0], max_attr: equal_list[0]} + + return make_min_max_attributes(validators, min_attr, max_attr) def field2pattern(self, field, **kwargs): """Return the dictionary of OpenAPI field attributes for a set of @@ -449,3 +432,23 @@ class FieldConverterMixin: if value_field: ret["additionalProperties"] = self.field2property(value_field) return ret + + +def make_min_max_attributes(validators, min_attr, max_attr): + """Return a dictionary of minimum and maximum attributes based on a list + of validators. If either minimum or maximum values are not present in any + of the validator objects that attribute will be omitted. + + :param validators list: A list of `Marshmallow` validator objects. 
Each + objct is inspected for a minimum and maximum values + :param min_attr string: The OpenAPI attribute for the minimum value + :param max_attr string: The OpenAPI attribute for the maximum value + """ + attributes = {} + min_list = [validator.min for validator in validators if validator.min is not None] + max_list = [validator.max for validator in validators if validator.max is not None] + if min_list: + attributes[min_attr] = max(min_list) + if max_list: + attributes[max_attr] = min(max_list) + return attributes
Minimum/maximum date is not jsonschema compliant
I'm using flask-apispec (but I think the issue is related to apispec) to generate a swagger 2.0 schema from webargs/marshmallow models that include datetimes with min/max boundaries like this:

```
    mydate = fields.DateTime(
        required=True,
        validate=validate.Range(
            max=datetime.now(pytz.utc).replace(hour=23, minute=59, second=59),
            min=datetime(1900, 1, 1, tzinfo=pytz.utc),
            max_inclusive=True
        )
    )
```

The generated JSON schema is:

```
        "mydate": {
          "format": "date-time",
          "maximum": "Tue, 08 Dec 2020 23:59:59 GMT",
          "minimum": "Mon, 01 Jan 1900 00:00:00 GMT",
          "type": "string"
        },
```

But when I run schemathesis on the generated schema I obtain a validation error:

```
E           jsonschema.exceptions.ValidationError: 'Tue, 08 Dec 2020 23:59:59 GMT' is not of type 'number'
E
E           Failed validating 'type' in schema['properties']['definitions']['additionalProperties']['properties']['properties']['additionalProperties']['properties']['maximum']:
E               {'type': 'number'}
E
E           On instance['definitions']['Input']['properties']['mydate']['maximum']:
E               'Tue, 08 Dec 2020 23:59:59 GMT'
```

Schemathesis is right, because minimum and maximum values are expected to be "JSON numbers" (https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.1.3) and not datetimes.

On the other hand, having min and max date validation is a much-appreciated feature, so simply not setting these boundaries on the models is not feasible for my use cases. Furthermore, schemathesis is very helpful for my test suite and I would like to continue to use it.

Is there something that I can modify / configure to obtain jsonschema-compliant models from webargs/marshmallow schemas with datetime min/max values? Or maybe is there something that apispec could fix to obtain that result? (For example, converting min/max datetimes into timestamps? Having a number would be adherent to the schema specification.)

If not possible, I could try to open an issue on schemathesis to ask for some compatibility fixes. They already include [compatibility fixes for fastapi](https://schemathesis.readthedocs.io/en/stable/compatibility.html#using-fastapi) (which, by using pydantic, produces Draft 7 compatible schemas, while schemathesis validates against Draft 4 and Wright Draft 00), so maybe they would be interested in adding compatibility fixes for apispec as well. In that case I would like to ask for your help in supporting the request, by providing your reasons for not meeting the specification or any other information they might ask me for.

I really thank you for your help.
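The patch above resolves this by keeping `minimum`/`maximum` only for numeric OpenAPI types and emitting the vendor-extension keys `x-minimum`/`x-maximum` otherwise. The function below is a condensed standalone sketch of that selection logic; its name and signature are illustrative and not apispec's API:

```python
from datetime import datetime


def range_attributes(openapi_type, minimum=None, maximum=None):
    # JSON Schema only allows numeric minimum/maximum, so non-numeric types
    # (such as date-time strings) get x- prefixed extension keys instead.
    min_attr, max_attr = (
        ("minimum", "maximum")
        if openapi_type in {"number", "integer"}
        else ("x-minimum", "x-maximum")
    )
    attributes = {}
    if minimum is not None:
        attributes[min_attr] = minimum
    if maximum is not None:
        attributes[max_attr] = maximum
    return attributes


print(range_attributes("integer", minimum=1, maximum=10))
# {'minimum': 1, 'maximum': 10}
print(range_attributes("string", minimum=datetime(1900, 1, 1)))
# {'x-minimum': datetime.datetime(1900, 1, 1, 0, 0)}
```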
marshmallow-code/apispec
diff --git a/tests/test_ext_marshmallow_openapi.py b/tests/test_ext_marshmallow_openapi.py index 5879073..36c6ad0 100644 --- a/tests/test_ext_marshmallow_openapi.py +++ b/tests/test_ext_marshmallow_openapi.py @@ -1,4 +1,5 @@ import pytest +from datetime import datetime from marshmallow import fields, Schema, validate @@ -498,6 +499,7 @@ class TestFieldValidation: class ValidationSchema(Schema): id = fields.Int(dump_only=True) range = fields.Int(validate=validate.Range(min=1, max=10)) + range_no_upper = fields.Float(validate=validate.Range(min=1)) multiple_ranges = fields.Int( validate=[ validate.Range(min=1), @@ -523,11 +525,13 @@ class TestFieldValidation: equal_length = fields.Str( validate=[validate.Length(equal=5), validate.Length(min=1, max=10)] ) + date_range = fields.DateTime(validate=validate.Range(min=datetime(1900, 1, 1),)) @pytest.mark.parametrize( ("field", "properties"), [ ("range", {"minimum": 1, "maximum": 10}), + ("range_no_upper", {"minimum": 1}), ("multiple_ranges", {"minimum": 3, "maximum": 7}), ("list_length", {"minItems": 1, "maxItems": 10}), ("custom_list_length", {"minItems": 1, "maxItems": 10}), @@ -535,6 +539,7 @@ class TestFieldValidation: ("custom_field_length", {"minLength": 1, "maxLength": 10}), ("multiple_lengths", {"minLength": 3, "maxLength": 7}), ("equal_length", {"minLength": 5, "maxLength": 5}), + ("date_range", {"x-minimum": datetime(1900, 1, 1)}), ], ) def test_properties(self, field, properties, spec):
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_hyperlinks", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 2 }, "num_modified_files": 1 }
4.0
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.9", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
-e git+https://github.com/marshmallow-code/apispec.git@5762c831fccf9d8dedcdfb901d106c02addc278b#egg=apispec attrs==25.3.0 cachetools==5.5.2 certifi==2025.1.31 cfgv==3.4.0 chardet==5.2.0 charset-normalizer==3.4.1 colorama==0.4.6 distlib==0.3.9 exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work filelock==3.18.0 flake8==3.8.4 flake8-bugbear==20.11.1 identify==2.6.9 idna==3.10 iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work jsonschema==4.17.3 jsonschema-spec==0.1.6 lazy-object-proxy==1.10.0 marshmallow==3.26.1 mccabe==0.6.1 mock==5.2.0 nodeenv==1.9.1 openapi-schema-validator==0.4.4 openapi-spec-validator==0.5.7 packaging @ file:///croot/packaging_1734472117206/work pathable==0.4.4 platformdirs==4.3.7 pluggy @ file:///croot/pluggy_1733169602837/work prance==23.6.21.0 pre-commit==2.21.0 pycodestyle==2.6.0 pyflakes==2.2.0 pyproject-api==1.9.0 pyrsistent==0.20.0 pytest @ file:///croot/pytest_1738938843180/work PyYAML==6.0.2 requests==2.32.3 rfc3339-validator==0.1.4 ruamel.yaml==0.18.10 ruamel.yaml.clib==0.2.12 six==1.17.0 tomli==2.2.1 tox==4.25.0 typing_extensions==4.13.0 urllib3==2.3.0 virtualenv==20.29.3
name: apispec channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - exceptiongroup=1.2.0=py39h06a4308_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - packaging=24.2=py39h06a4308_0 - pip=25.0=py39h06a4308_0 - pluggy=1.5.0=py39h06a4308_0 - pytest=8.3.4=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - apispec==4.0.0 - attrs==25.3.0 - cachetools==5.5.2 - certifi==2025.1.31 - cfgv==3.4.0 - chardet==5.2.0 - charset-normalizer==3.4.1 - colorama==0.4.6 - distlib==0.3.9 - filelock==3.18.0 - flake8==3.8.4 - flake8-bugbear==20.11.1 - identify==2.6.9 - idna==3.10 - jsonschema==4.17.3 - jsonschema-spec==0.1.6 - lazy-object-proxy==1.10.0 - marshmallow==3.26.1 - mccabe==0.6.1 - mock==5.2.0 - nodeenv==1.9.1 - openapi-schema-validator==0.4.4 - openapi-spec-validator==0.5.7 - pathable==0.4.4 - platformdirs==4.3.7 - prance==23.6.21.0 - pre-commit==2.21.0 - pycodestyle==2.6.0 - pyflakes==2.2.0 - pyproject-api==1.9.0 - pyrsistent==0.20.0 - pyyaml==6.0.2 - requests==2.32.3 - rfc3339-validator==0.1.4 - ruamel-yaml==0.18.10 - ruamel-yaml-clib==0.2.12 - six==1.17.0 - tomli==2.2.1 - tox==4.25.0 - typing-extensions==4.13.0 - urllib3==2.3.0 - virtualenv==20.29.3 prefix: /opt/conda/envs/apispec
[ "tests/test_ext_marshmallow_openapi.py::TestFieldValidation::test_properties[2.0-date_range-properties9]", "tests/test_ext_marshmallow_openapi.py::TestFieldValidation::test_properties[3.0.0-date_range-properties9]" ]
[]
[ "tests/test_ext_marshmallow_openapi.py::TestMarshmallowFieldToOpenAPI::test_fields_with_missing_load[2.0]", "tests/test_ext_marshmallow_openapi.py::TestMarshmallowFieldToOpenAPI::test_fields_with_missing_load[3.0.0]", "tests/test_ext_marshmallow_openapi.py::TestMarshmallowFieldToOpenAPI::test_fields_default_location_mapping_if_schema_many[2.0]", "tests/test_ext_marshmallow_openapi.py::TestMarshmallowFieldToOpenAPI::test_fields_with_dump_only[2.0]", "tests/test_ext_marshmallow_openapi.py::TestMarshmallowFieldToOpenAPI::test_fields_with_dump_only[3.0.0]", "tests/test_ext_marshmallow_openapi.py::TestMarshmallowSchemaToModelDefinition::test_schema2jsonschema_with_explicit_fields[2.0]", "tests/test_ext_marshmallow_openapi.py::TestMarshmallowSchemaToModelDefinition::test_schema2jsonschema_with_explicit_fields[3.0.0]", "tests/test_ext_marshmallow_openapi.py::TestMarshmallowSchemaToModelDefinition::test_schema2jsonschema_override_name[2.0]", "tests/test_ext_marshmallow_openapi.py::TestMarshmallowSchemaToModelDefinition::test_schema2jsonschema_override_name[3.0.0]", "tests/test_ext_marshmallow_openapi.py::TestMarshmallowSchemaToModelDefinition::test_required_fields[2.0]", "tests/test_ext_marshmallow_openapi.py::TestMarshmallowSchemaToModelDefinition::test_required_fields[3.0.0]", "tests/test_ext_marshmallow_openapi.py::TestMarshmallowSchemaToModelDefinition::test_partial[2.0]", "tests/test_ext_marshmallow_openapi.py::TestMarshmallowSchemaToModelDefinition::test_partial[3.0.0]", "tests/test_ext_marshmallow_openapi.py::TestMarshmallowSchemaToModelDefinition::test_no_required_fields[2.0]", "tests/test_ext_marshmallow_openapi.py::TestMarshmallowSchemaToModelDefinition::test_no_required_fields[3.0.0]", "tests/test_ext_marshmallow_openapi.py::TestMarshmallowSchemaToModelDefinition::test_title_and_description_may_be_added[2.0]", "tests/test_ext_marshmallow_openapi.py::TestMarshmallowSchemaToModelDefinition::test_title_and_description_may_be_added[3.0.0]", "tests/test_ext_marshmallow_openapi.py::TestMarshmallowSchemaToModelDefinition::test_excluded_fields[2.0]", "tests/test_ext_marshmallow_openapi.py::TestMarshmallowSchemaToModelDefinition::test_excluded_fields[3.0.0]", "tests/test_ext_marshmallow_openapi.py::TestMarshmallowSchemaToModelDefinition::test_only_explicitly_declared_fields_are_translated[2.0]", "tests/test_ext_marshmallow_openapi.py::TestMarshmallowSchemaToModelDefinition::test_only_explicitly_declared_fields_are_translated[3.0.0]", "tests/test_ext_marshmallow_openapi.py::TestMarshmallowSchemaToModelDefinition::test_observed_field_name_for_required_field[2.0]", "tests/test_ext_marshmallow_openapi.py::TestMarshmallowSchemaToModelDefinition::test_observed_field_name_for_required_field[3.0.0]", "tests/test_ext_marshmallow_openapi.py::TestMarshmallowSchemaToModelDefinition::test_schema_instance_inspection[2.0-True]", "tests/test_ext_marshmallow_openapi.py::TestMarshmallowSchemaToModelDefinition::test_schema_instance_inspection[2.0-False]", "tests/test_ext_marshmallow_openapi.py::TestMarshmallowSchemaToModelDefinition::test_schema_instance_inspection[3.0.0-True]", "tests/test_ext_marshmallow_openapi.py::TestMarshmallowSchemaToModelDefinition::test_schema_instance_inspection[3.0.0-False]", "tests/test_ext_marshmallow_openapi.py::TestMarshmallowSchemaToModelDefinition::test_raises_error_if_no_declared_fields[2.0]", "tests/test_ext_marshmallow_openapi.py::TestMarshmallowSchemaToModelDefinition::test_raises_error_if_no_declared_fields[3.0.0]", 
"tests/test_ext_marshmallow_openapi.py::TestMarshmallowSchemaToParameters::test_field_multiple[2.0-List]", "tests/test_ext_marshmallow_openapi.py::TestMarshmallowSchemaToParameters::test_field_multiple[2.0-CustomList]", "tests/test_ext_marshmallow_openapi.py::TestMarshmallowSchemaToParameters::test_field_multiple[3.0.0-List]", "tests/test_ext_marshmallow_openapi.py::TestMarshmallowSchemaToParameters::test_field_multiple[3.0.0-CustomList]", "tests/test_ext_marshmallow_openapi.py::TestMarshmallowSchemaToParameters::test_field_required[2.0]", "tests/test_ext_marshmallow_openapi.py::TestMarshmallowSchemaToParameters::test_field_required[3.0.0]", "tests/test_ext_marshmallow_openapi.py::TestMarshmallowSchemaToParameters::test_schema_body[2.0]", "tests/test_ext_marshmallow_openapi.py::TestMarshmallowSchemaToParameters::test_schema_body_with_dump_only[2.0]", "tests/test_ext_marshmallow_openapi.py::TestMarshmallowSchemaToParameters::test_schema_body_many[2.0]", "tests/test_ext_marshmallow_openapi.py::TestMarshmallowSchemaToParameters::test_schema_query[2.0]", "tests/test_ext_marshmallow_openapi.py::TestMarshmallowSchemaToParameters::test_schema_query[3.0.0]", "tests/test_ext_marshmallow_openapi.py::TestMarshmallowSchemaToParameters::test_schema_query_instance[2.0]", "tests/test_ext_marshmallow_openapi.py::TestMarshmallowSchemaToParameters::test_schema_query_instance[3.0.0]", "tests/test_ext_marshmallow_openapi.py::TestMarshmallowSchemaToParameters::test_schema_query_instance_many_should_raise_exception[2.0]", "tests/test_ext_marshmallow_openapi.py::TestMarshmallowSchemaToParameters::test_schema_query_instance_many_should_raise_exception[3.0.0]", "tests/test_ext_marshmallow_openapi.py::TestMarshmallowSchemaToParameters::test_fields_query[2.0]", "tests/test_ext_marshmallow_openapi.py::TestMarshmallowSchemaToParameters::test_fields_query[3.0.0]", "tests/test_ext_marshmallow_openapi.py::TestMarshmallowSchemaToParameters::test_raises_error_if_not_a_schema[2.0]", "tests/test_ext_marshmallow_openapi.py::TestMarshmallowSchemaToParameters::test_raises_error_if_not_a_schema[3.0.0]", "tests/test_ext_marshmallow_openapi.py::TestNesting::test_schema2jsonschema_with_nested_fields[2.0]", "tests/test_ext_marshmallow_openapi.py::TestNesting::test_schema2jsonschema_with_nested_fields[3.0.0]", "tests/test_ext_marshmallow_openapi.py::TestNesting::test_schema2jsonschema_with_nested_fields_only_exclude[2.0-only]", "tests/test_ext_marshmallow_openapi.py::TestNesting::test_schema2jsonschema_with_nested_fields_only_exclude[2.0-exclude]", "tests/test_ext_marshmallow_openapi.py::TestNesting::test_schema2jsonschema_with_nested_fields_only_exclude[3.0.0-only]", "tests/test_ext_marshmallow_openapi.py::TestNesting::test_schema2jsonschema_with_nested_fields_only_exclude[3.0.0-exclude]", "tests/test_ext_marshmallow_openapi.py::TestNesting::test_schema2jsonschema_with_nested_fields_with_adhoc_changes[2.0]", "tests/test_ext_marshmallow_openapi.py::TestNesting::test_schema2jsonschema_with_nested_fields_with_adhoc_changes[3.0.0]", "tests/test_ext_marshmallow_openapi.py::TestNesting::test_schema2jsonschema_with_nested_excluded_fields[2.0]", "tests/test_ext_marshmallow_openapi.py::TestNesting::test_schema2jsonschema_with_nested_excluded_fields[3.0.0]", "tests/test_ext_marshmallow_openapi.py::test_openapi_tools_validate_v2", "tests/test_ext_marshmallow_openapi.py::test_openapi_tools_validate_v3", "tests/test_ext_marshmallow_openapi.py::TestFieldValidation::test_properties[2.0-range-properties0]", 
"tests/test_ext_marshmallow_openapi.py::TestFieldValidation::test_properties[2.0-range_no_upper-properties1]", "tests/test_ext_marshmallow_openapi.py::TestFieldValidation::test_properties[2.0-multiple_ranges-properties2]", "tests/test_ext_marshmallow_openapi.py::TestFieldValidation::test_properties[2.0-list_length-properties3]", "tests/test_ext_marshmallow_openapi.py::TestFieldValidation::test_properties[2.0-custom_list_length-properties4]", "tests/test_ext_marshmallow_openapi.py::TestFieldValidation::test_properties[2.0-string_length-properties5]", "tests/test_ext_marshmallow_openapi.py::TestFieldValidation::test_properties[2.0-custom_field_length-properties6]", "tests/test_ext_marshmallow_openapi.py::TestFieldValidation::test_properties[2.0-multiple_lengths-properties7]", "tests/test_ext_marshmallow_openapi.py::TestFieldValidation::test_properties[2.0-equal_length-properties8]", "tests/test_ext_marshmallow_openapi.py::TestFieldValidation::test_properties[3.0.0-range-properties0]", "tests/test_ext_marshmallow_openapi.py::TestFieldValidation::test_properties[3.0.0-range_no_upper-properties1]", "tests/test_ext_marshmallow_openapi.py::TestFieldValidation::test_properties[3.0.0-multiple_ranges-properties2]", "tests/test_ext_marshmallow_openapi.py::TestFieldValidation::test_properties[3.0.0-list_length-properties3]", "tests/test_ext_marshmallow_openapi.py::TestFieldValidation::test_properties[3.0.0-custom_list_length-properties4]", "tests/test_ext_marshmallow_openapi.py::TestFieldValidation::test_properties[3.0.0-string_length-properties5]", "tests/test_ext_marshmallow_openapi.py::TestFieldValidation::test_properties[3.0.0-custom_field_length-properties6]", "tests/test_ext_marshmallow_openapi.py::TestFieldValidation::test_properties[3.0.0-multiple_lengths-properties7]", "tests/test_ext_marshmallow_openapi.py::TestFieldValidation::test_properties[3.0.0-equal_length-properties8]" ]
[]
MIT License
9,163
1,077
[ "src/apispec/ext/marshmallow/field_converter.py" ]
LeMyst__WikibaseIntegrator-58
30c2f766931f442a2ec7160073331811859a840d
2020-12-13 20:45:56
f2256ba1c07c5320134507fda04b6478a2cd5423
diff --git a/wikibaseintegrator/wbi_core.py b/wikibaseintegrator/wbi_core.py index 1fdf93a..0b94f3e 100644 --- a/wikibaseintegrator/wbi_core.py +++ b/wikibaseintegrator/wbi_core.py @@ -37,7 +37,7 @@ class ItemEngine(object): :type new_item: True or False :param data: a dictionary with property strings as keys and the data which should be written to a item as the property values - :type data: List[BaseDataType] + :type data: List[BaseDataType] or BaseDataType :param append_value: a list of properties where potential existing values should not be overwritten by the data passed in the :parameter data. :type append_value: list of property number strings @@ -109,7 +109,14 @@ class ItemEngine(object): 'PROPERTY_CONSTRAINT_PID'] if property_constraint_pid is None else property_constraint_pid self.distinct_values_constraint_qid = config[ 'DISTINCT_VALUES_CONSTRAINT_QID'] if distinct_values_constraint_qid is None else distinct_values_constraint_qid - self.data = [] if data is None else data + if data is None: + self.data = [] + elif isinstance(data, BaseDataType): + self.data = [data] + elif not isinstance(data, list): + raise TypeError("data must be a list or an instance of BaseDataType") + else: + self.data = data self.append_value = [] if append_value is None else append_value self.fast_run = fast_run self.fast_run_base_filter = fast_run_base_filter @@ -757,7 +764,7 @@ class ItemEngine(object): lang = config['DEFAULT_LANGUAGE'] if lang is None else lang if not isinstance(aliases, list): - raise ValueError('aliases must be a list') + raise TypeError('aliases must be a list') if if_exists != 'APPEND' and if_exists != 'REPLACE': raise ValueError('{} is not a valid value for if_exists (REPLACE or APPEND)'.format(if_exists))
Bug: when ItemEngine `data=` is given a single object instead of a list of objects, it spews a "could not sort" error
It should check whether it got a list and output a helpful error message instead.
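The constructor change in the patch above normalises `data` before it is used. Below is a minimal standalone sketch of that normalisation, with a placeholder `BaseDataType` class standing in for wikibaseintegrator's real base class:

```python
class BaseDataType:
    """Placeholder for wikibaseintegrator's statement base class."""


def normalize_data(data):
    # Accept None, a single statement, or a list of statements;
    # reject anything else with a clear error instead of a sort failure.
    if data is None:
        return []
    if isinstance(data, BaseDataType):
        return [data]
    if not isinstance(data, list):
        raise TypeError("data must be a list or an instance of BaseDataType")
    return data


print(normalize_data(None))            # []
print(normalize_data(BaseDataType()))  # [<__main__.BaseDataType object at ...>]
try:
    normalize_data("P31")
except TypeError as exc:
    print(exc)                         # data must be a list or an instance of BaseDataType
```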
LeMyst/WikibaseIntegrator
diff --git a/wikibaseintegrator/tests/test_item_creation.py b/wikibaseintegrator/tests/test_item_creation.py index 7f7bf40..31fe271 100644 --- a/wikibaseintegrator/tests/test_item_creation.py +++ b/wikibaseintegrator/tests/test_item_creation.py @@ -39,8 +39,12 @@ class TestItemCreation(unittest.TestCase): for d in data: item = wbi_core.ItemEngine(new_item=True, data=[d], core_props=core_props) assert item.get_json_representation() + item = wbi_core.ItemEngine(new_item=True, data=d, core_props=core_props) + assert item.get_json_representation() item = wbi_core.ItemEngine(new_item=True, data=[d], core_props=set()) assert item.get_json_representation() + item = wbi_core.ItemEngine(new_item=True, data=d, core_props=set()) + assert item.get_json_representation() item = wbi_core.ItemEngine(new_item=True, data=data, core_props=core_props) assert item.get_json_representation()
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 1 }, "num_modified_files": 1 }
0.9
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "numpy>=1.16.0", "pandas>=1.0.0", "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "requirements/base.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
backoff==2.2.1 certifi==2025.1.31 charset-normalizer==3.4.1 exceptiongroup==1.2.2 idna==3.10 iniconfig==2.1.0 mwoauth==0.4.0 numpy==2.0.2 oauthlib==3.2.2 packaging==24.2 pandas==2.2.3 pluggy==1.5.0 PyJWT==2.10.1 pytest==8.3.5 python-dateutil==2.9.0.post0 pytz==2025.2 requests==2.32.3 requests-oauthlib==2.0.0 simplejson==3.20.1 six==1.17.0 tomli==2.2.1 tzdata==2025.2 urllib3==2.3.0 -e git+https://github.com/LeMyst/WikibaseIntegrator.git@30c2f766931f442a2ec7160073331811859a840d#egg=wikibaseintegrator
name: WikibaseIntegrator channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - backoff==2.2.1 - certifi==2025.1.31 - charset-normalizer==3.4.1 - exceptiongroup==1.2.2 - idna==3.10 - iniconfig==2.1.0 - mwoauth==0.4.0 - numpy==2.0.2 - oauthlib==3.2.2 - packaging==24.2 - pandas==2.2.3 - pluggy==1.5.0 - pyjwt==2.10.1 - pytest==8.3.5 - python-dateutil==2.9.0.post0 - pytz==2025.2 - requests==2.32.3 - requests-oauthlib==2.0.0 - simplejson==3.20.1 - six==1.17.0 - tomli==2.2.1 - tzdata==2025.2 - urllib3==2.3.0 prefix: /opt/conda/envs/WikibaseIntegrator
[ "wikibaseintegrator/tests/test_item_creation.py::TestItemCreation::test_new_item_creation" ]
[]
[]
[]
MIT License
9,169
491
[ "wikibaseintegrator/wbi_core.py" ]
napalm-automation-community__napalm-ros-86
0e5d11b2890559ae4321c21278746a6424f2dc65
2020-12-15 12:42:33
0e5d11b2890559ae4321c21278746a6424f2dc65
floatingstatic: @luqasz I did not bump the version (it is still `1.0.0` in `setup.py` and `0.8.0` in `.bumpversion.cfg`). I'm not sure what you are using at this point to tag releases. Could you let me know or add whatever version to this commit and do release via pypi if/when we merge this? Thanks!
diff --git a/napalm_ros/ros.py b/napalm_ros/ros.py index 84a566f..2fe20d7 100644 --- a/napalm_ros/ros.py +++ b/napalm_ros/ros.py @@ -58,10 +58,10 @@ class ROSDriver(NetworkDriver): for iface in self.api('/interface/print', stats=True): result[iface['name']] = defaultdict(int) stats = result[iface['name']] - stats['tx_errors'] += iface['tx-error'] - stats['rx_errors'] += iface['rx-error'] - stats['tx_discards'] += iface['tx-drop'] - stats['rx_discards'] += iface['rx-drop'] + stats['tx_errors'] += iface.get('tx-error', 0) + stats['rx_errors'] += iface.get('rx-error', 0) + stats['tx_discards'] += iface.get('tx-drop', 0) + stats['rx_discards'] += iface.get('rx-drop', 0) stats['tx_octets'] += iface['tx-byte'] stats['rx_octets'] += iface['rx-byte'] stats['tx_unicast_packets'] += iface['tx-packet']
Method get_interfaces_counters failed: 'tx-error'
### Description of Issue/Question

get_interfaces_counters doesn't work. It produces this error:
```
GET /api/dcim/devices/2/napalm/?method=get_interfaces_counters
HTTP 200 OK
Allow: GET, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "get_interfaces_counters": {
        "error": "Method get_interfaces_counters failed: 'tx-error'"
    }
}
```

### Setup

### napalm-ros version
Installed first with pip and then also from this GitHub repository with: `python3 ./setup.py install`
```
napalm-ros==1.0.0
```

### ROS version
```
 "vendor": "MikroTik",
 "model": "RB4011iGS+",
 "os_version": "6.47.6 (stable)",
```

Am I using the latest version if I installed it from git with `setup.py install`?

Thanks.
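The per-counter `dict.get(..., 0)` change in the patch above works because RouterOS simply omits `tx-error`/`rx-error`/`tx-drop`/`rx-drop` for interfaces that are not running, so subscripting raises `KeyError` while `.get()` with a default does not. A small illustration, using the same shape of data as the mocked non-running interface in the test patch:

```python
from collections import defaultdict

# Example API row for a non-running interface: error/drop counters are absent.
iface = {"name": "ether1poe", "tx-byte": 0, "rx-byte": 0}

stats = defaultdict(int)
# iface["tx-error"] would raise KeyError here; .get() with a default does not.
stats["tx_errors"] += iface.get("tx-error", 0)
stats["rx_errors"] += iface.get("rx-error", 0)
stats["tx_octets"] += iface["tx-byte"]
stats["rx_octets"] += iface["rx-byte"]
print(dict(stats))
# {'tx_errors': 0, 'rx_errors': 0, 'tx_octets': 0, 'rx_octets': 0}
```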
napalm-automation-community/napalm-ros
diff --git a/tests/unit/mocked_data/test_get_interfaces_counters/running_false/_interface_print.json b/tests/unit/mocked_data/test_get_interfaces_counters/running_false/_interface_print.json new file mode 100644 index 0000000..3835e3f --- /dev/null +++ b/tests/unit/mocked_data/test_get_interfaces_counters/running_false/_interface_print.json @@ -0,0 +1,28 @@ +{ + "data": + [ + { + ".id": "*1", + "actual-mtu": 1500, + "default-name": "ether1", + "disabled": false, + "fp-rx-byte": 0, + "fp-rx-packet": 0, + "fp-tx-byte": 0, + "fp-tx-packet": 0, + "l2mtu": 1592, + "link-downs": 0, + "mac-address": "D4:CA:6D:26:05:F7", + "max-l2mtu": 9578, + "mtu": 1500, + "name": "ether1poe", + "running": false, + "rx-byte": 0, + "rx-packet": 0, + "tx-byte": 0, + "tx-packet": 0, + "tx-queue-drop": 0, + "type": "ether" + } + ] +} diff --git a/tests/unit/mocked_data/test_get_interfaces_counters/running_false/expected_result.json b/tests/unit/mocked_data/test_get_interfaces_counters/running_false/expected_result.json new file mode 100644 index 0000000..22f377d --- /dev/null +++ b/tests/unit/mocked_data/test_get_interfaces_counters/running_false/expected_result.json @@ -0,0 +1,16 @@ +{ + "ether1poe": { + "rx_unicast_packets": 0, + "rx_octets": 0, + "rx_errors": 0, + "rx_discards": 0, + "tx_unicast_packets": 0, + "tx_octets": 0, + "tx_errors": 0, + "tx_discards": 0, + "tx_multicast_packets": 0, + "rx_multicast_packets": 0, + "tx_broadcast_packets": 0, + "rx_broadcast_packets": 0 + } +}
{ "commit_name": "merge_commit", "failed_lite_validators": [], "has_test_patch": true, "is_lite": true, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 3 }, "num_modified_files": 1 }
1.0
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "napalm" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "requirements/base.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
bcrypt==4.3.0 certifi==2025.1.31 cffi==1.17.1 charset-normalizer==3.4.1 ciscoconfparse==1.9.52 cryptography==44.0.2 Deprecated==1.2.18 dnspython==2.7.0 exceptiongroup==1.2.2 future==1.0.0 hier-config==2.2.3 idna==3.10 iniconfig==2.1.0 Jinja2==3.1.6 junos-eznc==2.7.3 librouteros==3.4.1 loguru==0.7.2 lxml==5.3.1 markdown-it-py==3.0.0 MarkupSafe==3.0.2 mdurl==0.1.2 napalm==3.4.1 -e git+https://github.com/napalm-automation-community/napalm-ros.git@0e5d11b2890559ae4321c21278746a6424f2dc65#egg=napalm_ros ncclient==0.6.15 netaddr==1.3.0 netmiko==3.4.0 netutils==1.12.0 ntc_templates==7.8.0 packaging==24.2 paramiko==3.5.1 passlib==1.7.4 pluggy==1.5.0 pycparser==2.22 pyeapi==1.0.4 Pygments==2.19.1 PyNaCl==1.5.0 pyparsing==3.2.3 pyserial==3.5 pytest==8.3.5 PyYAML==6.0.2 requests==2.32.3 rich==14.0.0 ruamel.yaml==0.18.10 ruamel.yaml.clib==0.2.12 scp==0.15.0 six==1.17.0 tenacity==9.0.0 textfsm==1.1.3 toml==0.10.2 tomli==2.2.1 transitions==0.9.2 ttp==0.9.5 ttp-templates==0.3.7 typing_extensions==4.13.0 urllib3==2.3.0 wrapt==1.17.2 yamlordereddictloader==0.4.2
name: napalm-ros channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - bcrypt==4.3.0 - certifi==2025.1.31 - cffi==1.17.1 - charset-normalizer==3.4.1 - ciscoconfparse==1.9.52 - cryptography==44.0.2 - deprecated==1.2.18 - dnspython==2.7.0 - exceptiongroup==1.2.2 - future==1.0.0 - hier-config==2.2.3 - idna==3.10 - iniconfig==2.1.0 - jinja2==3.1.6 - junos-eznc==2.7.3 - librouteros==3.4.1 - loguru==0.7.2 - lxml==5.3.1 - markdown-it-py==3.0.0 - markupsafe==3.0.2 - mdurl==0.1.2 - napalm==3.4.1 - ncclient==0.6.15 - netaddr==1.3.0 - netmiko==3.4.0 - netutils==1.12.0 - ntc-templates==7.8.0 - packaging==24.2 - paramiko==3.5.1 - passlib==1.7.4 - pluggy==1.5.0 - pycparser==2.22 - pyeapi==1.0.4 - pygments==2.19.1 - pynacl==1.5.0 - pyparsing==3.2.3 - pyserial==3.5 - pytest==8.3.5 - pyyaml==6.0.2 - requests==2.32.3 - rich==14.0.0 - ruamel-yaml==0.18.10 - ruamel-yaml-clib==0.2.12 - scp==0.15.0 - six==1.17.0 - tenacity==9.0.0 - textfsm==1.1.3 - toml==0.10.2 - tomli==2.2.1 - transitions==0.9.2 - ttp==0.9.5 - ttp-templates==0.3.7 - typing-extensions==4.13.0 - urllib3==2.3.0 - wrapt==1.17.2 - yamlordereddictloader==0.4.2 prefix: /opt/conda/envs/napalm-ros
[ "tests/unit/test_getters.py::TestGetter::test_get_interfaces_counters[running_false]" ]
[ "tests/unit/test_getters.py::TestGetter::test_method_signatures", "tests/unit/test_getters.py::TestGetter::test_get_interfaces[normal]", "tests/unit/test_getters.py::TestGetter::test_get_interfaces[last-link-up-time]", "tests/unit/test_getters.py::TestGetter::test_get_config_filtered[no_test_case_found]" ]
[ "tests/unit/test_getters.py::TestGetter::test_is_alive[normal]", "tests/unit/test_getters.py::TestGetter::test_get_facts[pc]", "tests/unit/test_getters.py::TestGetter::test_get_lldp_neighbors[multi_interface]", "tests/unit/test_getters.py::TestGetter::test_get_interfaces_counters[normal]", "tests/unit/test_getters.py::TestGetter::test_get_environment[pc]", "tests/unit/test_getters.py::TestGetter::test_get_environment[empty]", "tests/unit/test_getters.py::TestGetter::test_get_bgp_neighbors[ipv4-only]", "tests/unit/test_getters.py::TestGetter::test_get_lldp_neighbors_detail[multi_interface]", "tests/unit/test_getters.py::TestGetter::test_get_bgp_neighbors_detail[normal]", "tests/unit/test_getters.py::TestGetter::test_get_arp_table[normal]", "tests/unit/test_getters.py::TestGetter::test_get_arp_table_with_vrf[normal]", "tests/unit/test_getters.py::TestGetter::test_get_ipv6_neighbors_table[normal]", "tests/unit/test_getters.py::TestGetter::test_get_ntp_servers[multiple_dns]", "tests/unit/test_getters.py::TestGetter::test_get_ntp_servers[ip_only]", "tests/unit/test_getters.py::TestGetter::test_get_ntp_servers[ip_and_fqdn]", "tests/unit/test_getters.py::TestGetter::test_get_interfaces_ip[ipv4]", "tests/unit/test_getters.py::TestGetter::test_get_interfaces_ip[both]", "tests/unit/test_getters.py::TestGetter::test_get_mac_address_table[static_and_dynamic]", "tests/unit/test_getters.py::TestGetter::test_get_snmp_information[normal]", "tests/unit/test_getters.py::TestGetter::test_ping[localhost]", "tests/unit/test_getters.py::TestGetter::test_get_users[no_keys]", "tests/unit/test_getters.py::TestGetter::test_get_network_instances[single]", "tests/utils/test_utils.py::test_to_seconds[60s-60]", "tests/utils/test_utils.py::test_to_seconds[6s-6]", "tests/utils/test_utils.py::test_to_seconds[1m10s-70]", "tests/utils/test_utils.py::test_to_seconds[1h1m10s-3670]", "tests/utils/test_utils.py::test_to_seconds[1d1h1m10s-90070]", "tests/utils/test_utils.py::test_to_seconds[1w1d1h1m10s-694870]", "tests/utils/test_utils.py::test_iface_addresses[passed0-expected0]", "tests/utils/test_utils.py::test_iface_addresses_empty" ]
[]
null
9,179
279
[ "napalm_ros/ros.py" ]
microcombustion__ctwrap-61
9d8cf5f0afaad0995f38a0b670280465cd84b7a3
2020-12-15 19:39:42
7d9d3704470b59cafea8faf8f0d832dbbf56b02f
diff --git a/ctwrap/simulation.py b/ctwrap/simulation.py index ea81fb5..4048309 100644 --- a/ctwrap/simulation.py +++ b/ctwrap/simulation.py @@ -59,6 +59,7 @@ class Simulation(object): self._module = module self._output = output self.data = None + self._errored = False # ensure that module is well formed mod = self._load_module() @@ -140,7 +141,15 @@ class Simulation(object): config = module.defaults() config = Parser(config) - self.data = module.run(name, **config, **kwargs) + try: + self.data = module.run(name, **config, **kwargs) + self._errored = False + except Exception as err: + # Convert exception to warning + msg = "Simulation of '{}' for '{}' failed with error message:\n{}".format(module.__name__, name, err) + warnings.warn(msg, RuntimeWarning) + self.data = {name: (type(err).__name__, str(err))} + self._errored = True def defaults(self) -> Dict[str, Any]: """Pass-through returning simulation module defaults as a dictionary""" @@ -175,7 +184,7 @@ class Simulation(object): filename = Path(filepath) / filename # file check - fexists = Path.is_file(filename) + fexists = Path(filename).is_file() if fexists: with h5py.File(filename, 'r') as hdf: @@ -193,7 +202,13 @@ class Simulation(object): if formatt in supported: module = self._load_module() if hasattr(module, 'save'): - module.save(filename, self.data, task, **output) + if self._errored: + with h5py.File(filename, mode) as hdf: + for group, err in self.data.items(): + grp = hdf.create_group(group) + grp.attrs[err[0]] = err[1] + else: + module.save(filename, self.data, task, **output) else: raise AttributeError("{} simulation module has no method 'save' " "but output format was defined in configuration "
Parallel batch jobs fail silently. While failing parallel jobs should not raise an error, some documentation of the failure needs to be generated.
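Editor's note: the patch above downgrades exceptions raised by a simulation module to `RuntimeWarning`s and keeps the `(exception type, message)` pair for the output file. Below is a minimal sketch of that pattern with hypothetical `run_task`/`boom` names (not part of ctwrap).

```python
# Sketch of the error-to-warning pattern introduced by the patch (illustration only).
import warnings

def run_task(name, task):
    """Run *task*; convert failures to warnings and return (data, errored)."""
    try:
        return task(), False
    except Exception as err:
        msg = "Simulation of '{}' failed with error message:\n{}".format(name, err)
        warnings.warn(msg, RuntimeWarning)
        return (type(err).__name__, str(err)), True

def boom():
    raise RuntimeError("Hello world!")

data, errored = run_task("foo_1", boom)
print(data, errored)  # ('RuntimeError', 'Hello world!') True
```

In the actual patch the `(type, message)` pair is then written as an HDF5 group attribute instead of the regular simulation output.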
microcombustion/ctwrap
diff --git a/tests/invalid.py b/tests/invalid.py new file mode 100644 index 0000000..9f9f760 --- /dev/null +++ b/tests/invalid.py @@ -0,0 +1,22 @@ +"""Invalid module - raises error""" + + +def defaults(): + """Returns dictionary containing default arguments""" + return {'foo': 0.2} + + +def run(name, foo=.2): + """Simply raise error""" + raise RuntimeError("Hello world!") + + +def save(filename, data, task=None): + """Does nothing for this example""" + pass + + +if __name__ == "__main__": + """ Main function """ + config = defaults() + out = run('main', **config) diff --git a/tests/test_wrapper.py b/tests/test_wrapper.py index 367f7ea..375fef7 100644 --- a/tests/test_wrapper.py +++ b/tests/test_wrapper.py @@ -8,6 +8,7 @@ from pathlib import Path import subprocess import pint.quantity as pq import importlib +import h5py import warnings # add exception as pywintypes imports a deprecated module @@ -40,37 +41,35 @@ class TestWrap(unittest.TestCase): _path = None _strategy = 'sequence' + def setUp(self): + self.sim = cw.Simulation.from_module(self._module) + self.sh = cw.SimulationHandler.from_yaml(self._yaml, strategy=self._strategy, path=EXAMPLES) + def tearDown(self): if self._hdf: [hdf.unlink() for hdf in Path(EXAMPLES).glob('*.h5')] [hdf.unlink() for hdf in Path(ROOT).glob('*.h5')] def test_simulation(self): - sim = cw.Simulation.from_module(self._module) - self.assertIsNone(sim.data) - sim.run() - self.assertIsInstance(sim.data, dict) - for key in sim.data.keys(): + self.assertIsNone(self.sim.data) + self.sim.run() + self.assertIsInstance(self.sim.data, dict) + for key in self.sim.data.keys(): self.assertIn('defaults', key) def test_handler(self): - sh = cw.SimulationHandler.from_yaml(self._yaml, strategy=self._strategy, path=EXAMPLES) - self.assertIsInstance(sh.tasks, dict) - self.assertIn(self._task, sh.tasks) + self.assertIsInstance(self.sh.tasks, dict) + self.assertIn(self._task, self.sh.tasks) def test_serial(self): - sim = cw.Simulation.from_module(self._module) - sh = cw.SimulationHandler.from_yaml(self._yaml, strategy=self._strategy, path=EXAMPLES) - self.assertTrue(sh.run_serial(sim)) + self.assertTrue(self.sh.run_serial(self.sim)) if self._hdf: hdf = Path(EXAMPLES) / self._hdf self.assertTrue(hdf.is_file()) def test_parallel(self): - sim = cw.Simulation.from_module(self._module) - sh = cw.SimulationHandler.from_yaml(self._yaml, strategy=self._strategy, path=EXAMPLES) - self.assertTrue(sh.run_parallel(sim)) + self.assertTrue(self.sh.run_parallel(self.sim)) if self._hdf: hdf = Path(EXAMPLES) / self._hdf @@ -148,5 +147,43 @@ class TestAdiabaticFlame(TestWrap): _strategy = None +class TestInvalid(TestWrap): + + _module = str(ROOT / 'tests' / 'invalid.py') + _task = 'foo_1' + _dict = { + 'strategy': {'sequence': {'foo': [0, 1, 2]}}, + 'defaults': {'foo': None}, + 'output': {'name': 'invalid', 'format': 'h5'}, + 'ctwrap': '0.2.0' + } + + def setUp(self): + self.sim = cw.Simulation.from_module(self._module) + self.sh = cw.SimulationHandler.from_dict(self._dict) + + def tearDown(self): + h5 = Path('invalid.h5') + if h5.is_file(): + with h5py.File(h5, 'r') as hdf: + for data in hdf.values(): + for attr in data.attrs: + self.assertEqual(attr, 'RuntimeError') + self.assertEqual(data.attrs[attr], "Hello world!") + h5.unlink() + + def test_simulation(self): + with self.assertWarnsRegex(RuntimeWarning, "Hello world!"): + super().test_simulation() + + def test_serial(self): + with self.assertWarnsRegex(RuntimeWarning, "Hello world!"): + super().test_serial() + + def test_commandline(self): + # 
skip test (does not use setUp and is more involved) + pass + + if __name__ == "__main__": unittest.main()
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 2, "test_score": 0 }, "num_modified_files": 1 }
0.1
{ "env_vars": null, "env_yml_path": [ "environment.yml" ], "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "environment.yml", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.7", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
aiofiles @ file:///home/conda/feedstock_root/build_artifacts/aiofiles_1664378549280/work aiosqlite @ file:///home/conda/feedstock_root/build_artifacts/aiosqlite_1715928379913/work anyio @ file:///home/conda/feedstock_root/build_artifacts/anyio_1688651106312/work/dist argon2-cffi @ file:///home/conda/feedstock_root/build_artifacts/argon2-cffi_1692818318753/work argon2-cffi-bindings @ file:///home/conda/feedstock_root/build_artifacts/argon2-cffi-bindings_1649500320262/work arrow @ file:///home/conda/feedstock_root/build_artifacts/arrow_1662382474514/work attrs @ file:///home/conda/feedstock_root/build_artifacts/attrs_1722977137225/work Babel @ file:///home/conda/feedstock_root/build_artifacts/babel_1702422572539/work backcall @ file:///home/conda/feedstock_root/build_artifacts/backcall_1592338393461/work backports.functools-lru-cache @ file:///home/conda/feedstock_root/build_artifacts/backports.functools_lru_cache_1702571698061/work beautifulsoup4 @ file:///home/conda/feedstock_root/build_artifacts/beautifulsoup4_1705564648255/work bleach @ file:///home/conda/feedstock_root/build_artifacts/bleach_1696630167146/work Brotli @ file:///home/conda/feedstock_root/build_artifacts/brotli-split_1648883617327/work cached-property @ file:///home/conda/feedstock_root/build_artifacts/cached_property_1615209429212/work Cantera @ file:///home/conda/feedstock_root/build_artifacts/cantera-recipe_1663034147571/work/build/python certifi @ file:///home/conda/feedstock_root/build_artifacts/certifi_1725278078093/work/certifi cffi @ file:///home/conda/feedstock_root/build_artifacts/cffi_1666183775483/work charset-normalizer @ file:///home/conda/feedstock_root/build_artifacts/charset-normalizer_1728479282467/work colorama @ file:///home/conda/feedstock_root/build_artifacts/colorama_1666700638685/work comm @ file:///home/conda/feedstock_root/build_artifacts/comm_1710320294760/work coverage @ file:///home/conda/feedstock_root/build_artifacts/coverage_1664603898546/work -e git+https://github.com/microcombustion/ctwrap.git@9d8cf5f0afaad0995f38a0b670280465cd84b7a3#egg=ctwrap cycler @ file:///home/conda/feedstock_root/build_artifacts/cycler_1635519461629/work debugpy @ file:///home/conda/feedstock_root/build_artifacts/debugpy_1660619049122/work decorator @ file:///home/conda/feedstock_root/build_artifacts/decorator_1641555617451/work defusedxml @ file:///home/conda/feedstock_root/build_artifacts/defusedxml_1615232257335/work entrypoints @ file:///home/conda/feedstock_root/build_artifacts/entrypoints_1643888246732/work exceptiongroup @ file:///home/conda/feedstock_root/build_artifacts/exceptiongroup_1720869315914/work fastjsonschema @ file:///home/conda/feedstock_root/build_artifacts/python-fastjsonschema_1718477020893/work/dist fonttools @ file:///home/conda/feedstock_root/build_artifacts/fonttools_1666389892786/work fqdn @ file:///home/conda/feedstock_root/build_artifacts/fqdn_1638810296540/work/dist h5py @ file:///home/conda/feedstock_root/build_artifacts/h5py_1660488104552/work idna @ file:///home/conda/feedstock_root/build_artifacts/idna_1726459485162/work importlib-metadata @ file:///home/conda/feedstock_root/build_artifacts/importlib-metadata_1653252814274/work importlib-resources @ file:///home/conda/feedstock_root/build_artifacts/importlib_resources_1688813467203/work iniconfig @ file:///home/conda/feedstock_root/build_artifacts/iniconfig_1673103042956/work ipykernel @ file:///home/conda/feedstock_root/build_artifacts/ipykernel_1666723258080/work ipython @ 
file:///home/conda/feedstock_root/build_artifacts/ipython_1651240553635/work ipython_genutils @ file:///home/conda/feedstock_root/build_artifacts/ipython_genutils_1716278396992/work ipywidgets @ file:///home/conda/feedstock_root/build_artifacts/ipywidgets_1724334859652/work isoduration @ file:///home/conda/feedstock_root/build_artifacts/isoduration_1638811571363/work/dist jedi @ file:///home/conda/feedstock_root/build_artifacts/jedi_1696326070614/work Jinja2 @ file:///home/conda/feedstock_root/build_artifacts/jinja2_1715127149914/work json5 @ file:///home/conda/feedstock_root/build_artifacts/json5_1712986206667/work jsonpointer==2.0 jsonschema @ file:///home/conda/feedstock_root/build_artifacts/jsonschema-meta_1669810440410/work jupyter @ file:///home/conda/feedstock_root/build_artifacts/jupyter_1725037521377/work jupyter-console @ file:///home/conda/feedstock_root/build_artifacts/jupyter_console_1676328545892/work jupyter-events @ file:///home/conda/feedstock_root/build_artifacts/jupyter_events_1690301630599/work jupyter-server @ file:///home/conda/feedstock_root/build_artifacts/jupyter_server_1671591499479/work jupyter-ydoc @ file:///home/conda/feedstock_root/build_artifacts/jupyter_ydoc_1685535850115/work/dist jupyter_client @ file:///home/conda/feedstock_root/build_artifacts/jupyter_client_1673615989977/work jupyter_core @ file:///home/conda/feedstock_root/build_artifacts/jupyter_core_1658332345782/work jupyter_server_fileid @ file:///home/conda/feedstock_root/build_artifacts/jupyter_server_fileid_1714390608391/work jupyter_server_ydoc @ file:///home/conda/feedstock_root/build_artifacts/jupyter_server_ydoc_1678043727957/work jupyterlab @ file:///home/conda/feedstock_root/build_artifacts/jupyterlab_1724937868967/work jupyterlab_pygments @ file:///home/conda/feedstock_root/build_artifacts/jupyterlab_pygments_1700744013163/work jupyterlab_server @ file:///home/conda/feedstock_root/build_artifacts/jupyterlab_server_1690205927615/work jupyterlab_widgets @ file:///home/conda/feedstock_root/build_artifacts/jupyterlab_widgets_1724331334887/work kiwisolver @ file:///home/conda/feedstock_root/build_artifacts/kiwisolver_1657953088445/work MarkupSafe @ file:///home/conda/feedstock_root/build_artifacts/markupsafe_1648737551960/work matplotlib @ file:///home/conda/feedstock_root/build_artifacts/matplotlib-suite_1661439848456/work matplotlib-inline @ file:///home/conda/feedstock_root/build_artifacts/matplotlib-inline_1713250518406/work mistune @ file:///home/conda/feedstock_root/build_artifacts/mistune_1698947099619/work munkres==1.1.4 nbclassic @ file:///home/conda/feedstock_root/build_artifacts/nbclassic_1716838762700/work nbclient @ file:///home/conda/feedstock_root/build_artifacts/nbclient_1665125402713/work nbconvert @ file:///home/conda/feedstock_root/build_artifacts/nbconvert-meta_1687202153002/work nbformat @ file:///home/conda/feedstock_root/build_artifacts/nbformat_1679336765223/work nest_asyncio @ file:///home/conda/feedstock_root/build_artifacts/nest-asyncio_1705850609492/work notebook @ file:///home/conda/feedstock_root/build_artifacts/notebook_1715848908871/work notebook_shim @ file:///home/conda/feedstock_root/build_artifacts/notebook-shim_1707957777232/work numpy @ file:///home/conda/feedstock_root/build_artifacts/numpy_1649806299270/work packaging @ file:///home/conda/feedstock_root/build_artifacts/packaging_1696202382185/work pandocfilters @ file:///home/conda/feedstock_root/build_artifacts/pandocfilters_1631603243851/work parso @ 
file:///home/conda/feedstock_root/build_artifacts/parso_1712320355065/work pexpect @ file:///home/conda/feedstock_root/build_artifacts/pexpect_1706113125309/work pickleshare @ file:///home/conda/feedstock_root/build_artifacts/pickleshare_1602536217715/work Pillow @ file:///home/conda/feedstock_root/build_artifacts/pillow_1660385854171/work Pint @ file:///home/conda/feedstock_root/build_artifacts/pint_1635280347517/work pkgutil_resolve_name @ file:///home/conda/feedstock_root/build_artifacts/pkgutil-resolve-name_1694617248815/work pluggy @ file:///home/conda/feedstock_root/build_artifacts/pluggy_1648772594554/work ply @ file:///home/conda/feedstock_root/build_artifacts/ply_1712242996588/work prometheus-client @ file:///home/conda/feedstock_root/build_artifacts/prometheus_client_1689032443210/work prompt_toolkit @ file:///home/conda/feedstock_root/build_artifacts/prompt-toolkit_1727341649933/work psutil @ file:///home/conda/feedstock_root/build_artifacts/psutil_1666155398032/work ptyprocess @ file:///home/conda/feedstock_root/build_artifacts/ptyprocess_1609419310487/work/dist/ptyprocess-0.7.0-py2.py3-none-any.whl pycparser @ file:///home/conda/feedstock_root/build_artifacts/pycparser_1636257122734/work Pygments @ file:///home/conda/feedstock_root/build_artifacts/pygments_1700607939962/work pyparsing @ file:///home/conda/feedstock_root/build_artifacts/pyparsing_1724616129934/work PyQt5==5.15.7 PyQt5-sip==12.11.0 pyrsistent @ file:///home/conda/feedstock_root/build_artifacts/pyrsistent_1649013358450/work PySocks @ file:///home/conda/feedstock_root/build_artifacts/pysocks_1648857264451/work pytest @ file:///home/conda/feedstock_root/build_artifacts/pytest_1704035161844/work pytest-cov @ file:///home/conda/feedstock_root/build_artifacts/pytest-cov_1684964868191/work python-dateutil @ file:///home/conda/feedstock_root/build_artifacts/python-dateutil_1709299778482/work python-json-logger @ file:///home/conda/feedstock_root/build_artifacts/python-json-logger_1677079630776/work pytz @ file:///home/conda/feedstock_root/build_artifacts/pytz_1726055524169/work PyYAML @ file:///home/conda/feedstock_root/build_artifacts/pyyaml_1648757092905/work pyzmq @ file:///home/conda/feedstock_root/build_artifacts/pyzmq_1663830492333/work requests @ file:///home/conda/feedstock_root/build_artifacts/requests_1716354486713/work rfc3339-validator @ file:///home/conda/feedstock_root/build_artifacts/rfc3339-validator_1638811747357/work rfc3986-validator @ file:///home/conda/feedstock_root/build_artifacts/rfc3986-validator_1598024191506/work ruamel-yaml-conda @ file:///home/conda/feedstock_root/build_artifacts/ruamel_yaml_1653464404698/work ruamel.yaml @ file:///home/conda/feedstock_root/build_artifacts/ruamel.yaml_1649033206568/work ruamel.yaml.clib @ file:///home/conda/feedstock_root/build_artifacts/ruamel.yaml.clib_1649013068865/work Send2Trash @ file:///home/conda/feedstock_root/build_artifacts/send2trash_1712584999685/work sip @ file:///home/conda/feedstock_root/build_artifacts/sip_1665592359543/work six @ file:///home/conda/feedstock_root/build_artifacts/six_1620240208055/work sniffio @ file:///home/conda/feedstock_root/build_artifacts/sniffio_1708952932303/work soupsieve @ file:///home/conda/feedstock_root/build_artifacts/soupsieve_1658207591808/work terminado @ file:///home/conda/feedstock_root/build_artifacts/terminado_1670253674810/work tinycss2 @ file:///home/conda/feedstock_root/build_artifacts/tinycss2_1729802851396/work toml @ file:///home/conda/feedstock_root/build_artifacts/toml_1604308577558/work tomli @ 
file:///home/conda/feedstock_root/build_artifacts/tomli_1727974628237/work tornado @ file:///home/conda/feedstock_root/build_artifacts/tornado_1656937818679/work traitlets @ file:///home/conda/feedstock_root/build_artifacts/traitlets_1675110562325/work typing_extensions @ file:///home/conda/feedstock_root/build_artifacts/typing_extensions_1688315532570/work unicodedata2 @ file:///home/conda/feedstock_root/build_artifacts/unicodedata2_1649111917568/work uri-template @ file:///home/conda/feedstock_root/build_artifacts/uri-template_1688655812972/work/dist urllib3 @ file:///home/conda/feedstock_root/build_artifacts/urllib3_1708239446578/work wcwidth @ file:///home/conda/feedstock_root/build_artifacts/wcwidth_1699959196938/work webcolors @ file:///home/conda/feedstock_root/build_artifacts/webcolors_1723294704277/work webencodings @ file:///home/conda/feedstock_root/build_artifacts/webencodings_1694681268211/work websocket-client @ file:///home/conda/feedstock_root/build_artifacts/websocket-client_1687789148259/work widgetsnbextension @ file:///home/conda/feedstock_root/build_artifacts/widgetsnbextension_1724331337528/work y-py @ file:///home/conda/feedstock_root/build_artifacts/y-py_1658953063597/work ypy-websocket @ file:///home/conda/feedstock_root/build_artifacts/ypy-websocket_1670333059911/work zipp @ file:///home/conda/feedstock_root/build_artifacts/zipp_1677313463193/work
name: ctwrap channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=conda_forge - _openmp_mutex=4.5=2_gnu - aiofiles=22.1.0=pyhd8ed1ab_0 - aiosqlite=0.20.0=pyhd8ed1ab_0 - alsa-lib=1.2.8=h166bdaf_0 - anyio=3.7.1=pyhd8ed1ab_0 - argon2-cffi=23.1.0=pyhd8ed1ab_0 - argon2-cffi-bindings=21.2.0=py37h540881e_2 - arrow=1.2.3=pyhd8ed1ab_0 - attr=2.5.1=h166bdaf_1 - attrs=24.2.0=pyh71513ae_0 - babel=2.14.0=pyhd8ed1ab_0 - backcall=0.2.0=pyh9f0ad1d_0 - backports=1.0=pyhd8ed1ab_4 - backports.functools_lru_cache=2.0.0=pyhd8ed1ab_0 - beautifulsoup4=4.12.3=pyha770c72_0 - bleach=6.1.0=pyhd8ed1ab_0 - brotli=1.1.0=hb9d3cd8_2 - brotli-bin=1.1.0=hb9d3cd8_2 - brotli-python=1.0.9=py37hd23a5d3_7 - bzip2=1.0.8=h4bc722e_7 - c-ares=1.34.4=hb9d3cd8_0 - ca-certificates=2025.1.31=hbcca054_0 - cached-property=1.5.2=hd8ed1ab_1 - cached_property=1.5.2=pyha770c72_1 - cairo=1.16.0=ha61ee94_1012 - cantera=2.6.0=py37he9ed413_4 - certifi=2024.8.30=pyhd8ed1ab_0 - cffi=1.15.1=py37h43b0acd_1 - charset-normalizer=3.4.0=pyhd8ed1ab_0 - colorama=0.4.6=pyhd8ed1ab_0 - comm=0.2.2=pyhd8ed1ab_0 - coverage=6.5.0=py37h540881e_0 - cycler=0.11.0=pyhd8ed1ab_0 - dbus=1.13.6=h5008d03_3 - debugpy=1.6.3=py37hd23a5d3_0 - decorator=5.1.1=pyhd8ed1ab_0 - defusedxml=0.7.1=pyhd8ed1ab_0 - entrypoints=0.4=pyhd8ed1ab_0 - exceptiongroup=1.2.2=pyhd8ed1ab_0 - expat=2.6.4=h5888daf_0 - fftw=3.3.10=nompi_hf1063bd_110 - fmt=9.1.0=h924138e_0 - font-ttf-dejavu-sans-mono=2.37=hab24e00_0 - font-ttf-inconsolata=3.000=h77eed37_0 - font-ttf-source-code-pro=2.038=h77eed37_0 - font-ttf-ubuntu=0.83=h77eed37_3 - fontconfig=2.15.0=h7e30c49_1 - fonts-conda-ecosystem=1=0 - fonts-conda-forge=1=0 - fonttools=4.38.0=py37h540881e_0 - fqdn=1.5.1=pyhd8ed1ab_0 - freetype=2.13.3=h48d6fc4_0 - gettext=0.23.1=h5888daf_0 - gettext-tools=0.23.1=h5888daf_0 - glib=2.84.0=h07242d1_0 - glib-tools=2.84.0=h4833e2c_0 - gmp=6.3.0=hac33072_2 - graphite2=1.3.13=h59595ed_1003 - gst-plugins-base=1.22.0=h4243ec0_2 - gstreamer=1.22.0=h25f0c4b_2 - gstreamer-orc=0.4.41=h17648ed_0 - h5py=3.7.0=nompi_py37hf1ce037_101 - harfbuzz=6.0.0=h8e241bc_0 - hdf5=1.12.2=nompi_h4df4325_101 - icu=70.1=h27087fc_0 - idna=3.10=pyhd8ed1ab_0 - importlib-metadata=4.11.4=py37h89c1867_0 - importlib_metadata=4.11.4=hd8ed1ab_0 - importlib_resources=6.0.0=pyhd8ed1ab_0 - iniconfig=2.0.0=pyhd8ed1ab_0 - ipykernel=6.16.2=pyh210e3f2_0 - ipython=7.33.0=py37h89c1867_0 - ipython_genutils=0.2.0=pyhd8ed1ab_1 - ipywidgets=8.1.5=pyhd8ed1ab_0 - isoduration=20.11.0=pyhd8ed1ab_0 - jack=1.9.22=h11f4161_0 - jedi=0.19.1=pyhd8ed1ab_0 - jinja2=3.1.4=pyhd8ed1ab_0 - jpeg=9e=h0b41bf4_3 - json5=0.9.25=pyhd8ed1ab_0 - jsonpointer=2.0=py_0 - jsonschema=4.17.3=pyhd8ed1ab_0 - jsonschema-with-format-nongpl=4.17.3=pyhd8ed1ab_0 - jupyter=1.1.1=pyhd8ed1ab_0 - jupyter_client=7.4.9=pyhd8ed1ab_0 - jupyter_console=6.5.1=pyhd8ed1ab_0 - jupyter_core=4.11.1=py37h89c1867_0 - jupyter_events=0.6.3=pyhd8ed1ab_1 - jupyter_server=1.23.4=pyhd8ed1ab_0 - jupyter_server_fileid=0.9.2=pyhd8ed1ab_0 - jupyter_server_ydoc=0.8.0=pyhd8ed1ab_0 - jupyter_ydoc=0.2.4=pyhd8ed1ab_0 - jupyterlab=3.6.8=pyhd8ed1ab_0 - jupyterlab_pygments=0.3.0=pyhd8ed1ab_0 - jupyterlab_server=2.24.0=pyhd8ed1ab_0 - jupyterlab_widgets=3.0.13=pyhd8ed1ab_0 - keyutils=1.6.1=h166bdaf_0 - kiwisolver=1.4.4=py37h7cecad7_0 - krb5=1.20.1=h81ceb04_0 - lame=3.100=h166bdaf_1003 - lcms2=2.14=h6ed2654_0 - ld_impl_linux-64=2.43=h712a8e2_4 - lerc=4.0.0=h27087fc_0 - libaec=1.1.3=h59595ed_0 - libasprintf=0.23.1=h8e693c7_0 - 
libasprintf-devel=0.23.1=h8e693c7_0 - libblas=3.9.0=31_h59b9bed_openblas - libbrotlicommon=1.1.0=hb9d3cd8_2 - libbrotlidec=1.1.0=hb9d3cd8_2 - libbrotlienc=1.1.0=hb9d3cd8_2 - libcantera=2.6.0=h587f4e0_5 - libcap=2.67=he9d0100_0 - libcblas=3.9.0=31_he106b2a_openblas - libclang=15.0.7=default_h127d8a8_5 - libclang13=15.0.7=default_h5d6823c_5 - libcups=2.3.3=h36d4200_3 - libcurl=8.1.2=h409715c_0 - libdb=6.2.32=h9c3ff4c_0 - libdeflate=1.14=h166bdaf_0 - libedit=3.1.20250104=pl5321h7949ede_0 - libev=4.33=hd590300_2 - libevent=2.1.10=h28343ad_4 - libexpat=2.6.4=h5888daf_0 - libffi=3.4.6=h2dba641_0 - libflac=1.4.3=h59595ed_0 - libgcc=14.2.0=h767d61c_2 - libgcc-ng=14.2.0=h69a702a_2 - libgcrypt=1.11.0=ha770c72_2 - libgcrypt-devel=1.11.0=hb9d3cd8_2 - libgcrypt-lib=1.11.0=hb9d3cd8_2 - libgcrypt-tools=1.11.0=hb9d3cd8_2 - libgettextpo=0.23.1=h5888daf_0 - libgettextpo-devel=0.23.1=h5888daf_0 - libgfortran=14.2.0=h69a702a_2 - libgfortran-ng=14.2.0=h69a702a_2 - libgfortran5=14.2.0=hf1ad2bd_2 - libglib=2.84.0=h2ff4ddf_0 - libgomp=14.2.0=h767d61c_2 - libgpg-error=1.51=hbd13f7d_1 - libiconv=1.18=h4ce23a2_1 - liblapack=3.9.0=31_h7ac8fdf_openblas - libllvm15=15.0.7=hadd5161_1 - libltdl=2.4.3a=h5888daf_0 - liblzma=5.6.4=hb9d3cd8_0 - liblzma-devel=5.6.4=hb9d3cd8_0 - libnghttp2=1.58.0=h47da74e_0 - libnsl=2.0.1=hd590300_0 - libogg=1.3.5=h4ab18f5_0 - libopenblas=0.3.29=pthreads_h94d23a6_0 - libopus=1.3.1=h7f98852_1 - libpng=1.6.47=h943b412_0 - libpq=15.3=hbcd7760_1 - libsndfile=1.2.2=hc60ed4a_1 - libsodium=1.0.18=h36c2ea0_1 - libsqlite=3.49.1=hee588c1_2 - libssh2=1.11.0=h0841786_0 - libstdcxx=14.2.0=h8f9b012_2 - libstdcxx-ng=14.2.0=h4852527_2 - libsystemd0=253=h8c4010b_1 - libtiff=4.4.0=h82bc61c_5 - libtool=2.5.4=h5888daf_0 - libudev1=253=h0b41bf4_1 - libuuid=2.38.1=h0b41bf4_0 - libvorbis=1.3.7=h9c3ff4c_0 - libwebp-base=1.5.0=h851e524_0 - libxcb=1.13=h7f98852_1004 - libxkbcommon=1.5.0=h79f4944_1 - libxml2=2.10.3=hca2bb57_4 - libzlib=1.3.1=hb9d3cd8_2 - lz4-c=1.9.4=hcb278e6_0 - markupsafe=2.1.1=py37h540881e_1 - matplotlib=3.5.3=py37h89c1867_2 - matplotlib-base=3.5.3=py37hf395dca_2 - matplotlib-inline=0.1.7=pyhd8ed1ab_0 - metis=5.1.0=hd0bcaf9_1007 - mistune=3.0.2=pyhd8ed1ab_0 - mpfr=4.2.1=h90cbb55_3 - mpg123=1.32.9=hc50e24c_0 - munkres=1.1.4=pyh9f0ad1d_0 - mysql-common=8.0.33=hf1915f5_6 - mysql-libs=8.0.33=hca2cd23_6 - nbclassic=1.1.0=pyhd8ed1ab_0 - nbclient=0.7.0=pyhd8ed1ab_0 - nbconvert-core=7.6.0=pyhd8ed1ab_0 - nbformat=5.8.0=pyhd8ed1ab_0 - ncurses=6.5=h2d0b736_3 - nest-asyncio=1.6.0=pyhd8ed1ab_0 - notebook=6.5.7=pyha770c72_0 - notebook-shim=0.2.4=pyhd8ed1ab_0 - nspr=4.36=h5888daf_0 - nss=3.110=h159eef7_0 - numpy=1.21.6=py37h976b520_0 - openjpeg=2.5.0=h7d73246_1 - openssl=3.1.8=h7b32b05_0 - packaging=23.2=pyhd8ed1ab_0 - pandocfilters=1.5.0=pyhd8ed1ab_0 - parso=0.8.4=pyhd8ed1ab_0 - pcre2=10.44=hba22ea6_2 - pexpect=4.9.0=pyhd8ed1ab_0 - pickleshare=0.7.5=py_1003 - pillow=9.2.0=py37h850a105_2 - pint=0.18=pyhd8ed1ab_0 - pip=24.0=pyhd8ed1ab_0 - pixman=0.44.2=h29eaf8c_0 - pkgutil-resolve-name=1.3.10=pyhd8ed1ab_1 - pluggy=1.0.0=py37h89c1867_3 - ply=3.11=pyhd8ed1ab_2 - prometheus_client=0.17.1=pyhd8ed1ab_0 - prompt-toolkit=3.0.48=pyha770c72_0 - prompt_toolkit=3.0.48=hd8ed1ab_1 - psutil=5.9.3=py37h540881e_0 - pthread-stubs=0.4=hb9d3cd8_1002 - ptyprocess=0.7.0=pyhd3deb0d_0 - pulseaudio=16.1=hcb278e6_3 - pulseaudio-client=16.1=h5195f5e_3 - pulseaudio-daemon=16.1=ha8d29e2_3 - pycparser=2.21=pyhd8ed1ab_0 - pygments=2.17.2=pyhd8ed1ab_0 - pyparsing=3.1.4=pyhd8ed1ab_0 - pyqt=5.15.7=py37hf30b843_1 - pyqt5-sip=12.11.0=py37hd23a5d3_1 - 
pyrsistent=0.18.1=py37h540881e_1 - pysocks=1.7.1=py37h89c1867_5 - pytest=7.4.4=pyhd8ed1ab_0 - pytest-cov=4.1.0=pyhd8ed1ab_0 - python=3.7.12=hf930737_100_cpython - python-dateutil=2.9.0=pyhd8ed1ab_0 - python-fastjsonschema=2.20.0=pyhd8ed1ab_0 - python-json-logger=2.0.7=pyhd8ed1ab_0 - python_abi=3.7=4_cp37m - pytz=2024.2=pyhd8ed1ab_0 - pyyaml=6.0=py37h540881e_4 - pyzmq=24.0.1=py37h0c0c2a8_0 - qt-main=5.15.8=h5d23da1_6 - readline=8.2=h8c095d6_2 - requests=2.32.2=pyhd8ed1ab_0 - rfc3339-validator=0.1.4=pyhd8ed1ab_0 - rfc3986-validator=0.1.1=pyh9f0ad1d_0 - ruamel.yaml=0.17.21=py37h540881e_1 - ruamel.yaml.clib=0.2.6=py37h540881e_1 - ruamel_yaml=0.15.80=py37h540881e_1007 - send2trash=1.8.3=pyh0d859eb_0 - setuptools=69.0.3=pyhd8ed1ab_0 - sip=6.7.2=py37hd23a5d3_0 - six=1.16.0=pyh6c4a22f_0 - sniffio=1.3.1=pyhd8ed1ab_0 - soupsieve=2.3.2.post1=pyhd8ed1ab_0 - sqlite=3.49.1=h9eae976_2 - suitesparse=5.10.1=h5a4f163_3 - sundials=5.7.0=h558c624_0 - tbb=2022.0.0=hdb19cb5_0 - terminado=0.17.1=pyh41d4057_0 - tinycss2=1.4.0=pyhd8ed1ab_0 - tk=8.6.13=noxft_h4845f30_101 - toml=0.10.2=pyhd8ed1ab_0 - tomli=2.0.2=pyhd8ed1ab_0 - tornado=6.2=py37h540881e_0 - traitlets=5.9.0=pyhd8ed1ab_0 - typing-extensions=4.7.1=hd8ed1ab_0 - typing_extensions=4.7.1=pyha770c72_0 - unicodedata2=14.0.0=py37h540881e_1 - uri-template=1.3.0=pyhd8ed1ab_0 - urllib3=2.2.1=pyhd8ed1ab_0 - wcwidth=0.2.10=pyhd8ed1ab_0 - webcolors=24.8.0=pyhd8ed1ab_0 - webencodings=0.5.1=pyhd8ed1ab_2 - websocket-client=1.6.1=pyhd8ed1ab_0 - wheel=0.42.0=pyhd8ed1ab_0 - widgetsnbextension=4.0.13=pyhd8ed1ab_0 - xcb-util=0.4.0=h516909a_0 - xcb-util-image=0.4.0=h166bdaf_0 - xcb-util-keysyms=0.4.0=h516909a_0 - xcb-util-renderutil=0.3.9=h166bdaf_0 - xcb-util-wm=0.4.1=h516909a_0 - xkeyboard-config=2.38=h0b41bf4_0 - xorg-kbproto=1.0.7=hb9d3cd8_1003 - xorg-libice=1.1.2=hb9d3cd8_0 - xorg-libsm=1.2.6=he73a12e_0 - xorg-libx11=1.8.4=h0b41bf4_0 - xorg-libxau=1.0.12=hb9d3cd8_0 - xorg-libxdmcp=1.1.5=hb9d3cd8_0 - xorg-libxext=1.3.4=h0b41bf4_2 - xorg-libxrender=0.9.10=h7f98852_1003 - xorg-renderproto=0.11.1=hb9d3cd8_1003 - xorg-xextproto=7.3.0=hb9d3cd8_1004 - xorg-xproto=7.0.31=hb9d3cd8_1008 - xz=5.6.4=hbcc6ac9_0 - xz-gpl-tools=5.6.4=hbcc6ac9_0 - xz-tools=5.6.4=hb9d3cd8_0 - y-py=0.5.4=py37hbd0741f_0 - yaml=0.2.5=h7f98852_2 - yaml-cpp=0.7.0=h59595ed_3 - ypy-websocket=0.8.2=pyhd8ed1ab_0 - zeromq=4.3.5=h59595ed_1 - zipp=3.15.0=pyhd8ed1ab_0 - zstd=1.5.7=hb8e6e7a_2 prefix: /opt/conda/envs/ctwrap
[ "tests/test_wrapper.py::TestInvalid::test_parallel", "tests/test_wrapper.py::TestInvalid::test_serial", "tests/test_wrapper.py::TestInvalid::test_simulation" ]
[ "tests/test_wrapper.py::TestWrap::test_commandline", "tests/test_wrapper.py::TestMatrix::test_commandline", "tests/test_wrapper.py::TestLocal::test_commandline", "tests/test_wrapper.py::TestCustom::test_commandline", "tests/test_wrapper.py::TestIgnition::test_commandline", "tests/test_wrapper.py::TestAdiabaticFlame::test_commandline" ]
[ "tests/test_wrapper.py::TestLegacy::test_handler", "tests/test_wrapper.py::TestWrap::test_handler", "tests/test_wrapper.py::TestWrap::test_parallel", "tests/test_wrapper.py::TestWrap::test_serial", "tests/test_wrapper.py::TestWrap::test_simulation", "tests/test_wrapper.py::TestMatrix::test_handler", "tests/test_wrapper.py::TestMatrix::test_parallel", "tests/test_wrapper.py::TestMatrix::test_serial", "tests/test_wrapper.py::TestMatrix::test_simulation", "tests/test_wrapper.py::TestLocal::test_handler", "tests/test_wrapper.py::TestLocal::test_parallel", "tests/test_wrapper.py::TestLocal::test_serial", "tests/test_wrapper.py::TestLocal::test_simulation", "tests/test_wrapper.py::TestCustom::test_handler", "tests/test_wrapper.py::TestCustom::test_parallel", "tests/test_wrapper.py::TestCustom::test_serial", "tests/test_wrapper.py::TestCustom::test_simulation", "tests/test_wrapper.py::TestIgnition::test_handler", "tests/test_wrapper.py::TestIgnition::test_parallel", "tests/test_wrapper.py::TestIgnition::test_serial", "tests/test_wrapper.py::TestIgnition::test_simulation", "tests/test_wrapper.py::TestAdiabaticFlame::test_handler", "tests/test_wrapper.py::TestAdiabaticFlame::test_parallel", "tests/test_wrapper.py::TestAdiabaticFlame::test_serial", "tests/test_wrapper.py::TestAdiabaticFlame::test_simulation", "tests/test_wrapper.py::TestInvalid::test_commandline", "tests/test_wrapper.py::TestInvalid::test_handler" ]
[]
MIT License
9,185
523
[ "ctwrap/simulation.py" ]
googleapis__python-automl-110
df22fd569124dfc3f9b2568656fb6cddc4bcb07b
2020-12-16 00:05:35
df22fd569124dfc3f9b2568656fb6cddc4bcb07b
munkhuushmgl: I will address the issue via #111 https://source.cloud.google.com/results/invocations/8142cdb2-680b-4d32-b327-f3fffda9ed2c/targets/github%2Fpython-automl%2Fsamples%2Fsnippets/tests
diff --git a/google/cloud/automl_v1beta1/services/tables/tables_client.py b/google/cloud/automl_v1beta1/services/tables/tables_client.py index 21028a3..f6e7889 100644 --- a/google/cloud/automl_v1beta1/services/tables/tables_client.py +++ b/google/cloud/automl_v1beta1/services/tables/tables_client.py @@ -2999,7 +2999,10 @@ class TablesClient(object): ) req = google.cloud.automl_v1beta1.BatchPredictRequest( - name=model_name, input_config=input_request, output_config=output_request, + name=model_name, + input_config=input_request, + output_config=output_request, + params=params, ) method_kwargs = self.__process_request_kwargs(req, **kwargs)
The `params` parameter is ignored in `batch_predict()` method Passing a value to the `params` parameter in the `batch_predict()` method of `TablesClient` class does not have any effect. For example, passing the value `params={'feature_importance': 'true'}` to `batch_predict()` is supposed to have feature importance generated along with the output, but there was none. #### Environment details - OS type and version: Ubuntu 18.04.5 LTS - Python version: 3.5.2, 3.6.10, 3.7.2, 3.8.6 - pip version: 20.2.4 - `google-cloud-automl` version: 2.1.0 #### Steps to reproduce 1. Make a batch prediction with `params` (e.g. `params={'feature_importance': 'true'}`). 2. Invoke the request. 3. Notice that the generated output does not generate feature importance. #### Code example ```python from google.cloud import automl_v1beta1 client = automl_v1beta1.TablesClient(project='my-project', region='us-central1') client.batch_predict( bigquery_input_uri='bq://my-project.dataset.table', bigquery_output_uri='bq://my-project', model_display_name='my_model', params={'feature_importance': 'true'} ) ```
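Editor's note: a hypothetical, simplified sketch (the helper names below are stand-ins, not the google-cloud-automl API) of the failure mode fixed by the patch above — a keyword argument that the wrapper accepts but never forwards into the request it builds is silently dropped.

```python
# Illustration only: 'params' accepted but not forwarded vs. forwarded.
def build_request(name, input_config, output_config, params=None):
    return {"name": name, "input_config": input_config,
            "output_config": output_config, "params": params or {}}

def batch_predict_buggy(name, input_config, output_config, params=None):
    # bug: 'params' is accepted here but never passed on
    return build_request(name, input_config, output_config)

def batch_predict_fixed(name, input_config, output_config, params=None):
    return build_request(name, input_config, output_config, params=params)

kwargs = dict(params={"feature_importance": "true"})
print(batch_predict_buggy("m", "bq://in", "bq://out", **kwargs)["params"])  # {}
print(batch_predict_fixed("m", "bq://in", "bq://out", **kwargs)["params"])  # {'feature_importance': 'true'}
```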
googleapis/python-automl
diff --git a/tests/unit/test_tables_client_v1beta1.py b/tests/unit/test_tables_client_v1beta1.py index 1d5b168..4df06d4 100644 --- a/tests/unit/test_tables_client_v1beta1.py +++ b/tests/unit/test_tables_client_v1beta1.py @@ -1599,6 +1599,24 @@ class TestTablesClient(object): ) ) + def test_batch_predict_bigquery_with_params(self): + client = self.tables_client({}, {}) + client.batch_predict( + model_name="my_model", + bigquery_input_uri="bq://input", + bigquery_output_uri="bq://output", + params={"feature_importance": "true"}, + ) + + client.prediction_client.batch_predict.assert_called_with( + request=automl_v1beta1.BatchPredictRequest( + name="my_model", + input_config={"bigquery_source": {"input_uri": "bq://input"}}, + output_config={"bigquery_destination": {"output_uri": "bq://output"}}, + params={"feature_importance": "true"}, + ) + ) + def test_batch_predict_mixed(self): client = self.tables_client({}, {}) client.batch_predict(
{ "commit_name": "merge_commit", "failed_lite_validators": [], "has_test_patch": true, "is_lite": true, "llm_score": { "difficulty_score": 1, "issue_text_score": 2, "test_score": 0 }, "num_modified_files": 1 }
2.1
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[pandas,storage]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest", "pytest-cov", "pytest-asyncio", "mock" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.8", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
cachetools==5.5.2 certifi==2025.1.31 charset-normalizer==3.4.1 coverage==7.6.1 exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work google-api-core==1.34.1 google-auth==2.38.0 -e git+https://github.com/googleapis/python-automl.git@df22fd569124dfc3f9b2568656fb6cddc4bcb07b#egg=google_cloud_automl google-cloud-core==2.4.3 google-cloud-storage==1.44.0 google-crc32c==1.5.0 google-resumable-media==2.7.2 googleapis-common-protos==1.69.2 grpcio==1.70.0 grpcio-status==1.48.2 idna==3.10 iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work libcst==1.1.0 mock==5.2.0 mypy-extensions==1.0.0 numpy==1.24.4 packaging @ file:///croot/packaging_1720101850331/work pandas==2.0.3 pluggy==1.5.0 proto-plus==1.26.1 protobuf==3.20.3 pyasn1==0.6.1 pyasn1_modules==0.4.2 pytest==8.3.5 pytest-asyncio==0.24.0 pytest-cov==5.0.0 python-dateutil==2.9.0.post0 pytz==2025.2 PyYAML==6.0.2 requests==2.32.3 rsa==4.9 six==1.17.0 tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work typing-inspect==0.9.0 typing_extensions==4.13.0 tzdata==2025.2 urllib3==2.2.3
name: python-automl channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - exceptiongroup=1.2.0=py38h06a4308_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - packaging=24.1=py38h06a4308_0 - pip=24.2=py38h06a4308_0 - python=3.8.20=he870216_0 - readline=8.2=h5eee18b_0 - setuptools=75.1.0=py38h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tomli=2.0.1=py38h06a4308_0 - wheel=0.44.0=py38h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - cachetools==5.5.2 - certifi==2025.1.31 - charset-normalizer==3.4.1 - coverage==7.6.1 - google-api-core==1.34.1 - google-auth==2.38.0 - google-cloud-core==2.4.3 - google-cloud-storage==1.44.0 - google-crc32c==1.5.0 - google-resumable-media==2.7.2 - googleapis-common-protos==1.69.2 - grpcio==1.70.0 - grpcio-status==1.48.2 - idna==3.10 - libcst==1.1.0 - mock==5.2.0 - mypy-extensions==1.0.0 - numpy==1.24.4 - pandas==2.0.3 - pluggy==1.5.0 - proto-plus==1.26.1 - protobuf==3.20.3 - pyasn1==0.6.1 - pyasn1-modules==0.4.2 - pytest==8.3.5 - pytest-asyncio==0.24.0 - pytest-cov==5.0.0 - python-dateutil==2.9.0.post0 - pytz==2025.2 - pyyaml==6.0.2 - requests==2.32.3 - rsa==4.9 - six==1.17.0 - typing-extensions==4.13.0 - typing-inspect==0.9.0 - tzdata==2025.2 - urllib3==2.2.3 prefix: /opt/conda/envs/python-automl
[ "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_batch_predict_bigquery_with_params" ]
[ "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_prediction_client_client_info" ]
[ "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_list_datasets_empty", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_list_datasets_not_empty", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_get_dataset_no_value", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_get_dataset_name", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_get_no_dataset", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_get_dataset_from_empty_list", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_get_dataset_from_list_not_found", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_get_dataset_from_list", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_get_dataset_from_list_ambiguous", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_create_dataset", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_delete_dataset", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_delete_dataset_not_found", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_delete_dataset_name", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_export_not_found", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_export_gcs_uri", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_export_bq_uri", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_import_not_found", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_import_pandas_dataframe", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_import_pandas_dataframe_init_gcs", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_import_gcs_uri", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_import_gcs_uris", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_import_bq_uri", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_list_table_specs", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_list_table_specs_not_found", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_get_table_spec", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_get_column_spec", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_list_column_specs", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_update_column_spec_not_found", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_update_column_spec_display_name_not_found", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_update_column_spec_name_no_args", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_update_column_spec_no_args", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_update_column_spec_nullable", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_update_column_spec_type_code", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_update_column_spec_type_code_nullable", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_update_column_spec_type_code_nullable_false", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_set_target_column_table_not_found", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_set_target_column_not_found", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_set_target_column", 
"tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_set_weight_column_table_not_found", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_set_weight_column_not_found", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_set_weight_column", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_clear_weight_column", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_set_test_train_column_table_not_found", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_set_test_train_column_not_found", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_set_test_train_column", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_clear_test_train_column", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_set_time_column", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_clear_time_column", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_get_model_evaluation", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_list_model_evaluations_empty", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_list_model_evaluations_not_empty", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_list_models_empty", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_list_models_not_empty", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_get_model_name", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_get_no_model", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_get_model_from_empty_list", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_get_model_from_list_not_found", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_get_model_from_list", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_get_model_from_list_ambiguous", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_delete_model", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_delete_model_not_found", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_delete_model_name", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_deploy_model_no_args", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_deploy_model", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_deploy_model_not_found", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_undeploy_model", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_undeploy_model_not_found", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_create_model", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_create_model_include_columns", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_create_model_exclude_columns", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_create_model_invalid_hours_small", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_create_model_invalid_hours_large", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_create_model_invalid_no_dataset", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_create_model_invalid_include_exclude", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_predict_from_array", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_predict_from_dict", 
"tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_predict_from_dict_with_feature_importance", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_predict_from_dict_missing", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_predict_all_types", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_predict_from_array_missing", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_batch_predict_pandas_dataframe", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_batch_predict_pandas_dataframe_init_gcs", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_batch_predict_gcs", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_batch_predict_bigquery", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_batch_predict_mixed", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_batch_predict_missing_input_gcs_uri", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_batch_predict_missing_input_bigquery_uri", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_batch_predict_missing_output_gcs_uri", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_batch_predict_missing_output_bigquery_uri", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_batch_predict_missing_model", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_batch_predict_no_model", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_auto_ml_client_credentials", "tests/unit/test_tables_client_v1beta1.py::TestTablesClient::test_prediction_client_credentials" ]
[]
Apache License 2.0
9,192
200
[ "google/cloud/automl_v1beta1/services/tables/tables_client.py" ]
numba__numba-6582
0bac18af44d08e913cd512babb9f9b7f6386d30a
2020-12-16 15:49:08
00ad12751725391e8423d67ef845ba56110c55df
diff --git a/numba/core/cgutils.py b/numba/core/cgutils.py index 45c58e9f2..f62216365 100644 --- a/numba/core/cgutils.py +++ b/numba/core/cgutils.py @@ -373,20 +373,28 @@ def alloca_once(builder, ty, size=None, name='', zfill=False): with builder.goto_entry_block(): ptr = builder.alloca(ty, size=size, name=name) # Always zero-fill at init-site. This is safe. - builder.store(ty(None), ptr) + builder.store(ptr.type.pointee(None), ptr) # Also zero-fill at the use-site if zfill: - builder.store(ty(None), ptr) + builder.store(ptr.type.pointee(None), ptr) return ptr -def alloca_once_value(builder, value, name=''): +def sizeof(builder, ptr_type): + """Compute sizeof using GEP + """ + null = ptr_type(None) + offset = null.gep([int32_t(1)]) + return builder.ptrtoint(offset, intp_t) + + +def alloca_once_value(builder, value, name='', zfill=False): """ Like alloca_once(), but passing a *value* instead of a type. The type is inferred and the allocated slot is also initialized with the given value. """ - storage = alloca_once(builder, value.type) + storage = alloca_once(builder, value.type, zfill=zfill) builder.store(value, storage) return storage @@ -907,6 +915,18 @@ def memset(builder, ptr, size, value): builder.call(fn, [ptr, value, size, bool_t(0)]) +def memset_padding(builder, ptr): + """ + Fill padding bytes of the pointee with zeros. + """ + # Load existing value + val = builder.load(ptr) + # Fill pointee with zeros + memset(builder, ptr, sizeof(builder, ptr.type), 0) + # Store value back + builder.store(val, ptr) + + def global_constant(builder_or_module, name, value, linkage='internal'): """ Get or create a (LLVM module-)global constant with *name* or *value*. diff --git a/numba/typed/dictobject.py b/numba/typed/dictobject.py index 9fd6ec4f1..345216d71 100644 --- a/numba/typed/dictobject.py +++ b/numba/typed/dictobject.py @@ -343,6 +343,8 @@ def _dict_insert(typingctx, d, key, hashval, val): data_val = dm_val.as_data(builder, val) ptr_key = cgutils.alloca_once_value(builder, data_key) + cgutils.memset_padding(builder, ptr_key) + ptr_val = cgutils.alloca_once_value(builder, data_val) # TODO: the ptr_oldval is not used. needed for refct ptr_oldval = cgutils.alloca_once(builder, data_val.type) @@ -435,6 +437,7 @@ def _dict_lookup(typingctx, d, key, hashval): data_key = dm_key.as_data(builder, key) ptr_key = cgutils.alloca_once_value(builder, data_key) + cgutils.memset_padding(builder, ptr_key) ll_val = context.get_data_type(td.value_type) ptr_val = cgutils.alloca_once(builder, ll_val)
Dicts with tuple keys not working in 0.52.0 I’m using a dict with a tuple as the key which worked fine in 0.51.2, but does not work in 0.52.0: ``` import numpy as np import numba @numba.njit def testdict(tuptype): tdict = numba.typed.Dict.empty(tuptype, numba.core.types.float32) t1 = np.array([3], dtype=np.uint64) t2 = np.array([5.67], dtype=np.float32) v1 = np.array([10.23], dtype=np.float32) tdict[(t1[0], t2[0])] = v1[0] # 0.51.2 prints True as expected; 0.52.0 prints False print((t1[0], t2[0]) in tdict) testdict( numba.core.types.Tuple((numba.core.types.uint64, numba.core.types.float32))) ``` When I try to check membership of an element that I just stored, 0.52.0 returns False.
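Editor's note: the fix above hinges on struct alignment — a `(uint64, float32)` key carries 12 payload bytes but is padded to 16, and if the 4 padding bytes hold garbage, a byte-wise key comparison can disagree even for equal keys. Below is a small ctypes illustration of the same layout, assuming it mirrors the LLVM struct layout numba uses; this is not numba code.

```python
# Illustration only: the (uint64, float32) key layout and its trailing padding.
import ctypes

class Key(ctypes.Structure):
    _fields_ = [("a", ctypes.c_uint64), ("b", ctypes.c_float)]

payload = ctypes.sizeof(ctypes.c_uint64) + ctypes.sizeof(ctypes.c_float)
print(payload, ctypes.sizeof(Key))  # 12 16 on a typical 64-bit platform
# The 4 trailing bytes are alignment padding. The patch's cgutils.memset_padding()
# zero-fills them before the dict insert/lookup so the comparison only ever sees
# deterministic bytes.
```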
numba/numba
diff --git a/numba/tests/test_dictobject.py b/numba/tests/test_dictobject.py index 6e4fa2e1e..c011f3dcd 100644 --- a/numba/tests/test_dictobject.py +++ b/numba/tests/test_dictobject.py @@ -930,6 +930,24 @@ class TestDictObject(MemoryLeakMixin, TestCase): self.assertEqual(foo(), foo.py_func()) + def test_issue6570_alignment_padding(self): + # Create a key type that is 12-bytes long on a 8-byte aligned system + # so that the a 4-byte padding is needed. + # If the 4-byte padding is not zero-filled, it will have garbage data + # that affects key matching in the lookup. + keyty = types.Tuple([types.uint64, types.float32]) + + @njit + def foo(): + d = dictobject.new_dict(keyty, float64) + t1 = np.array([3], dtype=np.uint64) + t2 = np.array([5.67], dtype=np.float32) + v1 = np.array([10.23], dtype=np.float32) + d[(t1[0], t2[0])] = v1[0] + return (t1[0], t2[0]) in d + + self.assertTrue(foo()) + class TestDictTypeCasting(TestCase): def check_good(self, fromty, toty):
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 0 }, "num_modified_files": 2 }
0.52
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest pytest-cov pytest-xdist pytest-mock pytest-asyncio", "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc g++" ], "python": "3.7", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
certifi @ file:///croot/certifi_1671487769961/work/certifi coverage==7.2.7 exceptiongroup==1.2.2 execnet==2.0.2 importlib-metadata==6.7.0 iniconfig==2.0.0 llvmlite==0.36.0 -e git+https://github.com/numba/numba.git@0bac18af44d08e913cd512babb9f9b7f6386d30a#egg=numba numpy==1.21.6 packaging==24.0 pluggy==1.2.0 pytest==7.4.4 pytest-asyncio==0.21.2 pytest-cov==4.1.0 pytest-mock==3.11.1 pytest-xdist==3.5.0 tomli==2.0.1 typing_extensions==4.7.1 zipp==3.15.0
name: numba channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2022.12.7=py37h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=22.3.1=py37h06a4308_0 - python=3.7.16=h7a1cb2a_0 - readline=8.2=h5eee18b_0 - setuptools=65.6.3=py37h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.38.4=py37h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - coverage==7.2.7 - exceptiongroup==1.2.2 - execnet==2.0.2 - importlib-metadata==6.7.0 - iniconfig==2.0.0 - llvmlite==0.36.0 - numpy==1.21.6 - packaging==24.0 - pluggy==1.2.0 - pytest==7.4.4 - pytest-asyncio==0.21.2 - pytest-cov==4.1.0 - pytest-mock==3.11.1 - pytest-xdist==3.5.0 - tomli==2.0.1 - typing-extensions==4.7.1 - zipp==3.15.0 prefix: /opt/conda/envs/numba
[ "numba/tests/test_dictobject.py::TestDictObject::test_issue6570_alignment_padding" ]
[ "numba/tests/test_dictobject.py::TestDictObject::test_007_collision_checks" ]
[ "numba/tests/test_dictobject.py::TestDictObject::test_001_cannot_downcast_key", "numba/tests/test_dictobject.py::TestDictObject::test_002_cannot_downcast_default", "numba/tests/test_dictobject.py::TestDictObject::test_003_cannot_downcast_key", "numba/tests/test_dictobject.py::TestDictObject::test_004_cannot_downcast_key", "numba/tests/test_dictobject.py::TestDictObject::test_005_cannot_downcast_value", "numba/tests/test_dictobject.py::TestDictObject::test_006_cannot_downcast_key", "numba/tests/test_dictobject.py::TestDictObject::test_008_lifo_popitem", "numba/tests/test_dictobject.py::TestDictObject::test_010_cannot_downcast_default", "numba/tests/test_dictobject.py::TestDictObject::test_011_cannot_downcast_key", "numba/tests/test_dictobject.py::TestDictObject::test_012_cannot_downcast_key", "numba/tests/test_dictobject.py::TestDictObject::test_013_contains_empty_dict", "numba/tests/test_dictobject.py::TestDictObject::test_014_not_contains_empty_dict", "numba/tests/test_dictobject.py::TestDictObject::test_015_dict_clear", "numba/tests/test_dictobject.py::TestDictObject::test_016_cannot_downcast_key", "numba/tests/test_dictobject.py::TestDictObject::test_017_cannot_downcast_default", "numba/tests/test_dictobject.py::TestDictObject::test_018_keys_iter_are_views", "numba/tests/test_dictobject.py::TestDictObject::test_020_string_key", "numba/tests/test_dictobject.py::TestDictObject::test_021_long_str_key", "numba/tests/test_dictobject.py::TestDictObject::test_022_references_juggle", "numba/tests/test_dictobject.py::TestDictObject::test_023_closure", "numba/tests/test_dictobject.py::TestDictObject::test_024_unicode_getitem_keys", "numba/tests/test_dictobject.py::TestDictObject::test_dict_bool", "numba/tests/test_dictobject.py::TestDictObject::test_dict_clear", "numba/tests/test_dictobject.py::TestDictObject::test_dict_contains", "numba/tests/test_dictobject.py::TestDictObject::test_dict_copy", "numba/tests/test_dictobject.py::TestDictObject::test_dict_create", "numba/tests/test_dictobject.py::TestDictObject::test_dict_delitem", "numba/tests/test_dictobject.py::TestDictObject::test_dict_equality", "numba/tests/test_dictobject.py::TestDictObject::test_dict_equality_diff_type", "numba/tests/test_dictobject.py::TestDictObject::test_dict_equality_more", "numba/tests/test_dictobject.py::TestDictObject::test_dict_get", "numba/tests/test_dictobject.py::TestDictObject::test_dict_get_with_default", "numba/tests/test_dictobject.py::TestDictObject::test_dict_getitem", "numba/tests/test_dictobject.py::TestDictObject::test_dict_items", "numba/tests/test_dictobject.py::TestDictObject::test_dict_iter", "numba/tests/test_dictobject.py::TestDictObject::test_dict_keys", "numba/tests/test_dictobject.py::TestDictObject::test_dict_pop", "numba/tests/test_dictobject.py::TestDictObject::test_dict_pop_many", "numba/tests/test_dictobject.py::TestDictObject::test_dict_popitem", "numba/tests/test_dictobject.py::TestDictObject::test_dict_popitem_many", "numba/tests/test_dictobject.py::TestDictObject::test_dict_setdefault", "numba/tests/test_dictobject.py::TestDictObject::test_dict_to_from_meminfo", "numba/tests/test_dictobject.py::TestDictObject::test_dict_values", "numba/tests/test_dictobject.py::TestDictTypeCasting::test_cast_bool_to", "numba/tests/test_dictobject.py::TestDictTypeCasting::test_cast_float_to", "numba/tests/test_dictobject.py::TestDictTypeCasting::test_cast_int_to", "numba/tests/test_dictobject.py::TestTypedDict::test_basic", "numba/tests/test_dictobject.py::TestTypedDict::test_compiled", 
"numba/tests/test_dictobject.py::TestTypedDict::test_copy_from_dict", "numba/tests/test_dictobject.py::TestTypedDict::test_repr", "numba/tests/test_dictobject.py::TestTypedDict::test_str", "numba/tests/test_dictobject.py::TestDictRefctTypes::test_delitem", "numba/tests/test_dictobject.py::TestDictRefctTypes::test_dict_of_dict_int_keyval", "numba/tests/test_dictobject.py::TestDictRefctTypes::test_dict_of_dict_npm", "numba/tests/test_dictobject.py::TestDictRefctTypes::test_getitem_return_type", "numba/tests/test_dictobject.py::TestDictRefctTypes::test_storage_model_mismatch", "numba/tests/test_dictobject.py::TestDictRefctTypes::test_str_key", "numba/tests/test_dictobject.py::TestDictRefctTypes::test_str_key_array_value", "numba/tests/test_dictobject.py::TestDictRefctTypes::test_str_val", "numba/tests/test_dictobject.py::TestDictForbiddenTypes::test_disallow_list", "numba/tests/test_dictobject.py::TestDictForbiddenTypes::test_disallow_set", "numba/tests/test_dictobject.py::TestDictInferred::test_conflict_key_type_non_number", "numba/tests/test_dictobject.py::TestDictInferred::test_conflicting_key_type", "numba/tests/test_dictobject.py::TestDictInferred::test_conflicting_value_type", "numba/tests/test_dictobject.py::TestDictInferred::test_define_after_use", "numba/tests/test_dictobject.py::TestDictInferred::test_dict_of_dict", "numba/tests/test_dictobject.py::TestDictInferred::test_ifelse_empty_one_branch", "numba/tests/test_dictobject.py::TestDictInferred::test_ifelse_filled_both_branches", "numba/tests/test_dictobject.py::TestDictInferred::test_loop", "numba/tests/test_dictobject.py::TestDictInferred::test_simple_args", "numba/tests/test_dictobject.py::TestDictInferred::test_simple_literal", "numba/tests/test_dictobject.py::TestDictInferred::test_simple_upcast", "numba/tests/test_dictobject.py::TestDictInferred::test_unused", "numba/tests/test_dictobject.py::TestNonCompiledInfer::test_check_untyped_dict_ops", "numba/tests/test_dictobject.py::TestNonCompiledInfer::test_getitem", "numba/tests/test_dictobject.py::TestNonCompiledInfer::test_setdefault", "numba/tests/test_dictobject.py::TestDictWithJitclass::test_jitclass_as_value", "numba/tests/test_dictobject.py::TestNoJit::test_dict_create_no_jit_using_Dict", "numba/tests/test_dictobject.py::TestNoJit::test_dict_create_no_jit_using_empty", "numba/tests/test_dictobject.py::TestNoJit::test_dict_create_no_jit_using_new_dict", "numba/tests/test_dictobject.py::TestDictIterator::test_dict_iterator", "numba/tests/test_dictobject.py::TestTypedDictInitialValues::test_heterogeneous_but_castable_to_homogeneous", "numba/tests/test_dictobject.py::TestTypedDictInitialValues::test_heterogeneous_but_not_castable_to_homogeneous", "numba/tests/test_dictobject.py::TestTypedDictInitialValues::test_homogeneous_and_literal", "numba/tests/test_dictobject.py::TestTypedDictInitialValues::test_mutation_not_carried", "numba/tests/test_dictobject.py::TestTypedDictInitialValues::test_mutation_not_carried_single_function", "numba/tests/test_dictobject.py::TestTypedDictInitialValues::test_unify_across_function_call", "numba/tests/test_dictobject.py::TestLiteralStrKeyDict::test_basic_const_lowering_boxing", "numba/tests/test_dictobject.py::TestLiteralStrKeyDict::test_basic_nonconst_freevar", "numba/tests/test_dictobject.py::TestLiteralStrKeyDict::test_basic_nonconst_in_scope", "numba/tests/test_dictobject.py::TestLiteralStrKeyDict::test_build_map_op_code", "numba/tests/test_dictobject.py::TestLiteralStrKeyDict::test_dict_as_arg", 
"numba/tests/test_dictobject.py::TestLiteralStrKeyDict::test_dict_items", "numba/tests/test_dictobject.py::TestLiteralStrKeyDict::test_dict_keys", "numba/tests/test_dictobject.py::TestLiteralStrKeyDict::test_dict_not_unify", "numba/tests/test_dictobject.py::TestLiteralStrKeyDict::test_dict_return", "numba/tests/test_dictobject.py::TestLiteralStrKeyDict::test_dict_unify", "numba/tests/test_dictobject.py::TestLiteralStrKeyDict::test_dict_value_coercion", "numba/tests/test_dictobject.py::TestLiteralStrKeyDict::test_dict_values", "numba/tests/test_dictobject.py::TestLiteralStrKeyDict::test_dict_with_single_literallist_value", "numba/tests/test_dictobject.py::TestLiteralStrKeyDict::test_get", "numba/tests/test_dictobject.py::TestLiteralStrKeyDict::test_list_and_array_as_value", "numba/tests/test_dictobject.py::TestLiteralStrKeyDict::test_literal_value", "numba/tests/test_dictobject.py::TestLiteralStrKeyDict::test_mutation_failure", "numba/tests/test_dictobject.py::TestLiteralStrKeyDict::test_read_only", "numba/tests/test_dictobject.py::TestLiteralStrKeyDict::test_repeated_key_literal_value", "numba/tests/test_dictobject.py::TestLiteralStrKeyDict::test_tuple_not_in_mro" ]
[]
BSD 2-Clause "Simplified" License
9,199
835
[ "numba/core/cgutils.py", "numba/typed/dictobject.py" ]
terrapower__armi-226
0c91039a77ed28c33b4e6eb9121fa4ad4fedea34
2020-12-16 20:48:09
a10a3966db9b63d0f75049e34211ac81ba3724ea
ntouran: FYI here's what it ends up looking like in the gallery ![image](https://user-images.githubusercontent.com/53948397/102410859-feaf7f00-3fa5-11eb-8292-31470f7095fe.png) ntouran: The build failure regarding XS settings is a new one and it's caused by a change in a dependency version, probably in voluptuous, which is treating things differently. I can replicate it on a fresh install of ARMI. youngmit: > Yeah this commit added sorted which causes this because there is a `None` in the list getting sorted. [alecthomas/voluptuous@997c8db](https://github.com/alecthomas/voluptuous/commit/997c8dbba92fec3f58b081141c9a4a90e046ada8) Good find! youngmit: Sweet!
diff --git a/armi/bookkeeping/report/reportingUtils.py b/armi/bookkeeping/report/reportingUtils.py index a63435ac..c8259e4f 100644 --- a/armi/bookkeeping/report/reportingUtils.py +++ b/armi/bookkeeping/report/reportingUtils.py @@ -936,15 +936,13 @@ def makeCoreAndAssemblyMaps(r, cs, generateFullCoreMap=False, showBlockAxMesh=Tr assemPlotImage.title = assemPlotImage.title + " ({})".format(plotNum) report.data.Report.groupsOrderFirst.insert(-1, assemPlotImage) report.data.Report.componentWellGroups.insert(-1, assemPlotImage) - assemPlotName = os.path.abspath( - plotting.plotAssemblyTypes( - core.parent.blueprints, - core.name, - assemBatch, - plotNum, - maxAssems=MAX_ASSEMS_PER_ASSEM_PLOT, - showBlockAxMesh=showBlockAxMesh, - ) + assemPlotName = os.path.abspath(f"{core.name}AssemblyTypes{plotNum}.png") + plotting.plotAssemblyTypes( + core.parent.blueprints, + assemPlotName, + assemBatch, + maxAssems=MAX_ASSEMS_PER_ASSEM_PLOT, + showBlockAxMesh=showBlockAxMesh, ) report.setData( "Assem Types {}".format(plotNum), diff --git a/armi/physics/neutronics/crossSectionSettings.py b/armi/physics/neutronics/crossSectionSettings.py index 7a0a473e..4ab6cb28 100644 --- a/armi/physics/neutronics/crossSectionSettings.py +++ b/armi/physics/neutronics/crossSectionSettings.py @@ -35,7 +35,7 @@ from armi.physics.neutronics.crossSectionGroupManager import ( ) # define conf and schema here since this is closest to where the objects live -XS_GEOM_TYPES = {"0D", "2D hex", "1D slab", "1D cylinder", None} +XS_GEOM_TYPES = {"0D", "2D hex", "1D slab", "1D cylinder"} CONF_GEOM = "geometry" CONF_BLOCK_REPRESENTATION = "blockRepresentation" diff --git a/armi/reactor/converters/uniformMesh.py b/armi/reactor/converters/uniformMesh.py index f39d6ee5..f27049dc 100644 --- a/armi/reactor/converters/uniformMesh.py +++ b/armi/reactor/converters/uniformMesh.py @@ -223,11 +223,11 @@ class UniformMeshGeometryConverter(GeometryConverter): def plotConvertedReactor(self): assemsToPlot = self.convReactor.core[:12] for plotNum, assemBatch in enumerate(iterables.chunk(assemsToPlot, 6), start=1): + assemPlotName = f"{self.convReactor.core.name}AssemblyTypes{plotNum}.png" plotting.plotAssemblyTypes( self.convReactor.core.parent.blueprints, - self.convReactor.core.name, + assemPlotName, assemBatch, - plotNum, maxAssems=6, showBlockAxMesh=True, ) diff --git a/armi/utils/plotting.py b/armi/utils/plotting.py index a4ae6a7d..f123a955 100644 --- a/armi/utils/plotting.py +++ b/armi/utils/plotting.py @@ -632,39 +632,41 @@ class DepthSlider(Slider): def plotAssemblyTypes( blueprints, - coreName, + fileName=None, assems=None, - plotNumber=1, maxAssems=None, showBlockAxMesh=True, -): +) -> plt.Figure: """ Generate a plot showing the axial block and enrichment distributions of each assembly type in the core. Parameters ---------- - bluepprints: Blueprints + blueprints: Blueprints The blueprints to plot assembly types of. + fileName : str or None + Base for filename to write, or None for just returning the fig + assems: list list of assembly objects to be plotted. - plotNumber: integer - number of uniquely identify the assembly plot from others and to prevent plots from being overwritten. - maxAssems: integer maximum number of assemblies to plot in the assems list. showBlockAxMesh: bool if true, the axial mesh information will be displayed on the right side of the assembly plot. 
+ + Returns + ------- + fig : plt.Figure + The figure object created """ if assems is None: assems = list(blueprints.assemblies.values()) if not isinstance(assems, (list, set, tuple)): assems = [assems] - if not isinstance(plotNumber, int): - raise TypeError("Plot number should be an integer") if maxAssems is not None and not isinstance(maxAssems, int): raise TypeError("Maximum assemblies should be an integer") @@ -718,17 +720,18 @@ def plotAssemblyTypes( ax.set_yticks([0.0] + list(set(numpy.cumsum(yBlockHeightDiffs)))) ax.xaxis.set_visible(False) - ax.set_title("Assembly Designs for {}".format(coreName), y=1.03) + ax.set_title("Assembly Designs", y=1.03) ax.set_ylabel("Thermally Expanded Axial Heights (cm)".upper(), labelpad=20) ax.set_xlim([0.0, 0.5 + maxAssems * (assemWidth + assemSeparation)]) # Plot and save figure ax.plot() - figName = coreName + "AssemblyTypes{}.png".format(plotNumber) - runLog.debug("Writing assem layout {} in {}".format(figName, os.getcwd())) - fig.savefig(figName) - plt.close(fig) - return figName + if fileName: + fig.savefig(fileName) + runLog.debug("Writing assem layout {} in {}".format(fileName, os.getcwd())) + plt.close(fig) + + return fig def _plotBlocksInAssembly( diff --git a/doc/gallery-src/framework/run_programmaticReactorDefinition.py b/doc/gallery-src/framework/run_programmaticReactorDefinition.py new file mode 100644 index 00000000..a9b031d0 --- /dev/null +++ b/doc/gallery-src/framework/run_programmaticReactorDefinition.py @@ -0,0 +1,222 @@ +""" +Build Reactor Inputs Programmatically +===================================== + +Sometimes it's desirable to build input definitions for ARMI using +code rather than by writing the textual input files directly. +In ARMI you can either make the ARMI reactor objects directly, +or you can define Blueprints objects. The benefit of making Blueprints +objects is that they can in turn be used to create both ARMI reactor +objects as well as textual input itself. This is nice when you want to +have traceable input files associated with a run that was developed +programmatically (e.g. for parameter sweeps). + +This example shows how to make Blueprints objects programmatically completely +from scratch. 
+ +""" +import matplotlib.pyplot as plt +import armi + +armi.configure(permissive=True) +# pylint: disable=wrong-import-position +from armi.reactor import blueprints +from armi.settings import caseSettings +from armi.reactor.blueprints import isotopicOptions +from armi.reactor.blueprints import assemblyBlueprint +from armi.reactor.blueprints import blockBlueprint +from armi.reactor.blueprints import componentBlueprint +from armi.reactor.blueprints import gridBlueprint +from armi.reactor.blueprints import reactorBlueprint +from armi.utils import plotting +from armi import cases + + +def buildCase(): + """Build input components and a case.""" + bp = blueprints.Blueprints() + bp.customIsotopics = isotopicOptions.CustomIsotopics() + bp.nuclideFlags = isotopicOptions.genDefaultNucFlags() + + components = buildComponents() + bp.blockDesigns = buildBlocks(components) + bp.assemDesigns = buildAssemblies(bp.blockDesigns) + bp.gridDesigns = buildGrids() + bp.systemDesigns = buildSystems() + + cs = caseSettings.Settings() + cs.path = None + cs.caseTitle = "scripted-case" + case = cases.Case(cs=cs, bp=bp) + + return case + + +def buildComponents(): + ISOTHERMAL_TEMPERATURE_IN_C = 450.0 + fuel = componentBlueprint.ComponentBlueprint() + fuel.name = "fuel" + fuel.shape = "Circle" + fuel.mult = 217 + fuel.material = "Custom" + fuel.Tinput = ISOTHERMAL_TEMPERATURE_IN_C + fuel.Thot = ISOTHERMAL_TEMPERATURE_IN_C + fuel.id = 0.0 + fuel.od = 0.4 + + clad = componentBlueprint.ComponentBlueprint() + clad.name = "clad" + clad.mult = "fuel.mult" + clad.shape = "Circle" + clad.material = "HT9" + clad.Tinput = ISOTHERMAL_TEMPERATURE_IN_C + clad.Thot = ISOTHERMAL_TEMPERATURE_IN_C + clad.id = 0.508 + clad.od = 0.5842 + + gap = componentBlueprint.ComponentBlueprint() + gap.name = "gap" + gap.shape = "Circle" + gap.mult = "fuel.mult" + gap.material = "Void" + gap.Tinput = ISOTHERMAL_TEMPERATURE_IN_C + gap.Thot = ISOTHERMAL_TEMPERATURE_IN_C + gap.id = "fuel.od" + gap.od = "clad.id" + + wire = componentBlueprint.ComponentBlueprint() + wire.name = "wire" + wire.mult = "fuel.mult" + wire.shape = "Helix" + wire.material = "HT9" + wire.Tinput = ISOTHERMAL_TEMPERATURE_IN_C + wire.Thot = ISOTHERMAL_TEMPERATURE_IN_C + wire.id = 0.0 + wire.od = 0.14224 + wire.axialPitch = 30.48 + wire.helixDiameter = 0.72644 + + duct = componentBlueprint.ComponentBlueprint() + duct.name = "duct" + duct.mult = 1 + duct.shape = "Hexagon" + duct.material = "HT9" + duct.Tinput = ISOTHERMAL_TEMPERATURE_IN_C + duct.Thot = ISOTHERMAL_TEMPERATURE_IN_C + duct.ip = 11.0109 + duct.op = 11.6205 + + intercoolant = componentBlueprint.ComponentBlueprint() + intercoolant.name = "intercoolant" + intercoolant.mult = 1 + intercoolant.shape = "Hexagon" + intercoolant.material = "Sodium" + intercoolant.Tinput = ISOTHERMAL_TEMPERATURE_IN_C + intercoolant.Thot = ISOTHERMAL_TEMPERATURE_IN_C + intercoolant.ip = "duct.op" + intercoolant.op = 12.01420 + + coolant = componentBlueprint.ComponentBlueprint() + coolant.name = "coolant" + coolant.shape = "DerivedShape" + coolant.material = "Sodium" + coolant.Tinput = ISOTHERMAL_TEMPERATURE_IN_C + coolant.Thot = ISOTHERMAL_TEMPERATURE_IN_C + + componentBlueprints = { + c.name: c for c in [fuel, gap, clad, wire, duct, intercoolant, coolant] + } + + return componentBlueprints + + +def buildBlocks(components): + """Build block blueprints""" + blocks = blockBlueprint.BlockKeyedList() + fuel = blockBlueprint.BlockBlueprint() + fuel.name = "fuel" + for cname, c in components.items(): + fuel[cname] = c + blocks[fuel.name] = fuel + + 
reflector = blockBlueprint.BlockBlueprint() + reflector.name = "reflector" + reflector["coolant"] = components["coolant"] + reflector["duct"] = components["duct"] + blocks[reflector.name] = reflector + + return blocks + + +def buildAssemblies(blockDesigns): + """Build assembly blueprints""" + fuelBock, reflectorBlock = blockDesigns["fuel"], blockDesigns["reflector"] + + assemblies = assemblyBlueprint.AssemblyKeyedList() + + fuelAssem = assemblyBlueprint.AssemblyBlueprint() + fuelAssem.name = "Fuel" + fuelAssem.specifier = "IC" + + fuelAssem.blocks = blockBlueprint.BlockList() + fuelAssem.blocks.extend( + [reflectorBlock, fuelBock, fuelBock, fuelBock, reflectorBlock] + ) + fuelAssem.height = [10, 20, 20, 20, 10] + fuelAssem.xsTypes = ["A"] * 5 + fuelAssem.axialMeshPoints = [1] * 5 + + assemblies[fuelAssem.name] = fuelAssem + + reflectorAssem = assemblyBlueprint.AssemblyBlueprint() + reflectorAssem.name = "Reflector" + reflectorAssem.specifier = "RR" + reflectorAssem.blocks = blockBlueprint.BlockList() + reflectorAssem.blocks.extend([reflectorBlock] * 5) + reflectorAssem.height = [10, 20, 20, 20, 10] + reflectorAssem.xsTypes = ["A"] * 5 + reflectorAssem.axialMeshPoints = [1] * 5 + assemblies[reflectorAssem.name] = reflectorAssem + + return assemblies + + +def buildGrids(): + """Build the core map grid""" + + coreGrid = gridBlueprint.GridBlueprint("core") + coreGrid.geom = "hex" + coreGrid.symmetry = "third periodic" + coreGrid.origin = gridBlueprint.Triplet() + + coreGrid.latticeMap = """ + RR RR + IC RR + IC IC RR""" + + grids = gridBlueprint.Grids() + grids["core"] = coreGrid + return grids + + +def buildSystems(): + """Build the core system""" + systems = reactorBlueprint.Systems() + core = reactorBlueprint.SystemBlueprint("core", "core", gridBlueprint.Triplet()) + systems["core"] = core + return systems + + +if __name__ == "__main__": + case = buildCase() + # build ARMI objects + o = case.initializeOperator() + fig = plotting.plotAssemblyTypes( + case.bp, + None, + showBlockAxMesh=True, + ) + plt.show() + + # also write input files + case.writeInputs()
Make example showing how to make blueprints objects in a script To potentially help with #223, we should make a new example (in the gallery? in blueprints docs?) that shows how to build reactor inputs from a script by using Blueprints objects. This is likely a better option than writing out yaml code directly for script-generated ARMI inputs. For example, let's make a script that builds a few basic reactor parts and then either: * Generates the input files * Builds the ARMI objects
terrapower/armi
diff --git a/armi/cli/tests/test_runEntryPoint.py b/armi/cli/tests/test_runEntryPoint.py new file mode 100644 index 00000000..ad8a7614 --- /dev/null +++ b/armi/cli/tests/test_runEntryPoint.py @@ -0,0 +1,22 @@ +""" +Test for run cli entry point +""" +import unittest + +import sys + +from armi.__main__ import main + + +class TestRun(unittest.TestCase): + def test_runCommand(self): + """Ensure main entry point with no args completes.""" + with self.assertRaises(SystemExit) as excinfo: + sys.argv = [""] # have to override the pytest args + main() + self.assertEqual(excinfo.exception.code, 0) + + +if __name__ == "__main__": + # import sys;sys.argv = ['', 'Test.testName'] + unittest.main() diff --git a/armi/utils/tests/test_plotting.py b/armi/utils/tests/test_plotting.py index 5e1252db..435215a1 100644 --- a/armi/utils/tests/test_plotting.py +++ b/armi/utils/tests/test_plotting.py @@ -50,10 +50,10 @@ class TestPlotting(unittest.TestCase): self._checkExists(fName) def test_plotAssemblyTypes(self): - fName = plotting.plotAssemblyTypes( - self.r.core.parent.blueprints, self.r.core.name + plotting.plotAssemblyTypes( + self.r.core.parent.blueprints, "coreAssemblyTypes1.png" ) - self._checkExists(fName) + self._checkExists("coreAssemblyTypes1.png") def _checkExists(self, fName): self.assertTrue(os.path.exists(fName))
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_issue_reference", "has_added_files", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 2, "test_score": 3 }, "num_modified_files": 4 }
0.1
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest pytest-cov pytest-xdist pytest-mock pytest-asyncio", "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc libopenmpi-dev" ], "python": "3.7", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
alabaster==0.7.13 anyio==3.7.1 argon2-cffi==23.1.0 argon2-cffi-bindings==21.2.0 -e git+https://github.com/terrapower/armi.git@0c91039a77ed28c33b4e6eb9121fa4ad4fedea34#egg=armi astroid==2.15.8 attrs==24.2.0 Babel==2.14.0 backcall==0.2.0 beautifulsoup4==4.13.3 black==23.3.0 bleach==6.0.0 certifi @ file:///croot/certifi_1671487769961/work/certifi cffi==1.15.1 charset-normalizer==3.4.1 click==8.1.8 configparser==5.3.0 coverage==7.2.7 cycler==0.11.0 debugpy==1.7.0 decorator==5.1.1 defusedxml==0.7.1 dill==0.3.7 docutils==0.19 entrypoints==0.4 exceptiongroup==1.2.2 execnet==2.0.2 fastjsonschema==2.21.1 fonttools==4.38.0 future==1.0.0 h5py==2.10.0 idna==3.10 imagesize==1.4.1 importlib-metadata==6.7.0 importlib-resources==5.12.0 iniconfig==2.0.0 ipykernel==6.16.2 ipython==7.34.0 ipython-genutils==0.2.0 isort==5.11.5 jedi==0.19.2 Jinja2==3.1.6 jsonschema==4.17.3 jupyter-contrib-core==0.4.2 jupyter-contrib-nbextensions==0.7.0 jupyter-highlight-selected-word==0.2.0 jupyter-server==1.24.0 jupyter_client==7.4.9 jupyter_core==4.12.0 jupyter_nbextensions_configurator==0.6.4 jupyterlab-pygments==0.2.2 kiwisolver==1.4.5 lazy-object-proxy==1.9.0 lxml==5.3.1 Mako==1.2.4 MarkupSafe==2.1.5 matplotlib==3.5.3 matplotlib-inline==0.1.6 mccabe==0.7.0 mistune==3.0.2 mypy-extensions==1.0.0 nbclassic==1.2.0 nbclient==0.7.4 nbconvert==7.6.0 nbformat==5.8.0 nbsphinx==0.9.7 nbsphinx-link==1.3.1 nest-asyncio==1.6.0 notebook==6.5.7 notebook_shim==0.2.4 numpy==1.19.3 ordered-set==4.1.0 packaging==24.0 pandoc==2.4 pandocfilters==1.5.1 parso==0.8.4 pathspec==0.11.2 pbr==6.1.1 pexpect==4.9.0 pickleshare==0.7.5 Pillow==9.5.0 pkgutil_resolve_name==1.3.10 platformdirs==4.0.0 pluggy==1.2.0 plumbum==1.8.3 ply==3.11 prometheus-client==0.17.1 prompt_toolkit==3.0.48 psutil==7.0.0 ptyprocess==0.7.0 py==1.11.0 pycparser==2.21 pyevtk==1.6.0 Pygments==2.17.2 pylint==2.17.7 Pympler==1.1 pyparsing==3.1.4 pyrsistent==0.19.3 pytest==7.4.4 pytest-asyncio==0.21.2 pytest-cov==4.1.0 pytest-html==3.2.0 pytest-metadata==3.0.0 pytest-mock==3.11.1 pytest-xdist==3.5.0 python-dateutil==2.9.0.post0 pytz==2025.2 PyYAML==6.0.1 pyzmq==26.2.1 requests==2.31.0 ruamel.yaml==0.18.10 ruamel.yaml.clib==0.2.8 scipy==1.7.3 Send2Trash==1.8.3 six==1.17.0 sniffio==1.3.1 snowballstemmer==2.2.0 soupsieve==2.4.1 Sphinx==5.3.0 sphinx-gallery==0.14.0 sphinx-rtd-theme==2.0.0 sphinxcontrib-apidoc==0.3.0 sphinxcontrib-applehelp==1.0.2 sphinxcontrib-devhelp==1.0.2 sphinxcontrib-htmlhelp==2.0.0 sphinxcontrib-jquery==4.1 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==1.0.3 sphinxcontrib-serializinghtml==1.1.5 sphinxext-opengraph==0.8.2 tabulate==0.9.0 terminado==0.17.1 tinycss2==1.2.1 tomli==2.0.1 tomlkit==0.12.5 tornado==6.2 traitlets==5.9.0 typed-ast==1.5.5 typing_extensions==4.7.1 urllib3==2.0.7 voluptuous==0.14.1 wcwidth==0.2.13 webencodings==0.5.1 websocket-client==1.6.1 wrapt==1.16.0 xlrd==2.0.1 yamlize==0.7.1 zipp==3.15.0
name: armi channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2022.12.7=py37h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=22.3.1=py37h06a4308_0 - python=3.7.16=h7a1cb2a_0 - readline=8.2=h5eee18b_0 - setuptools=65.6.3=py37h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.38.4=py37h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - alabaster==0.7.13 - anyio==3.7.1 - argon2-cffi==23.1.0 - argon2-cffi-bindings==21.2.0 - astroid==2.15.8 - attrs==24.2.0 - babel==2.14.0 - backcall==0.2.0 - beautifulsoup4==4.13.3 - black==23.3.0 - bleach==6.0.0 - cffi==1.15.1 - charset-normalizer==3.4.1 - click==8.1.8 - configparser==5.3.0 - coverage==7.2.7 - cycler==0.11.0 - debugpy==1.7.0 - decorator==5.1.1 - defusedxml==0.7.1 - dill==0.3.7 - docutils==0.19 - entrypoints==0.4 - exceptiongroup==1.2.2 - execnet==2.0.2 - fastjsonschema==2.21.1 - fonttools==4.38.0 - future==1.0.0 - h5py==2.10.0 - idna==3.10 - imagesize==1.4.1 - importlib-metadata==6.7.0 - importlib-resources==5.12.0 - iniconfig==2.0.0 - ipykernel==6.16.2 - ipython==7.34.0 - ipython-genutils==0.2.0 - isort==5.11.5 - jedi==0.19.2 - jinja2==3.1.6 - jsonschema==4.17.3 - jupyter-client==7.4.9 - jupyter-contrib-core==0.4.2 - jupyter-contrib-nbextensions==0.7.0 - jupyter-core==4.12.0 - jupyter-highlight-selected-word==0.2.0 - jupyter-nbextensions-configurator==0.6.4 - jupyter-server==1.24.0 - jupyterlab-pygments==0.2.2 - kiwisolver==1.4.5 - lazy-object-proxy==1.9.0 - lxml==5.3.1 - mako==1.2.4 - markupsafe==2.1.5 - matplotlib==3.5.3 - matplotlib-inline==0.1.6 - mccabe==0.7.0 - mistune==3.0.2 - mypy-extensions==1.0.0 - nbclassic==1.2.0 - nbclient==0.7.4 - nbconvert==7.6.0 - nbformat==5.8.0 - nbsphinx==0.9.7 - nbsphinx-link==1.3.1 - nest-asyncio==1.6.0 - notebook==6.5.7 - notebook-shim==0.2.4 - numpy==1.19.3 - ordered-set==4.1.0 - packaging==24.0 - pandoc==2.4 - pandocfilters==1.5.1 - parso==0.8.4 - pathspec==0.11.2 - pbr==6.1.1 - pexpect==4.9.0 - pickleshare==0.7.5 - pillow==9.5.0 - pkgutil-resolve-name==1.3.10 - platformdirs==4.0.0 - pluggy==1.2.0 - plumbum==1.8.3 - ply==3.11 - prometheus-client==0.17.1 - prompt-toolkit==3.0.48 - psutil==7.0.0 - ptyprocess==0.7.0 - py==1.11.0 - pycparser==2.21 - pyevtk==1.6.0 - pygments==2.17.2 - pylint==2.17.7 - pympler==1.1 - pyparsing==3.1.4 - pyrsistent==0.19.3 - pytest==7.4.4 - pytest-asyncio==0.21.2 - pytest-cov==4.1.0 - pytest-html==3.2.0 - pytest-metadata==3.0.0 - pytest-mock==3.11.1 - pytest-xdist==3.5.0 - python-dateutil==2.9.0.post0 - pytz==2025.2 - pyyaml==6.0.1 - pyzmq==26.2.1 - requests==2.31.0 - ruamel-yaml==0.18.10 - ruamel-yaml-clib==0.2.8 - scipy==1.7.3 - send2trash==1.8.3 - six==1.17.0 - sniffio==1.3.1 - snowballstemmer==2.2.0 - soupsieve==2.4.1 - sphinx==5.3.0 - sphinx-gallery==0.14.0 - sphinx-rtd-theme==2.0.0 - sphinxcontrib-apidoc==0.3.0 - sphinxcontrib-applehelp==1.0.2 - sphinxcontrib-devhelp==1.0.2 - sphinxcontrib-htmlhelp==2.0.0 - sphinxcontrib-jquery==4.1 - sphinxcontrib-jsmath==1.0.1 - sphinxcontrib-qthelp==1.0.3 - sphinxcontrib-serializinghtml==1.1.5 - sphinxext-opengraph==0.8.2 - tabulate==0.9.0 - terminado==0.17.1 - tinycss2==1.2.1 - tomli==2.0.1 - tomlkit==0.12.5 - tornado==6.2 - traitlets==5.9.0 - typed-ast==1.5.5 - 
typing-extensions==4.7.1 - urllib3==2.0.7 - voluptuous==0.14.1 - wcwidth==0.2.13 - webencodings==0.5.1 - websocket-client==1.6.1 - wrapt==1.16.0 - xlrd==2.0.1 - yamlize==0.7.1 - zipp==3.15.0 prefix: /opt/conda/envs/armi
[ "armi/utils/tests/test_plotting.py::TestPlotting::test_plotAssemblyTypes" ]
[]
[ "armi/cli/tests/test_runEntryPoint.py::TestRun::test_runCommand", "armi/utils/tests/test_plotting.py::TestPlotting::test_plotDepthMap" ]
[]
Apache License 2.0
9,204
3,530
[ "armi/bookkeeping/report/reportingUtils.py", "armi/physics/neutronics/crossSectionSettings.py", "armi/reactor/converters/uniformMesh.py", "armi/utils/plotting.py" ]
numba__numba-6586
0bac18af44d08e913cd512babb9f9b7f6386d30a
2020-12-17 12:10:12
00ad12751725391e8423d67ef845ba56110c55df
diff --git a/numba/core/compiler.py b/numba/core/compiler.py index 163292f9e..a0c03fdb3 100644 --- a/numba/core/compiler.py +++ b/numba/core/compiler.py @@ -514,14 +514,17 @@ class DefaultPassBuilder(object): pm.add_pass(IRProcessing, "processing IR") pm.add_pass(WithLifting, "Handle with contexts") + # inline closures early in case they are using nonlocal's + # see issue #6585. + pm.add_pass(InlineClosureLikes, + "inline calls to locally defined closures") + # pre typing if not state.flags.no_rewrites: pm.add_pass(RewriteSemanticConstants, "rewrite semantic constants") pm.add_pass(DeadBranchPrune, "dead branch pruning") pm.add_pass(GenericRewrites, "nopython rewrites") - pm.add_pass(InlineClosureLikes, - "inline calls to locally defined closures") # convert any remaining closures into functions pm.add_pass(MakeFunctionToJitFunction, "convert make_function into JIT functions")
Unexpected behaviour of `and` in guvectorize returned from factory function (bad specialisation of bool as int64?) - [x] I have tried using the latest released version of Numba (most recent is visible in the change log (https://github.com/numba/numba/blob/master/CHANGE_LOG). - [x] I have included a self contained code sample to reproduce the problem. i.e. it's possible to run as 'python bug.py'. Given the following code ``` import numba import numpy as np def mk_count_vote(min_samples): @numba.guvectorize(["int32[:], int32[:]", "int64[:], int64[:]"], '(n)->()', nopython=True) def count_vote(nbr_labels, res): max_elem = -1 max_count = 0 num_maxes = 0 cur_elem = -1 cur_count = 0 def flush(): nonlocal max_count, num_maxes, max_elem if cur_count > max_count: max_count = cur_count num_maxes = 1 max_elem = cur_elem elif cur_count == max_count: num_maxes += 1 for nbr_label in nbr_labels: if nbr_label == -1: break elif nbr_label != cur_elem: flush() cur_elem = nbr_label cur_count = 1 else: cur_count += 1 flush() # XXX: Changing the first part to bool(num_maxes == 1) fixes the problem if num_maxes == 1 and ((max_count - 1) >= min_samples): print("Setting to", max_elem) res[0] = max_elem else: print("Setting to -1") res[0] = -1 return count_vote ARR = np.array([[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,], [ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,]], dtype=np.int64) count_vote = mk_count_vote(3) print(count_vote(ARR, axis=1)) ``` I get: ``` Setting to -1 Setting to -1 [-1 -1] ``` If I go ahead and make the change to the XXX line then I get ``` Setting to -1 Setting to 2 [-1 2] ``` Which is what I would expect the original program to produce. Some tracing reveals that `(num_maxes == 1)` is `int64` while `(max_count - 1) >= min_samples` is `bool`, which is what prompted me to add the `bool(...)` casting. I suppose therefore that this is some kind of bad specialization. I am running Debian GNU/Linux bullseye/sid and have checked with numba==0.52.0.
numba/numba
diff --git a/numba/tests/test_analysis.py b/numba/tests/test_analysis.py index 35b73bfcc..b09a7c9e2 100644 --- a/numba/tests/test_analysis.py +++ b/numba/tests/test_analysis.py @@ -5,7 +5,9 @@ import types as pytypes import numpy as np from numba.core.compiler import compile_isolated, run_frontend, Flags, StateDict from numba import jit, njit -from numba.core import types, errors, ir, rewrites, ir_utils, utils +from numba.core import types, errors, ir, rewrites, ir_utils, utils, cpu +from numba.core import postproc +from numba.core.inline_closurecall import InlineClosureCallPass from numba.tests.support import TestCase, MemoryLeakMixin, SerialMixin from numba.core.analysis import dead_branch_prune, rewrite_semantic_constants @@ -61,10 +63,24 @@ class TestBranchPruneBase(MemoryLeakMixin, TestCase): before = func_ir.copy() if self._DEBUG: print("=" * 80) - print("before prune") + print("before inline") func_ir.dump() + # run closure inlining to ensure that nonlocals in closures are visible + inline_pass = InlineClosureCallPass(func_ir, + cpu.ParallelOptions(False),) + inline_pass.run() + + # Remove all Dels, and re-run postproc + post_proc = postproc.PostProcessor(func_ir) + post_proc.run() + rewrite_semantic_constants(func_ir, args_tys) + if self._DEBUG: + print("=" * 80) + print("before prune") + func_ir.dump() + dead_branch_prune(func_ir, args_tys) after = func_ir @@ -566,6 +582,44 @@ class TestBranchPrune(TestBranchPruneBase, SerialMixin): [None, None], np.zeros((2, 3)), 1.2, None) + def test_closure_and_nonlocal_can_prune(self): + # Closures must be inlined ahead of branch pruning in case nonlocal + # is used. See issue #6585. + def impl(): + x = 1000 + + def closure(): + nonlocal x + x = 0 + + closure() + + if x == 0: + return True + else: + return False + + self.assert_prune(impl, (), [False,],) + + def test_closure_and_nonlocal_cannot_prune(self): + # Closures must be inlined ahead of branch pruning in case nonlocal + # is used. See issue #6585. + def impl(n): + x = 1000 + + def closure(t): + nonlocal x + x = t + + closure(n) + + if x == 0: + return True + else: + return False + + self.assert_prune(impl, (types.int64,), [None,], 1) + class TestBranchPrunePredicates(TestBranchPruneBase, SerialMixin): # Really important thing to remember... the branch on predicates end up as
{ "commit_name": "head_commit", "failed_lite_validators": [], "has_test_patch": true, "is_lite": true, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 0 }, "num_modified_files": 1 }
0.52
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest pytest-cov pytest-xdist pytest-mock pytest-asyncio", "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc g++" ], "python": "3.7", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
certifi @ file:///croot/certifi_1671487769961/work/certifi coverage==7.2.7 exceptiongroup==1.2.2 execnet==2.0.2 importlib-metadata==6.7.0 iniconfig==2.0.0 llvmlite==0.36.0 -e git+https://github.com/numba/numba.git@0bac18af44d08e913cd512babb9f9b7f6386d30a#egg=numba numpy==1.21.6 packaging==24.0 pluggy==1.2.0 pytest==7.4.4 pytest-asyncio==0.21.2 pytest-cov==4.1.0 pytest-mock==3.11.1 pytest-xdist==3.5.0 tomli==2.0.1 typing_extensions==4.7.1 zipp==3.15.0
name: numba channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2022.12.7=py37h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=22.3.1=py37h06a4308_0 - python=3.7.16=h7a1cb2a_0 - readline=8.2=h5eee18b_0 - setuptools=65.6.3=py37h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.38.4=py37h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - coverage==7.2.7 - exceptiongroup==1.2.2 - execnet==2.0.2 - importlib-metadata==6.7.0 - iniconfig==2.0.0 - llvmlite==0.36.0 - numpy==1.21.6 - packaging==24.0 - pluggy==1.2.0 - pytest==7.4.4 - pytest-asyncio==0.21.2 - pytest-cov==4.1.0 - pytest-mock==3.11.1 - pytest-xdist==3.5.0 - tomli==2.0.1 - typing-extensions==4.7.1 - zipp==3.15.0 prefix: /opt/conda/envs/numba
[ "numba/tests/test_analysis.py::TestBranchPrune::test_closure_and_nonlocal_can_prune" ]
[]
[ "numba/tests/test_analysis.py::TestBranchPrune::test_closure_and_nonlocal_cannot_prune", "numba/tests/test_analysis.py::TestBranchPrune::test_comparison_operators", "numba/tests/test_analysis.py::TestBranchPrune::test_cond_is_kwarg_none", "numba/tests/test_analysis.py::TestBranchPrune::test_cond_is_kwarg_value", "numba/tests/test_analysis.py::TestBranchPrune::test_cond_rewrite_is_correct", "numba/tests/test_analysis.py::TestBranchPrune::test_double_if_else_non_literal_const", "numba/tests/test_analysis.py::TestBranchPrune::test_double_if_else_rt_const", "numba/tests/test_analysis.py::TestBranchPrune::test_freevar_bake_in", "numba/tests/test_analysis.py::TestBranchPrune::test_global_bake_in", "numba/tests/test_analysis.py::TestBranchPrune::test_obj_mode_fallback", "numba/tests/test_analysis.py::TestBranchPrune::test_redefined_variables_are_not_considered_in_prune", "numba/tests/test_analysis.py::TestBranchPrune::test_redefinition_analysis_different_block_can_exec", "numba/tests/test_analysis.py::TestBranchPrune::test_redefinition_analysis_different_block_cannot_exec", "numba/tests/test_analysis.py::TestBranchPrune::test_redefinition_analysis_same_block", "numba/tests/test_analysis.py::TestBranchPrune::test_single_if", "numba/tests/test_analysis.py::TestBranchPrune::test_single_if_const_val", "numba/tests/test_analysis.py::TestBranchPrune::test_single_if_else", "numba/tests/test_analysis.py::TestBranchPrune::test_single_if_else_two_const_val", "numba/tests/test_analysis.py::TestBranchPrune::test_single_if_else_w_following_undetermined", "numba/tests/test_analysis.py::TestBranchPrune::test_single_two_branches_same_cond", "numba/tests/test_analysis.py::TestBranchPrunePredicates::test_issue_5618", "numba/tests/test_analysis.py::TestBranchPrunePredicates::test_literal_const_code_gen", "numba/tests/test_analysis.py::TestBranchPrunePredicates::test_single_if_const", "numba/tests/test_analysis.py::TestBranchPrunePredicates::test_single_if_else_const", "numba/tests/test_analysis.py::TestBranchPrunePredicates::test_single_if_else_freevar", "numba/tests/test_analysis.py::TestBranchPrunePredicates::test_single_if_else_global", "numba/tests/test_analysis.py::TestBranchPrunePredicates::test_single_if_else_negate_const", "numba/tests/test_analysis.py::TestBranchPrunePredicates::test_single_if_else_negate_freevar", "numba/tests/test_analysis.py::TestBranchPrunePredicates::test_single_if_else_negate_global", "numba/tests/test_analysis.py::TestBranchPrunePredicates::test_single_if_freevar", "numba/tests/test_analysis.py::TestBranchPrunePredicates::test_single_if_global", "numba/tests/test_analysis.py::TestBranchPrunePredicates::test_single_if_negate_const", "numba/tests/test_analysis.py::TestBranchPrunePredicates::test_single_if_negate_freevar", "numba/tests/test_analysis.py::TestBranchPrunePredicates::test_single_if_negate_global", "numba/tests/test_analysis.py::TestBranchPrunePostSemanticConstRewrites::test_array_ndim_attr", "numba/tests/test_analysis.py::TestBranchPrunePostSemanticConstRewrites::test_attr_not_len", "numba/tests/test_analysis.py::TestBranchPrunePostSemanticConstRewrites::test_ndim_not_on_array", "numba/tests/test_analysis.py::TestBranchPrunePostSemanticConstRewrites::test_semantic_const_propagates_before_static_rewrites", "numba/tests/test_analysis.py::TestBranchPrunePostSemanticConstRewrites::test_tuple_len" ]
[]
BSD 2-Clause "Simplified" License
9,209
269
[ "numba/core/compiler.py" ]
kangasta__fdbk-90
a955ad766618af5f21e475a603ca8513d90ef1ab
2020-12-20 01:24:41
a955ad766618af5f21e475a603ca8513d90ef1ab
diff --git a/fdbk/data_tools/_run.py b/fdbk/data_tools/_run.py index 442a1ef..7e7ec06 100644 --- a/fdbk/data_tools/_run.py +++ b/fdbk/data_tools/_run.py @@ -94,7 +94,7 @@ def run_data_tools( instruction.get("field"), instruction.get("parameters") ) - except ValueError as error: + except (AssertionError, ValueError) as error: warnings.append(str(error)) result = None diff --git a/fdbk/data_tools/functions/_status_funcs.py b/fdbk/data_tools/functions/_status_funcs.py index 3756fc4..6a49fd5 100644 --- a/fdbk/data_tools/functions/_status_funcs.py +++ b/fdbk/data_tools/functions/_status_funcs.py @@ -21,11 +21,14 @@ OPERATORS = { def _get_value(method, data, field, parameters=None): + if method not in functions: + raise ValueError(method_not_supported(method)) + value_d = functions.get(method)(data, field, parameters) return value_d.get("payload", {}).get("value") -def _get_parameters(parameters=None): +def _get_status_parameters(parameters=None): default = parameters.get("default") checks = parameters.get("checks", []) short_circuit = parameters.get("short_circuit", False) @@ -34,6 +37,14 @@ def _get_parameters(parameters=None): return default, checks, short_circuit, method +def _get_warning_parameters(parameters=None): + check = parameters.get("check") + message = parameters.get("message") + method = parameters.get("method", "latest") + + return check, message, method + + def _run_assertion(assertion, value, other): if assertion not in ASSERTIONS: # pragma: no cover raise RuntimeError(f"Assertion {assertion} was not recognized") @@ -45,7 +56,7 @@ def _run_assertion(assertion, value, other): def _run_check(value, check): - status = check.get("status") + status = check.get("status", 'WARNING') operator = str(check.get("operator", 'or')).lower() result = False if operator == 'or' else True @@ -72,13 +83,11 @@ def status(data, field, parameters=None): warnings = [] try: - default, checks, short_circuit, method = _get_parameters(parameters) + default, checks, short_circuit, method = _get_status_parameters( + parameters) except BaseException: return None - if method not in functions: - raise ValueError(method_not_supported(method)) - value = _get_value(method, data, field, parameters) status_d = dict(field=field, status=default, reason=None) @@ -104,6 +113,26 @@ def status(data, field, parameters=None): return status_dict(**status_d) +def warning(data, field, parameters=None): + if not len(data): + return None + + try: + check, message, method = _get_warning_parameters(parameters) + except BaseException: + return None + + if not check or not message: + return None + + value = _get_value(method, data, field, parameters) + warning = _run_check(value, check) + + if warning: + raise AssertionError(message) + + STATUS_FUNCS = dict( - status=status + status=status, + warning=warning, )
Data tools should support custom warnings Forked from #45. Add option to status data tool to show warning instead of updating status. This could be a separate `warning` function.
kangasta/fdbk
diff --git a/tst/test_data_tools.py b/tst/test_data_tools.py index bdd1200..3fc8b96 100644 --- a/tst/test_data_tools.py +++ b/tst/test_data_tools.py @@ -217,3 +217,29 @@ class DataToolsTest(TestCase): self.assertEqual(aggregated[0]["number"], 1) self.assertEqual(aggregated[1]["number"], 3) self.assertEqual(warnings, []) + + def test_warning_functions(self): + topic_d = STATUS_TOPIC + data = generate_test_data() + + for check, expected in [ + (dict(operator='and', gte=4, lte=6), ["Test warning"]), + (dict(operator='and', gte=6, lte=4), []), + ]: + parameters=dict( + method="average", + message="Test warning", + check=check, + ) + + topic_d['data_tools'] = [ + dict(field='number', method="warning", parameters=parameters), + ] + + results, warnings = run_data_tools(topic_d, data) + + self.assertEqual(len(results), 0) + self.assertEqual(len(warnings), len(expected)) + + if expected: + self.assertEqual(warnings[0], "Test warning")
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_issue_reference", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 3 }, "num_modified_files": 2 }
3.2
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.9", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
astroid==3.3.9 attrs==25.3.0 blinker==1.9.0 certifi==2025.1.31 charset-normalizer==3.4.1 click==8.1.8 coverage==7.8.0 dill==0.3.9 exceptiongroup==1.2.2 -e git+https://github.com/kangasta/fdbk.git@a955ad766618af5f21e475a603ca8513d90ef1ab#egg=fdbk Flask==3.1.0 freezegun==1.5.1 idna==3.10 importlib_metadata==8.6.1 iniconfig==2.1.0 isort==6.0.1 itsdangerous==2.2.0 Jinja2==3.1.6 jsonschema==4.23.0 jsonschema-specifications==2024.10.1 MarkupSafe==3.0.2 mccabe==0.7.0 mock==5.2.0 packaging==24.2 platformdirs==4.3.7 pluggy==1.5.0 pycodestyle==2.13.0 pylint==3.3.6 pytest==8.3.5 python-dateutil==2.9.0.post0 PyYAML==6.0.2 referencing==0.36.2 requests==2.32.3 rpds-py==0.24.0 six==1.17.0 swebench_matterhorn @ file:///swebench_matterhorn tomli==2.2.1 tomlkit==0.13.2 typing_extensions==4.13.0 urllib3==2.3.0 Werkzeug==3.1.3 zipp==3.21.0
name: fdbk channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - astroid==3.3.9 - attrs==25.3.0 - blinker==1.9.0 - certifi==2025.1.31 - charset-normalizer==3.4.1 - click==8.1.8 - coverage==7.8.0 - dill==0.3.9 - exceptiongroup==1.2.2 - flask==3.1.0 - freezegun==1.5.1 - idna==3.10 - importlib-metadata==8.6.1 - iniconfig==2.1.0 - isort==6.0.1 - itsdangerous==2.2.0 - jinja2==3.1.6 - jsonschema==4.23.0 - jsonschema-specifications==2024.10.1 - markupsafe==3.0.2 - mccabe==0.7.0 - mock==5.2.0 - packaging==24.2 - platformdirs==4.3.7 - pluggy==1.5.0 - pycodestyle==2.13.0 - pylint==3.3.6 - pytest==8.3.5 - python-dateutil==2.9.0.post0 - pyyaml==6.0.2 - referencing==0.36.2 - requests==2.32.3 - rpds-py==0.24.0 - six==1.17.0 - swebench-matterhorn==0.0.0 - tomli==2.2.1 - tomlkit==0.13.2 - typing-extensions==4.13.0 - urllib3==2.3.0 - werkzeug==3.1.3 - zipp==3.21.0 prefix: /opt/conda/envs/fdbk
[ "tst/test_data_tools.py::DataToolsTest::test_warning_functions" ]
[]
[ "tst/test_data_tools.py::DataToolsTest::test_aggregate", "tst/test_data_tools.py::DataToolsTest::test_aggregate_always", "tst/test_data_tools.py::DataToolsTest::test_aggregate_empty_window", "tst/test_data_tools.py::DataToolsTest::test_aggregate_min", "tst/test_data_tools.py::DataToolsTest::test_aggregate_unknown_data_tool", "tst/test_data_tools.py::DataToolsTest::test_collection_functions_unit", "tst/test_data_tools.py::DataToolsTest::test_invalid_functions_in_collection", "tst/test_data_tools.py::DataToolsTest::test_run_data_tools_empty_data", "tst/test_data_tools.py::DataToolsTest::test_status_functions", "tst/test_data_tools.py::DataToolsTest::test_status_functions_in_collection", "tst/test_data_tools.py::DataToolsTest::test_status_functions_warnings", "tst/test_data_tools.py::DataToolsTest::test_summary_funcs_return_none_on_empty_data", "tst/test_data_tools.py::DataToolsTest::test_value_functions" ]
[]
MIT License
9,232
825
[ "fdbk/data_tools/_run.py", "fdbk/data_tools/functions/_status_funcs.py" ]
tefra__xsdata-364
ff428d68c61f254609465012cc62c49f3b88e575
2020-12-20 15:52:19
c31e7c4951671c0bc135cd831bdf39fe34f7a36a
sonarcloud[bot]: Kudos, SonarCloud Quality Gate passed! [<img src='https://sonarsource.github.io/sonarcloud-github-static-resources/v2/common/bug.png' alt='Bug' width='16' height='16' />](https://sonarcloud.io/project/issues?id=tefra_xsdata&pullRequest=364&resolved=false&types=BUG) [<img src='https://sonarsource.github.io/sonarcloud-github-static-resources/v2/checks/RatingBadge/A.png' alt='A' width='16' height='16' />](https://sonarcloud.io/project/issues?id=tefra_xsdata&pullRequest=364&resolved=false&types=BUG) [0 Bugs](https://sonarcloud.io/project/issues?id=tefra_xsdata&pullRequest=364&resolved=false&types=BUG) [<img src='https://sonarsource.github.io/sonarcloud-github-static-resources/v2/common/vulnerability.png' alt='Vulnerability' width='16' height='16' />](https://sonarcloud.io/project/issues?id=tefra_xsdata&pullRequest=364&resolved=false&types=VULNERABILITY) [<img src='https://sonarsource.github.io/sonarcloud-github-static-resources/v2/checks/RatingBadge/A.png' alt='A' width='16' height='16' />](https://sonarcloud.io/project/issues?id=tefra_xsdata&pullRequest=364&resolved=false&types=VULNERABILITY) [0 Vulnerabilities](https://sonarcloud.io/project/issues?id=tefra_xsdata&pullRequest=364&resolved=false&types=VULNERABILITY) [<img src='https://sonarsource.github.io/sonarcloud-github-static-resources/v2/common/security_hotspot.png' alt='Security Hotspot' width='16' height='16' />](https://sonarcloud.io/project/security_hotspots?id=tefra_xsdata&pullRequest=364&resolved=false&types=SECURITY_HOTSPOT) [<img src='https://sonarsource.github.io/sonarcloud-github-static-resources/v2/checks/RatingBadge/A.png' alt='A' width='16' height='16' />](https://sonarcloud.io/project/security_hotspots?id=tefra_xsdata&pullRequest=364&resolved=false&types=SECURITY_HOTSPOT) [0 Security Hotspots](https://sonarcloud.io/project/security_hotspots?id=tefra_xsdata&pullRequest=364&resolved=false&types=SECURITY_HOTSPOT) [<img src='https://sonarsource.github.io/sonarcloud-github-static-resources/v2/common/code_smell.png' alt='Code Smell' width='16' height='16' />](https://sonarcloud.io/project/issues?id=tefra_xsdata&pullRequest=364&resolved=false&types=CODE_SMELL) [<img src='https://sonarsource.github.io/sonarcloud-github-static-resources/v2/checks/RatingBadge/A.png' alt='A' width='16' height='16' />](https://sonarcloud.io/project/issues?id=tefra_xsdata&pullRequest=364&resolved=false&types=CODE_SMELL) [0 Code Smells](https://sonarcloud.io/project/issues?id=tefra_xsdata&pullRequest=364&resolved=false&types=CODE_SMELL) [<img src='https://sonarsource.github.io/sonarcloud-github-static-resources/v2/checks/CoverageChart/NoCoverageInfo.png' alt='No Coverage information' width='16' height='16' />](https://sonarcloud.io/component_measures?id=tefra_xsdata&pullRequest=364) No Coverage information [<img src='https://sonarsource.github.io/sonarcloud-github-static-resources/v2/checks/Duplications/3.png' alt='0.0%' width='16' height='16' />](https://sonarcloud.io/component_measures?id=tefra_xsdata&pullRequest=364&metric=new_duplicated_lines_density&view=list) [0.0% Duplication](https://sonarcloud.io/component_measures?id=tefra_xsdata&pullRequest=364&metric=new_duplicated_lines_density&view=list) codecov[bot]: # [Codecov](https://codecov.io/gh/tefra/xsdata/pull/364?src=pr&el=h1) Report > Merging [#364](https://codecov.io/gh/tefra/xsdata/pull/364?src=pr&el=desc) (a730fd0) into [master](https://codecov.io/gh/tefra/xsdata/commit/ff428d68c61f254609465012cc62c49f3b88e575?el=desc) (ff428d6) will **not change** coverage. 
> The diff coverage is `100.00%`. [![Impacted file tree graph](https://codecov.io/gh/tefra/xsdata/pull/364/graphs/tree.svg?width=650&height=150&src=pr&token=YzDDLtywvl)](https://codecov.io/gh/tefra/xsdata/pull/364?src=pr&el=tree) ```diff @@ Coverage Diff @@ ## master #364 +/- ## ========================================= Coverage 100.00% 100.00% ========================================= Files 71 71 Lines 5719 5719 Branches 996 996 ========================================= Hits 5719 5719 ``` | [Impacted Files](https://codecov.io/gh/tefra/xsdata/pull/364?src=pr&el=tree) | Coverage Δ | | |---|---|---| | [xsdata/formats/dataclass/serializers/xml.py](https://codecov.io/gh/tefra/xsdata/pull/364/diff?src=pr&el=tree#diff-eHNkYXRhL2Zvcm1hdHMvZGF0YWNsYXNzL3NlcmlhbGl6ZXJzL3htbC5weQ==) | `100.00% <100.00%> (ø)` | | ------ [Continue to review full report at Codecov](https://codecov.io/gh/tefra/xsdata/pull/364?src=pr&el=continue). > **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta) > `Δ = absolute <relative> (impact)`, `ø = not affected`, `? = missing data` > Powered by [Codecov](https://codecov.io/gh/tefra/xsdata/pull/364?src=pr&el=footer). Last update [ff428d6...a730fd0](https://codecov.io/gh/tefra/xsdata/pull/364?src=pr&el=lastupdated). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments).
diff --git a/xsdata/formats/dataclass/serializers/xml.py b/xsdata/formats/dataclass/serializers/xml.py index 487ce89d..7a0e88f7 100644 --- a/xsdata/formats/dataclass/serializers/xml.py +++ b/xsdata/formats/dataclass/serializers/xml.py @@ -161,8 +161,8 @@ class XmlSerializer(AbstractSerializer): def write_tokens(self, value: Any, var: XmlVar, namespace: NoneStr) -> Generator: """Produce an events stream for the given tokens list or list of tokens lists.""" - if value: - if isinstance(value[0], list): + if value or var.nillable: + if value and isinstance(value[0], list): for val in value: yield from self.write_element(val, var, namespace) else:
XmlSerializer render empty nillable tokens lists
tefra/xsdata
diff --git a/tests/formats/dataclass/serializers/test_xml.py b/tests/formats/dataclass/serializers/test_xml.py index 10f1a9fe..7f065a9e 100644 --- a/tests/formats/dataclass/serializers/test_xml.py +++ b/tests/formats/dataclass/serializers/test_xml.py @@ -167,6 +167,17 @@ class XmlSerializerTests(TestCase): result = self.serializer.write_value([[1, 2, 3], [4, 5, 6]], var, "xsdata") self.assertEqual(expected, list(result)) + var = XmlElement(qname="a", name="a", tokens=True, nillable=True) + expected = [ + (XmlWriterEvent.START, "a"), + (XmlWriterEvent.ATTR, QNames.XSI_NIL, "true"), + (XmlWriterEvent.DATA, []), + (XmlWriterEvent.END, "a"), + ] + + result = self.serializer.write_value([], var, "xsdata") + self.assertEqual(expected, list(result)) + def test_write_any_type_with_primitive(self): var = XmlWildcard(qname="a", name="a") expected = [(XmlWriterEvent.DATA, "str")]
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 2, "test_score": 1 }, "num_modified_files": 1 }
20.12
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "pytest-xdist" ], "pre_install": null, "python": "3.9", "reqs_path": [ "requirements/base.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
cachetools==5.5.2 certifi==2025.1.31 cfgv==3.4.0 chardet==5.2.0 charset-normalizer==3.4.1 click==8.1.8 click-default-group==1.2.4 click-log==0.4.0 codecov==2.1.13 colorama==0.4.6 coverage==7.8.0 distlib==0.3.9 docformatter==1.7.5 exceptiongroup==1.2.2 execnet==2.1.1 filelock==3.18.0 identify==2.6.9 idna==3.10 iniconfig==2.1.0 Jinja2==3.1.6 lxml==5.3.1 MarkupSafe==3.0.2 nodeenv==1.9.1 packaging==24.2 platformdirs==4.3.7 pluggy==1.5.0 pre_commit==4.2.0 py-cpuinfo==9.0.0 pyproject-api==1.9.0 pytest==8.3.5 pytest-benchmark==5.1.0 pytest-cov==6.0.0 pytest-xdist==3.6.1 PyYAML==6.0.2 requests==2.32.3 tomli==2.2.1 toposort==1.10 tox==4.25.0 typing_extensions==4.13.0 untokenize==0.1.1 urllib3==2.3.0 virtualenv==20.29.3 -e git+https://github.com/tefra/xsdata.git@ff428d68c61f254609465012cc62c49f3b88e575#egg=xsdata
name: xsdata channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - cachetools==5.5.2 - certifi==2025.1.31 - cfgv==3.4.0 - chardet==5.2.0 - charset-normalizer==3.4.1 - click==8.1.8 - click-default-group==1.2.4 - click-log==0.4.0 - codecov==2.1.13 - colorama==0.4.6 - coverage==7.8.0 - distlib==0.3.9 - docformatter==1.7.5 - exceptiongroup==1.2.2 - execnet==2.1.1 - filelock==3.18.0 - identify==2.6.9 - idna==3.10 - iniconfig==2.1.0 - jinja2==3.1.6 - lxml==5.3.1 - markupsafe==3.0.2 - nodeenv==1.9.1 - packaging==24.2 - platformdirs==4.3.7 - pluggy==1.5.0 - pre-commit==4.2.0 - py-cpuinfo==9.0.0 - pyproject-api==1.9.0 - pytest==8.3.5 - pytest-benchmark==5.1.0 - pytest-cov==6.0.0 - pytest-xdist==3.6.1 - pyyaml==6.0.2 - requests==2.32.3 - tomli==2.2.1 - toposort==1.10 - tox==4.25.0 - typing-extensions==4.13.0 - untokenize==0.1.1 - urllib3==2.3.0 - virtualenv==20.29.3 prefix: /opt/conda/envs/xsdata
[ "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_tokens" ]
[]
[ "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_next_attribute", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_next_value", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_render_mixed_content", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_any_type_with_generic_object", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_any_type_with_primitive", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_any_type_with_primitive_element", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_choice_when_no_matching_choice_exists", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_choice_with_derived_dataclass", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_choice_with_derived_primitive_value", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_choice_with_generic_object", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_choice_with_raw_value", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_data", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_dataclass", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_dataclass_can_overwrite_params", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_dataclass_with_no_dataclass", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_element", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_element_with_any_type_var", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_element_with_any_type_var_ignore_xs_string", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_element_with_nillable_true", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_mixed_content", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_object_with_derived_element", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_value_with_list_value", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_value_with_unhandled_xml_var", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_xsi_type", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_xsi_type_with_derived_class", "tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_xsi_type_with_illegal_derived_class" ]
[]
MIT License
9,237
207
[ "xsdata/formats/dataclass/serializers/xml.py" ]
construct__construct-889
2626b398a676191114f6ce5d8db903e9590700db
2020-12-20 21:47:42
ff455d0ac39444f223ec19882660ae7f72488bf3
diff --git a/construct/core.py b/construct/core.py index d2ec15e..785f4a0 100644 --- a/construct/core.py +++ b/construct/core.py @@ -351,7 +351,10 @@ class Construct(object): r""" Build an object into a closed binary file. See build(). """ - with open(filename, 'wb') as f: + # Open the file for reading as well as writing. This allows builders to + # read back the stream just written. For example. RawCopy does this. + # See issue #888. + with open(filename, 'w+b') as f: self.build_stream(obj, f, **contextkw) def _build(self, obj, stream, context, path):
build_file() crashes when using RawCopy with a value only Thank you for construct! Here's a failing test case: def test_rawcopy_build_file_from_value_only(tmpdir): d = RawCopy(Byte) d.build_file(dict(value=0), filename=tmpdir.join('test')) Version: master branch, commit 2626b39. Expected result: success Actual result: construct.core.StreamError: Error in path (building) stream.read() failed, requested 1 bytes The cause is that `Construct.build_file()` opens the stream for writing only, but `RawCopy._build()` assumes that the stream is readable when it rewinds the stream to read out what was written to populate the `data` attribute. Pull request to follow.
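A reconstructed, runnable version of the flattened reproduction above, mirroring the regression test added in the test patch; it assumes only the standard library and the `construct` package.

```python
import os
import tempfile

from construct import RawCopy, Byte

# Reproduction of issue #888: before the fix, Construct.build_file() opened the
# target with mode 'wb', so RawCopy._build() could not rewind and read back the
# bytes it had just written and raised StreamError. With the 'w+b' fix it passes.
with tempfile.TemporaryDirectory() as tmpdir:
    d = RawCopy(Byte)
    d.build_file(dict(value=0), filename=os.path.join(tmpdir, "test"))
```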
construct/construct
diff --git a/tests/declarativeunittest.py b/tests/declarativeunittest.py index d02a292..a39fb67 100644 --- a/tests/declarativeunittest.py +++ b/tests/declarativeunittest.py @@ -3,7 +3,7 @@ xfail = pytest.mark.xfail skip = pytest.mark.skip skipif = pytest.mark.skipif -import os, math, random, collections, itertools, io, hashlib, binascii +import os, math, random, collections, itertools, io, hashlib, binascii, tempfile from construct import * from construct.lib import * diff --git a/tests/test_core.py b/tests/test_core.py index 02e2e64..9da306c 100644 --- a/tests/test_core.py +++ b/tests/test_core.py @@ -930,6 +930,15 @@ def test_rawcopy_issue_358(): d = Struct("a"/RawCopy(Byte), "check"/Check(this.a.value == 255)) assert d.build(dict(a=dict(value=255))) == b"\xff" +def test_rawcopy_issue_888(): + # If you use build_file() on a RawCopy that has only a value defined, then + # RawCopy._build may also attempt to read from the file, which won't work + # if build_file opened the file for writing only. + with tempfile.TemporaryDirectory() as tmpdir: + fname = os.path.join(tmpdir, 'test') + d = RawCopy(Byte) + d.build_file(dict(value=0), filename=fname) + def test_byteswapped(): d = ByteSwapped(Bytes(5)) common(d, b"12345", b"54321", 5)
{ "commit_name": "head_commit", "failed_lite_validators": [], "has_test_patch": true, "is_lite": true, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 0 }, "num_modified_files": 1 }
2.10
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-benchmark", "pytest-cov", "twine", "enum34", "numpy", "arrow", "ruamel.yaml" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.7", "reqs_path": [ "requirements/base.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
arrow==1.2.3 bleach==6.0.0 certifi @ file:///croot/certifi_1671487769961/work/certifi cffi==1.15.1 charset-normalizer==3.4.1 -e git+https://github.com/construct/construct.git@2626b398a676191114f6ce5d8db903e9590700db#egg=construct coverage==7.2.7 cryptography==44.0.2 docutils==0.20.1 enum34==1.1.10 exceptiongroup==1.2.2 idna==3.10 importlib-metadata==6.7.0 importlib-resources==5.12.0 iniconfig==2.0.0 jaraco.classes==3.2.3 jeepney==0.9.0 keyring==24.1.1 markdown-it-py==2.2.0 mdurl==0.1.2 more-itertools==9.1.0 numpy==1.21.6 packaging==24.0 pkginfo==1.10.0 pluggy==1.2.0 py-cpuinfo==9.0.0 pycparser==2.21 Pygments==2.17.2 pytest==7.4.4 pytest-benchmark==4.0.0 pytest-cov==4.1.0 python-dateutil==2.9.0.post0 readme-renderer==37.3 requests==2.31.0 requests-toolbelt==1.0.0 rfc3986==2.0.0 rich==13.8.1 ruamel.yaml==0.18.10 ruamel.yaml.clib==0.2.8 SecretStorage==3.3.3 six==1.17.0 tomli==2.0.1 twine==4.0.2 typing_extensions==4.7.1 urllib3==2.0.7 webencodings==0.5.1 zipp==3.15.0
name: construct channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2022.12.7=py37h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=22.3.1=py37h06a4308_0 - python=3.7.16=h7a1cb2a_0 - readline=8.2=h5eee18b_0 - setuptools=65.6.3=py37h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.38.4=py37h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - arrow==1.2.3 - bleach==6.0.0 - cffi==1.15.1 - charset-normalizer==3.4.1 - coverage==7.2.7 - cryptography==44.0.2 - docutils==0.20.1 - enum34==1.1.10 - exceptiongroup==1.2.2 - idna==3.10 - importlib-metadata==6.7.0 - importlib-resources==5.12.0 - iniconfig==2.0.0 - jaraco-classes==3.2.3 - jeepney==0.9.0 - keyring==24.1.1 - markdown-it-py==2.2.0 - mdurl==0.1.2 - more-itertools==9.1.0 - numpy==1.21.6 - packaging==24.0 - pkginfo==1.10.0 - pluggy==1.2.0 - py-cpuinfo==9.0.0 - pycparser==2.21 - pygments==2.17.2 - pytest==7.4.4 - pytest-benchmark==4.0.0 - pytest-cov==4.1.0 - python-dateutil==2.9.0.post0 - readme-renderer==37.3 - requests==2.31.0 - requests-toolbelt==1.0.0 - rfc3986==2.0.0 - rich==13.8.1 - ruamel-yaml==0.18.10 - ruamel-yaml-clib==0.2.8 - secretstorage==3.3.3 - six==1.17.0 - tomli==2.0.1 - twine==4.0.2 - typing-extensions==4.7.1 - urllib3==2.0.7 - webencodings==0.5.1 - zipp==3.15.0 prefix: /opt/conda/envs/construct
[ "tests/test_core.py::test_rawcopy_issue_888" ]
[]
[ "tests/test_core.py::test_bytes", "tests/test_core.py::test_greedybytes", "tests/test_core.py::test_bytes_issue_827", "tests/test_core.py::test_bitwise", "tests/test_core.py::test_bytewise", "tests/test_core.py::test_ints", "tests/test_core.py::test_ints24", "tests/test_core.py::test_halffloats", "tests/test_core.py::test_floats", "tests/test_core.py::test_formatfield", "tests/test_core.py::test_formatfield_ints_randomized", "tests/test_core.py::test_formatfield_floats_randomized", "tests/test_core.py::test_bytesinteger", "tests/test_core.py::test_bitsinteger", "tests/test_core.py::test_varint", "tests/test_core.py::test_varint_issue_705", "tests/test_core.py::test_paddedstring", "tests/test_core.py::test_pascalstring", "tests/test_core.py::test_cstring", "tests/test_core.py::test_greedystring", "tests/test_core.py::test_string_encodings", "tests/test_core.py::test_flag", "tests/test_core.py::test_enum", "tests/test_core.py::test_enum_enum34", "tests/test_core.py::test_enum_enum36", "tests/test_core.py::test_enum_issue_298", "tests/test_core.py::test_enum_issue_677", "tests/test_core.py::test_flagsenum", "tests/test_core.py::test_flagsenum_enum34", "tests/test_core.py::test_flagsenum_enum36", "tests/test_core.py::test_mapping", "tests/test_core.py::test_struct", "tests/test_core.py::test_struct_nested", "tests/test_core.py::test_struct_kwctor", "tests/test_core.py::test_struct_proper_context", "tests/test_core.py::test_struct_sizeof_context_nesting", "tests/test_core.py::test_sequence", "tests/test_core.py::test_sequence_nested", "tests/test_core.py::test_array", "tests/test_core.py::test_array_nontellable", "tests/test_core.py::test_greedyrange", "tests/test_core.py::test_repeatuntil", "tests/test_core.py::test_const", "tests/test_core.py::test_computed", "tests/test_core.py::test_index", "tests/test_core.py::test_rebuild", "tests/test_core.py::test_rebuild_issue_664", "tests/test_core.py::test_default", "tests/test_core.py::test_check", "tests/test_core.py::test_error", "tests/test_core.py::test_focusedseq", "tests/test_core.py::test_pickled", "tests/test_core.py::test_numpy", "tests/test_core.py::test_namedtuple", "tests/test_core.py::test_timestamp", "tests/test_core.py::test_hex", "tests/test_core.py::test_hexdump", "tests/test_core.py::test_hexdump_regression_issue_188", "tests/test_core.py::test_union", "tests/test_core.py::test_union_kwctor", "tests/test_core.py::test_union_issue_348", "tests/test_core.py::test_select", "tests/test_core.py::test_select_kwctor", "tests/test_core.py::test_optional", "tests/test_core.py::test_optional_in_struct_issue_747", "tests/test_core.py::test_optional_in_bit_struct_issue_747", "tests/test_core.py::test_select_buildfromnone_issue_747", "tests/test_core.py::test_if", "tests/test_core.py::test_ifthenelse", "tests/test_core.py::test_switch", "tests/test_core.py::test_switch_issue_357", "tests/test_core.py::test_stopif", "tests/test_core.py::test_padding", "tests/test_core.py::test_padded", "tests/test_core.py::test_aligned", "tests/test_core.py::test_alignedstruct", "tests/test_core.py::test_bitstruct", "tests/test_core.py::test_pointer", "tests/test_core.py::test_peek", "tests/test_core.py::test_seek", "tests/test_core.py::test_tell", "tests/test_core.py::test_pass", "tests/test_core.py::test_terminated", "tests/test_core.py::test_rawcopy", "tests/test_core.py::test_rawcopy_issue_289", "tests/test_core.py::test_rawcopy_issue_358", "tests/test_core.py::test_byteswapped", "tests/test_core.py::test_byteswapped_from_issue_70", 
"tests/test_core.py::test_bitsswapped", "tests/test_core.py::test_prefixed", "tests/test_core.py::test_prefixedarray", "tests/test_core.py::test_fixedsized", "tests/test_core.py::test_nullterminated", "tests/test_core.py::test_nullstripped", "tests/test_core.py::test_restreamdata", "tests/test_core.py::test_transformed", "tests/test_core.py::test_transformed_issue_676", "tests/test_core.py::test_restreamed", "tests/test_core.py::test_restreamed_partial_read", "tests/test_core.py::test_processxor", "tests/test_core.py::test_processrotateleft", "tests/test_core.py::test_checksum", "tests/test_core.py::test_checksum_nonbytes_issue_323", "tests/test_core.py::test_checksum_warnings_issue_841", "tests/test_core.py::test_compressed_zlib", "tests/test_core.py::test_compressed_gzip", "tests/test_core.py::test_compressed_bzip2", "tests/test_core.py::test_compressed_lzma", "tests/test_core.py::test_compressed_prefixed", "tests/test_core.py::test_rebuffered", "tests/test_core.py::test_lazy", "tests/test_core.py::test_lazystruct", "tests/test_core.py::test_lazyarray", "tests/test_core.py::test_lazybound", "tests/test_core.py::test_expradapter", "tests/test_core.py::test_exprsymmetricadapter", "tests/test_core.py::test_exprvalidator", "tests/test_core.py::test_ipaddress_adapter_issue_95", "tests/test_core.py::test_oneof", "tests/test_core.py::test_noneof", "tests/test_core.py::test_filter", "tests/test_core.py::test_slicing", "tests/test_core.py::test_indexing", "tests/test_core.py::test_probe", "tests/test_core.py::test_debugger", "tests/test_core.py::test_repr", "tests/test_core.py::test_operators", "tests/test_core.py::test_operators_issue_87", "tests/test_core.py::test_from_issue_76", "tests/test_core.py::test_from_issue_60", "tests/test_core.py::test_from_issue_171", "tests/test_core.py::test_from_issue_175", "tests/test_core.py::test_from_issue_71", "tests/test_core.py::test_from_issue_231", "tests/test_core.py::test_from_issue_246", "tests/test_core.py::test_from_issue_244", "tests/test_core.py::test_from_issue_269", "tests/test_core.py::test_hanging_issue_280", "tests/test_core.py::test_from_issue_324", "tests/test_core.py::test_from_issue_357", "tests/test_core.py::test_context_is_container", "tests/test_core.py::test_from_issue_362", "tests/test_core.py::test_this_expresion_compare_container", "tests/test_core.py::test_exposing_members_attributes", "tests/test_core.py::test_exposing_members_context", "tests/test_core.py::test_isparsingbuilding", "tests/test_core.py::test_struct_stream", "tests/test_core.py::test_struct_root_topmost", "tests/test_core.py::test_parsedhook_repeatersdiscard", "tests/test_core.py::test_exportksy", "tests/test_core.py::test_greedyrange_issue_697", "tests/test_core.py::test_greedybytes_issue_697", "tests/test_core.py::test_hex_issue_709", "tests/test_core.py::test_buildfile_issue_737" ]
[]
MIT License
9,239
186
[ "construct/core.py" ]
googleapis__gapic-generator-python-717
39be474b4419dfa521ef51927fd36dbf257d68e3
2020-12-22 01:18:07
39be474b4419dfa521ef51927fd36dbf257d68e3
codecov[bot]: # [Codecov](https://codecov.io/gh/googleapis/gapic-generator-python/pull/717?src=pr&el=h1) Report > Merging [#717](https://codecov.io/gh/googleapis/gapic-generator-python/pull/717?src=pr&el=desc) (83836f2) into [master](https://codecov.io/gh/googleapis/gapic-generator-python/commit/edadb228d74630de1595e4151b59ad828ea8fdc6?el=desc) (edadb22) will **not change** coverage. > The diff coverage is `100.00%`. [![Impacted file tree graph](https://codecov.io/gh/googleapis/gapic-generator-python/pull/717/graphs/tree.svg?width=650&height=150&src=pr&token=rvXPSpOilG)](https://codecov.io/gh/googleapis/gapic-generator-python/pull/717?src=pr&el=tree) ```diff @@ Coverage Diff @@ ## master #717 +/- ## ========================================= Coverage 100.00% 100.00% ========================================= Files 26 26 Lines 1598 1604 +6 Branches 324 326 +2 ========================================= + Hits 1598 1604 +6 ``` | [Impacted Files](https://codecov.io/gh/googleapis/gapic-generator-python/pull/717?src=pr&el=tree) | Coverage Δ | | |---|---|---| | [gapic/schema/naming.py](https://codecov.io/gh/googleapis/gapic-generator-python/pull/717/diff?src=pr&el=tree#diff-Z2FwaWMvc2NoZW1hL25hbWluZy5weQ==) | `100.00% <100.00%> (ø)` | | | [gapic/utils/options.py](https://codecov.io/gh/googleapis/gapic-generator-python/pull/717/diff?src=pr&el=tree#diff-Z2FwaWMvdXRpbHMvb3B0aW9ucy5weQ==) | `100.00% <100.00%> (ø)` | | ------ [Continue to review full report at Codecov](https://codecov.io/gh/googleapis/gapic-generator-python/pull/717?src=pr&el=continue). > **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta) > `Δ = absolute <relative> (impact)`, `ø = not affected`, `? = missing data` > Powered by [Codecov](https://codecov.io/gh/googleapis/gapic-generator-python/pull/717?src=pr&el=footer). Last update [edadb22...83836f2](https://codecov.io/gh/googleapis/gapic-generator-python/pull/717?src=pr&el=lastupdated). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments).
diff --git a/gapic/schema/naming.py b/gapic/schema/naming.py index c591ad59..3f49a18a 100644 --- a/gapic/schema/naming.py +++ b/gapic/schema/naming.py @@ -42,6 +42,7 @@ class Naming(abc.ABC): version: str = '' product_name: str = '' proto_package: str = '' + _warehouse_package_name: str = '' def __post_init__(self): if not self.product_name: @@ -141,6 +142,10 @@ class Naming(abc.ABC): # with ('x.y',) will become a two-tuple: ('x', 'y') i.capitalize() for i in '.'.join(opts.namespace).split('.') )) + if opts.warehouse_package_name: + package_info = dataclasses.replace(package_info, + _warehouse_package_name=opts.warehouse_package_name + ) # Done; return the naming information. return package_info @@ -186,9 +191,11 @@ class Naming(abc.ABC): @property def warehouse_package_name(self) -> str: """Return the appropriate Python package name for Warehouse.""" - - # Piece the name and namespace together to come up with the - # proper package name. + # If a custom name has been set, use it + if self._warehouse_package_name: + return self._warehouse_package_name + # Otherwise piece the name and namespace together to come + # up with the proper package name. answer = list(self.namespace) + self.name.split(' ') return '-'.join(answer).lower() diff --git a/gapic/utils/options.py b/gapic/utils/options.py index d99e34c6..b8e79a06 100644 --- a/gapic/utils/options.py +++ b/gapic/utils/options.py @@ -34,6 +34,7 @@ class Options: """ name: str = '' namespace: Tuple[str, ...] = dataclasses.field(default=()) + warehouse_package_name: str = '' retry: Optional[Dict[str, Any]] = None sample_configs: Tuple[str, ...] = dataclasses.field(default=()) templates: Tuple[str, ...] = dataclasses.field(default=('DEFAULT',)) @@ -53,6 +54,7 @@ class Options: 'add-iam-methods', # microgenerator implementation for `reroute_to_grpc_interface` # transport type(s) delineated by '+' (i.e. grpc, rest, custom.[something], etc?) 'transport', + 'warehouse-package-name' # change the package name on PyPI )) @classmethod @@ -129,6 +131,8 @@ class Options: answer = Options( name=opts.pop('name', ['']).pop(), namespace=tuple(opts.pop('namespace', [])), + warehouse_package_name=opts.pop( + 'warehouse-package-name', ['']).pop(), retry=retry_cfg, sample_configs=tuple( cfg_path
Allow package name to be customized via a generator option https://github.com/googleapis/gapic-generator-python/blob/f86a47b6431e374ae1797061511b49fe6bf22daf/gapic/schema/naming.py#L186-L193 Occasionally we want to deviate from the generated provided `api.naming.warehouse_package_name`. * The published package includes two GAPICs (firestore admin and firestore) and we want the library to be consistently named `google-cloud-firestore`. * The API has a long name and we want to rename the package to make it easier to spell. `google-cloud-assuredworkloads` -> `google-cloud-assured-workloads`. The name is used to identify the library in request headers so it is somewhat important. https://github.com/googleapis/gapic-generator-python/blob/af17501d258c7c37fc1081fcad5fe18f7629f4c3/gapic/templates/%25namespace/%25name_%25version/%25sub/services/%25service/transports/base.py.j2#L29-L33 CC @crwilcox
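A short sketch of how the new option threads through the generator's internal `Options`/`Naming` objects, mirroring the unit test added in the test patch; the end-user protoc plugin invocation is not shown here.

```python
from google.protobuf import descriptor_pb2

from gapic.schema import naming
from gapic.utils.options import Options

# With the patch, an explicit warehouse-package-name overrides the package
# name that would otherwise be derived from the proto package.
proto = descriptor_pb2.FileDescriptorProto(package="google.translation")
n = naming.Naming.build(
    proto,
    opts=Options(warehouse_package_name="google-cloud-foo"),
)
assert n.warehouse_package_name == "google-cloud-foo"
```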
googleapis/gapic-generator-python
diff --git a/tests/unit/generator/test_options.py b/tests/unit/generator/test_options.py index 5235c2e4..60d365a8 100644 --- a/tests/unit/generator/test_options.py +++ b/tests/unit/generator/test_options.py @@ -152,3 +152,8 @@ def test_options_old_naming(): def test_options_add_iam_methods(): opts = Options.build('add-iam-methods') assert opts.add_iam_methods + + +def test_options_warehouse_package_name(): + opts = Options.build('warehouse-package-name') + assert opts.warehouse_package_name diff --git a/tests/unit/schema/test_naming.py b/tests/unit/schema/test_naming.py index ec1e0dad..c0487b7d 100644 --- a/tests/unit/schema/test_naming.py +++ b/tests/unit/schema/test_naming.py @@ -218,6 +218,16 @@ def test_cli_override_name_and_namespace_versionless(): assert not n.version +def test_cli_override_warehouse_package_name(): + FileDesc = descriptor_pb2.FileDescriptorProto + proto1 = FileDesc(package='google.translation') + n = naming.Naming.build( + proto1, + opts=Options(warehouse_package_name='google-cloud-foo'), + ) + assert n.warehouse_package_name == "google-cloud-foo" + + def test_build_factory(): proto = descriptor_pb2.FileDescriptorProto( package='google.mollusc.v1alpha1'
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 2 }, "num_modified_files": 2 }
0.38
{ "env_vars": null, "env_yml_path": null, "install": "pip install --editable .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "pytest-xdist", "pytest-asyncio", "pyfakefs" ], "pre_install": [ "apt-get update", "apt-get install -y gcc", "apt-get install -y pandoc" ], "python": "3.8", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
cachetools==4.2.4 certifi==2025.1.31 charset-normalizer==3.4.1 click==7.1.2 coverage==7.6.1 dataclasses==0.6 exceptiongroup==1.2.2 execnet==2.1.1 -e git+https://github.com/googleapis/gapic-generator-python.git@39be474b4419dfa521ef51927fd36dbf257d68e3#egg=gapic_generator google-api-core==1.24.1 google-auth==1.35.0 googleapis-common-protos==1.52.0 grpcio==1.70.0 idna==3.10 iniconfig==2.1.0 Jinja2==2.11.2 MarkupSafe==1.1.1 packaging==24.2 pluggy==1.5.0 protobuf==3.14.0 pyasn1==0.6.1 pyasn1_modules==0.4.2 pyfakefs==5.8.0 pypandoc==1.5 pytest==8.3.5 pytest-asyncio==0.24.0 pytest-cov==5.0.0 pytest-xdist==3.6.1 pytz==2025.2 PyYAML==5.3.1 requests==2.32.3 rsa==4.9 six==1.17.0 tomli==2.2.1 urllib3==2.2.3
name: gapic-generator-python channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=24.2=py38h06a4308_0 - python=3.8.20=he870216_0 - readline=8.2=h5eee18b_0 - setuptools=75.1.0=py38h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.44.0=py38h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - cachetools==4.2.4 - certifi==2025.1.31 - charset-normalizer==3.4.1 - click==7.1.2 - coverage==7.6.1 - dataclasses==0.6 - exceptiongroup==1.2.2 - execnet==2.1.1 - google-api-core==1.24.1 - google-auth==1.35.0 - googleapis-common-protos==1.52.0 - grpcio==1.70.0 - idna==3.10 - iniconfig==2.1.0 - jinja2==2.11.2 - markupsafe==1.1.1 - packaging==24.2 - pluggy==1.5.0 - protobuf==3.14.0 - pyasn1==0.6.1 - pyasn1-modules==0.4.2 - pyfakefs==5.8.0 - pypandoc==1.5 - pytest==8.3.5 - pytest-asyncio==0.24.0 - pytest-cov==5.0.0 - pytest-xdist==3.6.1 - pytz==2025.2 - pyyaml==5.3.1 - requests==2.32.3 - rsa==4.9 - six==1.17.0 - tomli==2.2.1 - urllib3==2.2.3 prefix: /opt/conda/envs/gapic-generator-python
[ "tests/unit/generator/test_options.py::test_options_warehouse_package_name", "tests/unit/schema/test_naming.py::test_cli_override_warehouse_package_name" ]
[]
[ "tests/unit/generator/test_options.py::test_options_empty", "tests/unit/generator/test_options.py::test_options_replace_templates", "tests/unit/generator/test_options.py::test_options_relative_templates", "tests/unit/generator/test_options.py::test_options_unrecognized", "tests/unit/generator/test_options.py::test_flags_unrecognized", "tests/unit/generator/test_options.py::test_options_unrecognized_likely_typo", "tests/unit/generator/test_options.py::test_options_trim_whitespace", "tests/unit/generator/test_options.py::test_options_no_valid_sample_config", "tests/unit/generator/test_options.py::test_options_service_config", "tests/unit/generator/test_options.py::test_options_lazy_import", "tests/unit/generator/test_options.py::test_options_old_naming", "tests/unit/generator/test_options.py::test_options_add_iam_methods", "tests/unit/schema/test_naming.py::test_long_name", "tests/unit/schema/test_naming.py::test_module_name", "tests/unit/schema/test_naming.py::test_versioned_module_name_no_version", "tests/unit/schema/test_naming.py::test_versioned_module_name", "tests/unit/schema/test_naming.py::test_namespace_packages", "tests/unit/schema/test_naming.py::test_warehouse_package_name_no_namespace", "tests/unit/schema/test_naming.py::test_warehouse_package_name_with_namespace", "tests/unit/schema/test_naming.py::test_warehouse_package_name_multiple_words", "tests/unit/schema/test_naming.py::test_build_no_annotations", "tests/unit/schema/test_naming.py::test_build_no_annotations_no_version", "tests/unit/schema/test_naming.py::test_build_no_namespace", "tests/unit/schema/test_naming.py::test_inconsistent_package_error", "tests/unit/schema/test_naming.py::test_subpackages", "tests/unit/schema/test_naming.py::test_cli_override_name", "tests/unit/schema/test_naming.py::test_cli_override_name_underscores", "tests/unit/schema/test_naming.py::test_cli_override_namespace", "tests/unit/schema/test_naming.py::test_cli_override_namespace_dotted", "tests/unit/schema/test_naming.py::test_cli_override_name_and_namespace", "tests/unit/schema/test_naming.py::test_cli_override_name_and_namespace_versionless", "tests/unit/schema/test_naming.py::test_build_factory" ]
[]
Apache License 2.0
9,249
714
[ "gapic/schema/naming.py", "gapic/utils/options.py" ]
saketkc__pysradb-92
733c938d4328c4b6ccf3394f3b107c285468d2d9
2020-12-23 02:05:23
dfe1b3ac8d99e7a915b0aa9b845a7ff8fa64b6cc
codecov[bot]: # [Codecov](https://codecov.io/gh/saketkc/pysradb/pull/92?src=pr&el=h1) Report > Merging [#92](https://codecov.io/gh/saketkc/pysradb/pull/92?src=pr&el=desc) (989155f) into [master](https://codecov.io/gh/saketkc/pysradb/commit/733c938d4328c4b6ccf3394f3b107c285468d2d9?el=desc) (733c938) will **increase** coverage by `0.22%`. > The diff coverage is `100.00%`. [![Impacted file tree graph](https://codecov.io/gh/saketkc/pysradb/pull/92/graphs/tree.svg?width=650&height=150&src=pr&token=w1fPTXpzK2)](https://codecov.io/gh/saketkc/pysradb/pull/92?src=pr&el=tree) ```diff @@ Coverage Diff @@ ## master #92 +/- ## ========================================== + Coverage 54.19% 54.41% +0.22% ========================================== Files 7 7 Lines 1777 1777 ========================================== + Hits 963 967 +4 + Misses 814 810 -4 ``` | [Impacted Files](https://codecov.io/gh/saketkc/pysradb/pull/92?src=pr&el=tree) | Coverage Δ | | |---|---|---| | [pysradb/search.py](https://codecov.io/gh/saketkc/pysradb/pull/92/diff?src=pr&el=tree#diff-cHlzcmFkYi9zZWFyY2gucHk=) | `79.81% <100.00%> (ø)` | | | [pysradb/sraweb.py](https://codecov.io/gh/saketkc/pysradb/pull/92/diff?src=pr&el=tree#diff-cHlzcmFkYi9zcmF3ZWIucHk=) | `80.76% <0.00%> (+0.80%)` | :arrow_up: | ------ [Continue to review full report at Codecov](https://codecov.io/gh/saketkc/pysradb/pull/92?src=pr&el=continue). > **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta) > `Δ = absolute <relative> (impact)`, `ø = not affected`, `? = missing data` > Powered by [Codecov](https://codecov.io/gh/saketkc/pysradb/pull/92?src=pr&el=footer). Last update [733c938...989155f](https://codecov.io/gh/saketkc/pysradb/pull/92?src=pr&el=lastupdated). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments).
diff --git a/pysradb/search.py b/pysradb/search.py index a0a582c..613c44b 100644 --- a/pysradb/search.py +++ b/pysradb/search.py @@ -1,6 +1,5 @@ """This file contains the search classes for the search feature. """ -import numpy as np import os import re import requests @@ -78,9 +77,19 @@ class QuerySearch: ------- get_df() Returns the dataframe storing this search result. + search() Executes the search. + show_result_statistics() + Shows summary information about search results. + + visualise_results() + Generate graphs that visualise the search results. + + get_plot_objects(): + Get the plot objects for plots generated. + """ def __init__( @@ -682,6 +691,18 @@ class SraSearch(QuerySearch): sends the user query via requests to NCBI Entrez API and returns search results as a pandas dataframe. + show_result_statistics() + Shows summary information about search results. + + visualise_results() + Generate graphs that visualise the search results. + + get_plot_objects(): + Get the plot objects for plots generated. + + get_uids(): + Get NCBI uids retrieved during this search query. + _format_query_string() formats the input user query into a string @@ -732,6 +753,7 @@ class SraSearch(QuerySearch): ) self.entries = {} self.number_entries = 0 + self.uids = [] def search(self): # Step 1: retrieves the list of uids that satisfies the input @@ -744,21 +766,21 @@ class SraSearch(QuerySearch): timeout=SEARCH_REQUEST_TIMEOUT, ) r.raise_for_status() - uids = r.json()["esearchresult"]["idlist"] + self.uids = r.json()["esearchresult"]["idlist"] # Step 2: retrieves the detailed information for each uid # returned, in groups of SRA_SEARCH_GROUP_SIZE. - if not uids: + if not self.uids: print( f"No results found for the following search query: \n {self.fields}" ) return # If no queries found, return nothing - pbar = tqdm(total=len(uids)) - for i in range(0, len(uids), SRA_SEARCH_GROUP_SIZE): + pbar = tqdm(total=len(self.uids)) + for i in range(0, len(self.uids), SRA_SEARCH_GROUP_SIZE): current_uids = ",".join( - uids[i : min(i + SRA_SEARCH_GROUP_SIZE, len(uids))] + self.uids[i : min(i + SRA_SEARCH_GROUP_SIZE, len(self.uids))] ) - pbar.update(min(SRA_SEARCH_GROUP_SIZE, len(uids) - i)) + pbar.update(min(SRA_SEARCH_GROUP_SIZE, len(self.uids) - i)) payload2 = {"db": "sra", "retmode": "xml", "id": current_uids} r = requests_3_retries().get( @@ -772,8 +794,7 @@ class SraSearch(QuerySearch): self._format_response(r.raw) pbar.close() self._format_result() - if self.verbosity >= 2: - self.df["pmid"] = list(uids) + except requests.exceptions.Timeout: sys.exit(f"Connection to the server has timed out. Please retry.") except requests.exceptions.HTTPError: @@ -782,6 +803,14 @@ class SraSearch(QuerySearch): f"\nURL queried: {r.url} \nUser query: {self.fields}" ) + def get_uids(self): + """Get NCBI uids retrieved during this search query. + + Note: There is a chance that some uids retrieved do not appear in + the search result output (Refer to #88) + """ + return self.uids + def _format_query_string(self): term = "" if self.fields["query"]: @@ -1195,6 +1224,15 @@ class EnaSearch(QuerySearch): sends the user query via requests to ENA API and stores search result as an instance attribute in the form of a pandas dataframe + show_result_statistics() + Shows summary information about search results. + + visualise_results() + Generate graphs that visualise the search results. + + get_plot_objects(): + Get the plot objects for plots generated. 
+ _format_query_string() formats the input user query into a string @@ -1467,6 +1505,15 @@ class GeoSearch(SraSearch): depending on the search query. If query is sent to both APIs, the intersection of the two sets of query results are returned. + show_result_statistics() + Shows summary information about search results. + + visualise_results() + Generate graphs that visualise the search results. + + get_plot_objects(): + Get the plot objects for plots generated. + _format_geo_query_string() formats the GEO DataSets portion of the input user query into a string. diff --git a/pysradb/utils.py b/pysradb/utils.py index a7be6d9..53c986d 100644 --- a/pysradb/utils.py +++ b/pysradb/utils.py @@ -45,14 +45,13 @@ def requests_3_retries(): ------- session: requests.Session requests session object that allows 3 retries for server-side - errors, for GET and POST requests. + errors. """ session = requests.Session() retry = Retry( total=3, backoff_factor=0.5, status_forcelist=[500, 502, 503, 504], - allowed_methods=["POST", "GET", "HEAD"], ) adapter = HTTPAdapter(max_retries=retry) session.mount("http://", adapter)
[BUG] ValueError when using SraSearch to query **Describe the bug** Using `SraSearch` with `verbosity>=2` and a large query raises a ValueError when setting `self.df["pmid"] = list(uids)` (https://github.com/saketkc/pysradb/blob/c23d4a769543d05a0f002d1b28c985da5963573f/pysradb/search.py#L776) because the size of the underlying dataframe seems to vary. The following error is raised: ```python ValueError Traceback (most recent call last) <ipython-input-1-d19c33e66199> in <module> 1 instance = SraSearch(verbosity=3, return_max=max_query_num, query=query, platform='illumina') ----> 2 instance.search() 3 df_search = instance.get_df() /pysradb/search.py in search(self) 774 self._format_result() 775 if self.verbosity >= 2: --> 776 self.df["pmid"] = list(uids) 777 except requests.exceptions.Timeout: 778 sys.exit(f"Connection to the server has timed out. Please retry.") /pandas/core/frame.py in __setitem__(self, key, value) 3038 else: 3039 # set column -> 3040 self._set_item(key, value) 3041 3042 def _setitem_slice(self, key: slice, value): /pandas/core/frame.py in _set_item(self, key, value) 3114 """ 3115 self._ensure_valid_index(value) -> 3116 value = self._sanitize_column(key, value) 3117 NDFrame._set_item(self, key, value) 3118 /pandas/core/frame.py in _sanitize_column(self, key, value, broadcast) 3762 3763 # turn me into an ndarray -> 3764 value = sanitize_index(value, self.index) 3765 if not isinstance(value, (np.ndarray, Index)): 3766 if isinstance(value, list) and len(value) > 0: /pandas/core/internals/construction.py in sanitize_index(data, index) 745 """ 746 if len(data) != len(index): --> 747 raise ValueError( 748 "Length of values " 749 f"({len(data)}) " ValueError: Length of values (86768) does not match length of index (86721) ``` Multiple runs yield slightly different error messages: ```python ValueError: Length of values (86768) does not match length of index (86760) ``` It seems like the index length is varying for some reason. **To Reproduce** Execute the following code: ``` from pysradb.search import SraSearch max_query_num = 1_000_000 query = 'txid2697049[Organism:noexp] AND ("filetype cram"[Properties] OR "filetype bam"[Properties] OR "filetype fastq"[Properties])' instance = SraSearch(verbosity=2, return_max=max_query_num, query=query, platform='illumina') instance.search() df_search = instance.get_df() ``` **Desktop:** - OS: `Linux` - Python version: `3.8.5` - pysradb version: `0.11.2-dev0`
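A small usage sketch of the API after this change: the NCBI uids are exposed through the new accessor instead of being forced into the results dataframe. The query string and `return_max` value are illustrative, and the calls perform live NCBI requests.

```python
from pysradb.search import SraSearch

# After the fix, the dataframe no longer gains a 'pmid' column built from the
# uid list (whose length can differ from the number of parsed entries); the
# raw NCBI uids are available separately instead.
instance = SraSearch(verbosity=3, return_max=20, query="ribosome profiling")
instance.search()
df = instance.get_df()
uids = instance.get_uids()  # may include uids without a matching row in df
```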
saketkc/pysradb
diff --git a/tests/data/test_search/sra_uids.txt b/tests/data/test_search/sra_uids.txt new file mode 100644 index 0000000..5a75d5d --- /dev/null +++ b/tests/data/test_search/sra_uids.txt @@ -0,0 +1,49 @@ +246598 +246597 +246596 +246595 +181685 +181684 +181683 +155791 +155790 +130901 +130900 +130899 +130898 +130897 +130896 +130895 +130894 +130893 +130892 +130891 +130890 +130889 +130888 +130887 +130886 +130885 +130884 +130883 +130882 +130881 +130880 +214061 +214060 +214059 +214028 +214027 +213982 +202173 +202172 +202171 +202169 +202168 +202167 +201949 +201948 +201814 +201806 +201805 +201804 diff --git a/tests/test_search.py b/tests/test_search.py index 9fde418..4703bea 100644 --- a/tests/test_search.py +++ b/tests/test_search.py @@ -839,6 +839,13 @@ def sra_formatted_responses_2(): ] [email protected](scope="module") +def sra_uids(): + with open("./tests/data/test_search/sra_uids.txt", "r") as f: + uids = f.read().splitlines() + return uids + + @pytest.fixture(scope="module") def ena_responses_json(): data = [] @@ -920,6 +927,14 @@ def test_sra_search_1(): assert found_accessions == set(expected_accessions) +def test_sra_uids(sra_uids): + instance = SraSearch( + 3, 1000, query="ribosome profiling", publication_date="01-10-2012:01-01-2013" + ) + instance.search() + assert instance.get_uids() == sra_uids + + def test_valid_search_query_1_sra(valid_search_inputs_1): expected_query = [ "covid-19",
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 2, "test_score": 0 }, "num_modified_files": 2 }
0.11
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "coverage", "flake8", "black" ], "pre_install": [], "python": "3.8", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
black==24.8.0 certifi==2025.1.31 chardet==4.0.0 click==8.1.8 coverage==7.6.1 exceptiongroup==1.2.2 flake8==7.1.2 idna==2.10 iniconfig==2.1.0 mccabe==0.7.0 mypy-extensions==1.0.0 numpy==1.24.4 packaging==24.2 pandas==1.1.5 pathspec==0.12.1 platformdirs==4.3.6 pluggy==1.5.0 pycodestyle==2.12.1 pyflakes==3.2.0 -e git+https://github.com/saketkc/pysradb.git@733c938d4328c4b6ccf3394f3b107c285468d2d9#egg=pysradb pytest==8.3.5 python-dateutil==2.9.0.post0 pytz==2025.2 requests==2.25.1 requests-ftp==0.3.1 six==1.17.0 tomli==2.2.1 tqdm==4.54.1 typing_extensions==4.13.0 urllib3==1.26.20 xmltodict==0.12.0
name: pysradb channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=24.2=py38h06a4308_0 - python=3.8.20=he870216_0 - readline=8.2=h5eee18b_0 - setuptools=75.1.0=py38h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.44.0=py38h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - black==24.8.0 - certifi==2025.1.31 - chardet==4.0.0 - click==8.1.8 - coverage==7.6.1 - exceptiongroup==1.2.2 - flake8==7.1.2 - idna==2.10 - iniconfig==2.1.0 - mccabe==0.7.0 - mypy-extensions==1.0.0 - numpy==1.24.4 - packaging==24.2 - pandas==1.1.5 - pathspec==0.12.1 - platformdirs==4.3.6 - pluggy==1.5.0 - pycodestyle==2.12.1 - pyflakes==3.2.0 - pytest==8.3.5 - python-dateutil==2.9.0.post0 - pytz==2025.2 - requests==2.25.1 - requests-ftp==0.3.1 - six==1.17.0 - tomli==2.2.1 - tqdm==4.54.1 - typing-extensions==4.13.0 - urllib3==1.26.20 - xmltodict==0.12.0 prefix: /opt/conda/envs/pysradb
[ "tests/test_search.py::test_sra_uids" ]
[ "tests/test_search.py::test_ena_search_2", "tests/test_search.py::test_valid_search_query_1_ena", "tests/test_search.py::test_valid_search_query_2_ena", "tests/test_search.py::test_geo_search_1" ]
[ "tests/test_search.py::test_invalid_search_query", "tests/test_search.py::test_sra_search_1", "tests/test_search.py::test_valid_search_query_1_sra", "tests/test_search.py::test_valid_search_query_2_sra", "tests/test_search.py::test_sra_search_format_request", "tests/test_search.py::test_sra_search_format_result_1", "tests/test_search.py::test_sra_search_format_result_2", "tests/test_search.py::test_ena_search_1", "tests/test_search.py::test_ena_search_3", "tests/test_search.py::test_ena_search_format_request", "tests/test_search.py::test_ena_search_format_result", "tests/test_search.py::test_valid_search_query_geo", "tests/test_search.py::test_geo_search_format_request", "tests/test_search.py::test_geo_info" ]
[]
BSD 3-Clause "New" or "Revised" License
9,260
1,406
[ "pysradb/search.py", "pysradb/utils.py" ]
bridgecrewio__checkov-748
18b6fab1a1a2b96a9829f6772f228705b31a6310
2020-12-23 15:07:40
25b466be980e420e64519fbf74c93a35a0c94203
diff --git a/checkov/terraform/checks/resource/aws/MSKClusterEncryption.py b/checkov/terraform/checks/resource/aws/MSKClusterEncryption.py index 8e7bfee51..d0aa56f3e 100644 --- a/checkov/terraform/checks/resource/aws/MSKClusterEncryption.py +++ b/checkov/terraform/checks/resource/aws/MSKClusterEncryption.py @@ -11,16 +11,18 @@ class MSKClusterEncryption(BaseResourceCheck): super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources) def scan_resource_conf(self, conf): + # Note: As long as the 'encryption_info' block is specified, the cluster + # will be encrypted at rest even if 'encryption_at_rest_kms_key_arn' is not specified + # See https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/msk_cluster#encryption_at_rest_kms_key_arn if 'encryption_info' in conf.keys(): encryption = conf['encryption_info'][0] - if 'encryption_at_rest_kms_key_arn' in encryption: - if 'encryption_in_transit' in encryption: - transit = encryption['encryption_in_transit'][0] - if 'client_broker' in transit and transit['client_broker'][0] != 'TLS' or \ - 'in_cluster' in transit and transit['in_cluster'][0] is False: - return CheckResult.FAILED - return CheckResult.PASSED + if 'encryption_in_transit' in encryption: + transit = encryption['encryption_in_transit'][0] + if 'client_broker' in transit and transit['client_broker'][0] != 'TLS' or \ + 'in_cluster' in transit and transit['in_cluster'][0] is False: + return CheckResult.FAILED return CheckResult.PASSED + return CheckResult.PASSED return CheckResult.FAILED
False positive for check 'MSKClusterEncryption' (CKV_AWS_81) **Describe the bug** CKV_AWS_81 is reporting MSK clusters as unencrypted at rest while they are encrypted at rest. **To Reproduce** Example Terraform code producing a cluster with encryption (it will use the default KMS key as documented in https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/msk_cluster#encryption_at_rest_kms_key_arn) (slightly adapted from example on the msk_cluster resource docs) <details> <summary>Code</summary> ``` resource "aws_vpc" "vpc" { cidr_block = "192.168.0.0/22" } data "aws_availability_zones" "azs" { state = "available" } resource "aws_subnet" "subnet_az1" { availability_zone = data.aws_availability_zones.azs.names[0] cidr_block = "192.168.0.0/24" vpc_id = aws_vpc.vpc.id } resource "aws_subnet" "subnet_az2" { availability_zone = data.aws_availability_zones.azs.names[1] cidr_block = "192.168.1.0/24" vpc_id = aws_vpc.vpc.id } resource "aws_subnet" "subnet_az3" { availability_zone = data.aws_availability_zones.azs.names[2] cidr_block = "192.168.2.0/24" vpc_id = aws_vpc.vpc.id } resource "aws_security_group" "sg" { vpc_id = aws_vpc.vpc.id } resource "aws_kms_key" "kms" { description = "example" } resource "aws_cloudwatch_log_group" "test" { name = "msk_broker_logs" } resource "aws_s3_bucket" "bucket" { bucket = "msk-broker-logs-bucket" acl = "private" } resource "aws_iam_role" "firehose_role" { name = "firehose_test_role" assume_role_policy = <<EOF { "Version": "2012-10-17", "Statement": [ { "Action": "sts:AssumeRole", "Principal": { "Service": "firehose.amazonaws.com" }, "Effect": "Allow", "Sid": "" } ] } EOF } resource "aws_kinesis_firehose_delivery_stream" "test_stream" { name = "terraform-kinesis-firehose-msk-broker-logs-stream" destination = "s3" s3_configuration { role_arn = aws_iam_role.firehose_role.arn bucket_arn = aws_s3_bucket.bucket.arn } tags = { LogDeliveryEnabled = "placeholder" } lifecycle { ignore_changes = [ tags["LogDeliveryEnabled"], ] } } resource "aws_msk_cluster" "example" { cluster_name = "example" kafka_version = "2.4.1" number_of_broker_nodes = 3 broker_node_group_info { instance_type = "kafka.m5.large" ebs_volume_size = 1000 client_subnets = [ aws_subnet.subnet_az1.id, aws_subnet.subnet_az2.id, aws_subnet.subnet_az3.id, ] security_groups = [aws_security_group.sg.id] } encryption_info { encryption_in_transit { client_broker = "TLS" in_cluster = true } } open_monitoring { prometheus { jmx_exporter { enabled_in_broker = true } node_exporter { enabled_in_broker = true } } } logging_info { broker_logs { cloudwatch_logs { enabled = true log_group = aws_cloudwatch_log_group.test.name } firehose { enabled = true delivery_stream = aws_kinesis_firehose_delivery_stream.test_stream.name } s3 { enabled = true bucket = aws_s3_bucket.bucket.id prefix = "logs/msk-" } } } tags = { foo = "bar" } } output "zookeeper_connect_string" { value = aws_msk_cluster.example.zookeeper_connect_string } output "bootstrap_brokers_tls" { description = "TLS connection host:port pairs" value = aws_msk_cluster.example.bootstrap_brokers_tls } ``` </details> **Expected behavior** No error is thrown as long as an encryption_info block is defined Checkov Version 1.0.669
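The expected post-fix behaviour, expressed against the check's Python interface as in the added regression tests; the import paths follow checkov's usual module layout and are assumptions insofar as they do not appear in the diff above.

```python
from checkov.common.models.enums import CheckResult
from checkov.terraform.checks.resource.aws.MSKClusterEncryption import check

# An encryption_info block without encryption_at_rest_kms_key_arn should now
# pass CKV_AWS_81, since AWS falls back to the default KMS key for at-rest
# encryption whenever the block is present.
resource_conf = {
    "name": "test-project",
    "encryption_info": [
        {
            "encryption_in_transit": [
                {"client_broker": ["TLS"], "in_cluster": [True]}
            ],
        }
    ],
}
assert check.scan_resource_conf(conf=resource_conf) == CheckResult.PASSED
```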
bridgecrewio/checkov
diff --git a/tests/terraform/checks/resource/aws/test_MSKClusterEncryption.py b/tests/terraform/checks/resource/aws/test_MSKClusterEncryption.py index 23fd0bdf1..0cfd55402 100644 --- a/tests/terraform/checks/resource/aws/test_MSKClusterEncryption.py +++ b/tests/terraform/checks/resource/aws/test_MSKClusterEncryption.py @@ -13,23 +13,6 @@ class TestMSKClusterEncryption(unittest.TestCase): scan_result = check.scan_resource_conf(conf=resource_conf) self.assertEqual(CheckResult.FAILED, scan_result) - def test_failure_no_kms(self): - resource_conf = { - "name": "test-project", - "encryption_info": [ - { - "encryption_in_transit": [ - { - "client_broker": ["TLS"], - "in_cluster": ["true"], - } - ], - } - ], - } - scan_result = check.scan_resource_conf(conf=resource_conf) - self.assertEqual(CheckResult.FAILED, scan_result) - def test_failure_non_tls(self): resource_conf = { "name": "test-project", @@ -96,6 +79,33 @@ class TestMSKClusterEncryption(unittest.TestCase): scan_result = check.scan_resource_conf(conf=resource_conf) self.assertEqual(CheckResult.PASSED, scan_result) + # Regression test for https://github.com/bridgecrewio/checkov/issues/747 + def test_success_no_encryption_at_rest_kms_key_arn_specified(self): + resource_conf = { + "name": "test-project", + "encryption_info": [{}], + } + scan_result = check.scan_resource_conf(conf=resource_conf) + self.assertEqual(CheckResult.PASSED, scan_result) + + # Regression test for https://github.com/bridgecrewio/checkov/issues/747 + def test_success_encryption_in_transit_and_no_encryption_at_rest_kms_key_arn_specified(self): + resource_conf = { + "name": "test-project", + "encryption_info": [ + { + "encryption_in_transit": [ + { + "client_broker": ["TLS"], + "in_cluster": [True], + } + ], + } + ], + } + scan_result = check.scan_resource_conf(conf=resource_conf) + self.assertEqual(CheckResult.PASSED, scan_result) + if __name__ == '__main__': unittest.main()
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_hyperlinks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 0 }, "num_modified_files": 1 }
1.0
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y git" ], "python": "3.7", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///croot/attrs_1668696182826/work bc-python-hcl2==0.3.51 boto3==1.33.13 botocore==1.33.13 certifi @ file:///croot/certifi_1671487769961/work/certifi charset-normalizer==3.4.1 -e git+https://github.com/bridgecrewio/checkov.git@18b6fab1a1a2b96a9829f6772f228705b31a6310#egg=checkov colorama==0.4.6 deep-merge==0.0.4 dpath==1.5.0 flit_core @ file:///opt/conda/conda-bld/flit-core_1644941570762/work/source/flit_core gitdb==4.0.12 GitPython==3.1.44 idna==3.10 importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1648562407465/work importlib-resources==5.12.0 iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work jmespath==1.0.1 junit-xml==1.9 lark==1.1.9 packaging @ file:///croot/packaging_1671697413597/work pluggy @ file:///tmp/build/80754af9/pluggy_1648042572264/work py @ file:///opt/conda/conda-bld/py_1644396412707/work pytest==7.1.2 python-dateutil==2.9.0.post0 PyYAML==6.0.1 requests==2.31.0 s3transfer==0.8.2 semantic-version==2.10.0 six==1.15.0 smmap==5.0.2 tabulate==0.9.0 termcolor==2.3.0 tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work tqdm==4.67.1 typing_extensions @ file:///croot/typing_extensions_1669924550328/work update-checker==0.18.0 urllib3==1.26.20 zipp @ file:///croot/zipp_1672387121353/work
name: checkov channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=22.1.0=py37h06a4308_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2022.12.7=py37h06a4308_0 - flit-core=3.6.0=pyhd3eb1b0_0 - importlib-metadata=4.11.3=py37h06a4308_0 - importlib_metadata=4.11.3=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=22.0=py37h06a4308_0 - pip=22.3.1=py37h06a4308_0 - pluggy=1.0.0=py37h06a4308_1 - py=1.11.0=pyhd3eb1b0_0 - pytest=7.1.2=py37h06a4308_0 - python=3.7.16=h7a1cb2a_0 - readline=8.2=h5eee18b_0 - setuptools=65.6.3=py37h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tomli=2.0.1=py37h06a4308_0 - typing_extensions=4.4.0=py37h06a4308_0 - wheel=0.38.4=py37h06a4308_0 - xz=5.6.4=h5eee18b_1 - zipp=3.11.0=py37h06a4308_0 - zlib=1.2.13=h5eee18b_1 - pip: - bc-python-hcl2==0.3.51 - boto3==1.33.13 - botocore==1.33.13 - charset-normalizer==3.4.1 - colorama==0.4.6 - deep-merge==0.0.4 - dpath==1.5.0 - gitdb==4.0.12 - gitpython==3.1.44 - idna==3.10 - importlib-resources==5.12.0 - jmespath==1.0.1 - junit-xml==1.9 - lark==1.1.9 - python-dateutil==2.9.0.post0 - pyyaml==6.0.1 - requests==2.31.0 - s3transfer==0.8.2 - semantic-version==2.10.0 - six==1.15.0 - smmap==5.0.2 - tabulate==0.9.0 - termcolor==2.3.0 - tqdm==4.67.1 - update-checker==0.18.0 - urllib3==1.26.20 prefix: /opt/conda/envs/checkov
[ "tests/terraform/checks/resource/aws/test_MSKClusterEncryption.py::TestMSKClusterEncryption::test_success_encryption_in_transit_and_no_encryption_at_rest_kms_key_arn_specified", "tests/terraform/checks/resource/aws/test_MSKClusterEncryption.py::TestMSKClusterEncryption::test_success_no_encryption_at_rest_kms_key_arn_specified" ]
[]
[ "tests/terraform/checks/resource/aws/test_MSKClusterEncryption.py::TestMSKClusterEncryption::test_failure", "tests/terraform/checks/resource/aws/test_MSKClusterEncryption.py::TestMSKClusterEncryption::test_failure_in_cluster", "tests/terraform/checks/resource/aws/test_MSKClusterEncryption.py::TestMSKClusterEncryption::test_failure_non_tls", "tests/terraform/checks/resource/aws/test_MSKClusterEncryption.py::TestMSKClusterEncryption::test_success", "tests/terraform/checks/resource/aws/test_MSKClusterEncryption.py::TestMSKClusterEncryption::test_success_no_encrypt_block" ]
[]
Apache License 2.0
9,263
450
[ "checkov/terraform/checks/resource/aws/MSKClusterEncryption.py" ]