| column | dtype | values |
|---|---|---|
| problem_id | string | lengths 18-22 |
| source | string | 1 distinct value |
| task_type | string | 1 distinct value |
| in_source_id | string | lengths 13-58 |
| prompt | string | lengths 1.71k-9.01k |
| golden_diff | string | lengths 151-4.94k |
| verification_info | string | lengths 465-11.3k |
| num_tokens_prompt | int64 | 557-2.05k |
| num_tokens_diff | int64 | 48-1.02k |
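Each record below follows this schema. As a quick orientation, here is a minimal sketch of loading and inspecting the data with the `datasets` library; note that the dataset id (taken from the `source` field of the rows) and the split name are assumptions, not facts documented in this dump.

```python
# Minimal sketch: load this dataset and inspect one record.
# Assumptions (not stated in the dump itself): the dataset is published on the
# Hugging Face Hub under the id seen in the `source` column, with a "train" split.
from datasets import load_dataset

ds = load_dataset("rasdani/github-patches", split="train")  # hypothetical id/split
print(ds.column_names)

row = ds[0]
print(row["problem_id"], row["in_source_id"])
print(row["prompt"][:200])  # the prompt embeds the issue text and the partial code base
```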
gh_patches_debug_4262
rasdani/github-patches
git_diff
certbot__certbot-772
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> rename /etc/letsencrypt/configs to /etc/letsencrypt/renewal Since it doesn't contain anything except renewal configuration files, people will probably find the current name confusing. </issue> <code> [start of letsencrypt/constants.py] 1 """Let's Encrypt constants.""" 2 import os 3 import logging 4 5 from acme import challenges 6 7 8 SETUPTOOLS_PLUGINS_ENTRY_POINT = "letsencrypt.plugins" 9 """Setuptools entry point group name for plugins.""" 10 11 CLI_DEFAULTS = dict( 12 config_files=[ 13 "/etc/letsencrypt/cli.ini", 14 # http://freedesktop.org/wiki/Software/xdg-user-dirs/ 15 os.path.join(os.environ.get("XDG_CONFIG_HOME", "~/.config"), 16 "letsencrypt", "cli.ini"), 17 ], 18 verbose_count=-(logging.WARNING / 10), 19 server="https://acme-staging.api.letsencrypt.org/directory", 20 rsa_key_size=2048, 21 rollback_checkpoints=1, 22 config_dir="/etc/letsencrypt", 23 work_dir="/var/lib/letsencrypt", 24 logs_dir="/var/log/letsencrypt", 25 no_verify_ssl=False, 26 dvsni_port=challenges.DVSNI.PORT, 27 28 auth_cert_path="./cert.pem", 29 auth_chain_path="./chain.pem", 30 ) 31 """Defaults for CLI flags and `.IConfig` attributes.""" 32 33 34 RENEWER_DEFAULTS = dict( 35 renewer_enabled="yes", 36 renew_before_expiry="30 days", 37 deploy_before_expiry="20 days", 38 ) 39 """Defaults for renewer script.""" 40 41 42 EXCLUSIVE_CHALLENGES = frozenset([frozenset([ 43 challenges.DVSNI, challenges.SimpleHTTP])]) 44 """Mutually exclusive challenges.""" 45 46 47 ENHANCEMENTS = ["redirect", "http-header", "ocsp-stapling", "spdy"] 48 """List of possible :class:`letsencrypt.interfaces.IInstaller` 49 enhancements. 50 51 List of expected options parameters: 52 - redirect: None 53 - http-header: TODO 54 - ocsp-stapling: TODO 55 - spdy: TODO 56 57 """ 58 59 ARCHIVE_DIR = "archive" 60 """Archive directory, relative to `IConfig.config_dir`.""" 61 62 CONFIG_DIRS_MODE = 0o755 63 """Directory mode for ``.IConfig.config_dir`` et al.""" 64 65 ACCOUNTS_DIR = "accounts" 66 """Directory where all accounts are saved.""" 67 68 BACKUP_DIR = "backups" 69 """Directory (relative to `IConfig.work_dir`) where backups are kept.""" 70 71 CERT_DIR = "certs" 72 """See `.IConfig.cert_dir`.""" 73 74 CERT_KEY_BACKUP_DIR = "keys-certs" 75 """Directory where all certificates and keys are stored (relative to 76 `IConfig.work_dir`). Used for easy revocation.""" 77 78 IN_PROGRESS_DIR = "IN_PROGRESS" 79 """Directory used before a permanent checkpoint is finalized (relative to 80 `IConfig.work_dir`).""" 81 82 KEY_DIR = "keys" 83 """Directory (relative to `IConfig.config_dir`) where keys are saved.""" 84 85 LIVE_DIR = "live" 86 """Live directory, relative to `IConfig.config_dir`.""" 87 88 TEMP_CHECKPOINT_DIR = "temp_checkpoint" 89 """Temporary checkpoint directory (relative to `IConfig.work_dir`).""" 90 91 RENEWAL_CONFIGS_DIR = "configs" 92 """Renewal configs directory, relative to `IConfig.config_dir`.""" 93 94 RENEWER_CONFIG_FILENAME = "renewer.conf" 95 """Renewer config file name (relative to `IConfig.config_dir`).""" 96 [end of letsencrypt/constants.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/letsencrypt/constants.py b/letsencrypt/constants.py --- a/letsencrypt/constants.py +++ b/letsencrypt/constants.py @@ -88,7 +88,7 @@ TEMP_CHECKPOINT_DIR = "temp_checkpoint" """Temporary checkpoint directory (relative to `IConfig.work_dir`).""" -RENEWAL_CONFIGS_DIR = "configs" +RENEWAL_CONFIGS_DIR = "renewal" """Renewal configs directory, relative to `IConfig.config_dir`.""" RENEWER_CONFIG_FILENAME = "renewer.conf"
{"golden_diff": "diff --git a/letsencrypt/constants.py b/letsencrypt/constants.py\n--- a/letsencrypt/constants.py\n+++ b/letsencrypt/constants.py\n@@ -88,7 +88,7 @@\n TEMP_CHECKPOINT_DIR = \"temp_checkpoint\"\n \"\"\"Temporary checkpoint directory (relative to `IConfig.work_dir`).\"\"\"\n \n-RENEWAL_CONFIGS_DIR = \"configs\"\n+RENEWAL_CONFIGS_DIR = \"renewal\"\n \"\"\"Renewal configs directory, relative to `IConfig.config_dir`.\"\"\"\n \n RENEWER_CONFIG_FILENAME = \"renewer.conf\"\n", "issue": "rename /etc/letsencrypt/configs to /etc/letsencrypt/renewal\nSince it doesn't contain anything except renewal configuration files, people will probably find the current name confusing.\n\n", "before_files": [{"content": "\"\"\"Let's Encrypt constants.\"\"\"\nimport os\nimport logging\n\nfrom acme import challenges\n\n\nSETUPTOOLS_PLUGINS_ENTRY_POINT = \"letsencrypt.plugins\"\n\"\"\"Setuptools entry point group name for plugins.\"\"\"\n\nCLI_DEFAULTS = dict(\n config_files=[\n \"/etc/letsencrypt/cli.ini\",\n # http://freedesktop.org/wiki/Software/xdg-user-dirs/\n os.path.join(os.environ.get(\"XDG_CONFIG_HOME\", \"~/.config\"),\n \"letsencrypt\", \"cli.ini\"),\n ],\n verbose_count=-(logging.WARNING / 10),\n server=\"https://acme-staging.api.letsencrypt.org/directory\",\n rsa_key_size=2048,\n rollback_checkpoints=1,\n config_dir=\"/etc/letsencrypt\",\n work_dir=\"/var/lib/letsencrypt\",\n logs_dir=\"/var/log/letsencrypt\",\n no_verify_ssl=False,\n dvsni_port=challenges.DVSNI.PORT,\n\n auth_cert_path=\"./cert.pem\",\n auth_chain_path=\"./chain.pem\",\n)\n\"\"\"Defaults for CLI flags and `.IConfig` attributes.\"\"\"\n\n\nRENEWER_DEFAULTS = dict(\n renewer_enabled=\"yes\",\n renew_before_expiry=\"30 days\",\n deploy_before_expiry=\"20 days\",\n)\n\"\"\"Defaults for renewer script.\"\"\"\n\n\nEXCLUSIVE_CHALLENGES = frozenset([frozenset([\n challenges.DVSNI, challenges.SimpleHTTP])])\n\"\"\"Mutually exclusive challenges.\"\"\"\n\n\nENHANCEMENTS = [\"redirect\", \"http-header\", \"ocsp-stapling\", \"spdy\"]\n\"\"\"List of possible :class:`letsencrypt.interfaces.IInstaller`\nenhancements.\n\nList of expected options parameters:\n- redirect: None\n- http-header: TODO\n- ocsp-stapling: TODO\n- spdy: TODO\n\n\"\"\"\n\nARCHIVE_DIR = \"archive\"\n\"\"\"Archive directory, relative to `IConfig.config_dir`.\"\"\"\n\nCONFIG_DIRS_MODE = 0o755\n\"\"\"Directory mode for ``.IConfig.config_dir`` et al.\"\"\"\n\nACCOUNTS_DIR = \"accounts\"\n\"\"\"Directory where all accounts are saved.\"\"\"\n\nBACKUP_DIR = \"backups\"\n\"\"\"Directory (relative to `IConfig.work_dir`) where backups are kept.\"\"\"\n\nCERT_DIR = \"certs\"\n\"\"\"See `.IConfig.cert_dir`.\"\"\"\n\nCERT_KEY_BACKUP_DIR = \"keys-certs\"\n\"\"\"Directory where all certificates and keys are stored (relative to\n`IConfig.work_dir`). 
Used for easy revocation.\"\"\"\n\nIN_PROGRESS_DIR = \"IN_PROGRESS\"\n\"\"\"Directory used before a permanent checkpoint is finalized (relative to\n`IConfig.work_dir`).\"\"\"\n\nKEY_DIR = \"keys\"\n\"\"\"Directory (relative to `IConfig.config_dir`) where keys are saved.\"\"\"\n\nLIVE_DIR = \"live\"\n\"\"\"Live directory, relative to `IConfig.config_dir`.\"\"\"\n\nTEMP_CHECKPOINT_DIR = \"temp_checkpoint\"\n\"\"\"Temporary checkpoint directory (relative to `IConfig.work_dir`).\"\"\"\n\nRENEWAL_CONFIGS_DIR = \"configs\"\n\"\"\"Renewal configs directory, relative to `IConfig.config_dir`.\"\"\"\n\nRENEWER_CONFIG_FILENAME = \"renewer.conf\"\n\"\"\"Renewer config file name (relative to `IConfig.config_dir`).\"\"\"\n", "path": "letsencrypt/constants.py"}]}
1,439
122
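The prompt in the record above asks for a patch that can be applied with `git apply`, and `golden_diff` holds the reference answer. Here is a hedged sketch of that verification step, assuming a local checkout of the target repository (here certbot) at the commit the diff was generated against:

```python
# Minimal sketch: dry-run a record's golden_diff against a local checkout.
# Assumption: `repo_dir` is a clone of the target repository at the
# commit the diff was generated against.
import subprocess
import tempfile

def check_golden_diff(repo_dir: str, diff_text: str) -> bool:
    with tempfile.NamedTemporaryFile("w", suffix=".patch", delete=False) as f:
        f.write(diff_text)
        patch_path = f.name
    # --check performs a dry run; drop it to actually modify the working tree.
    result = subprocess.run(
        ["git", "apply", "--check", patch_path],
        cwd=repo_dir,
        capture_output=True,
        text=True,
    )
    return result.returncode == 0
```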
gh_patches_debug_34433
rasdani/github-patches
git_diff
pantsbuild__pants-14131
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> ResolveError: Directory '{mydir}' does not contain any BUILD files (when Dockerizing packages) **Describe the bug** Created a repo at https://github.com/sureshjoshi/pantsbuild-14031 to help illustrate this problem. Essentially, I use custom output paths for my .pex files, and while testing out the `docker_image` target, I noticed some of my components fail with the error > ResolveError: Directory 'backend' does not contain any BUILD files After a lot of debugging, I only ran into this problem when my output folders were common to multiple `pex_binary` targets. For example, in the repo above, I have 3 identical projects (A, B, C) - where they only differ by the `pex_binary` `output_path` (and this location updated in the associated Dockerfile), and one of the projects refuses to compile. As per the README in the repo: ```bash # Should create a pex at dist/backend/projecta/projecta.pex # Docker image created successfully as projecta-container:latest ./pants package backend/projecta:: # Should create a pex at dist/backend.projectc/projectc.pex # Docker image created successfully as projectc-container:latest ./pants package backend/projectc:: ``` ```bash # Should create a pex at dist/backend/projectb.pex ./pants package backend/projectb:projectb # FAILS: With ResolveError ./pants package backend/projectb:projectb-container ``` So, the difference above is that Project C uses no `output_path` and uses the dot-syntax for the dist folder. ProjectA places the pex file under a `backend/projecta` directory. The failing ProjectB places the pex file directly under `backend`. This isn't a big issue, and easily worked around, and I'm guessing it has to do with namespacing or module/package semantics, but it's just a weird problem that is difficult to debug based on the error message. **Pants version** - 2.8.0 - 2.9.0rc1 **OS** macOS 12.1 Untested on Linux </issue> <code> [start of src/python/pants/backend/docker/util_rules/dependencies.py] 1 # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md). 2 # Licensed under the Apache License, Version 2.0 (see LICENSE). 
3 4 from pants.backend.docker.subsystems.dockerfile_parser import DockerfileInfo, DockerfileInfoRequest 5 from pants.backend.docker.target_types import DockerDependenciesField 6 from pants.core.goals.package import PackageFieldSet 7 from pants.engine.addresses import Addresses, UnparsedAddressInputs 8 from pants.engine.rules import Get, collect_rules, rule 9 from pants.engine.target import ( 10 FieldSetsPerTarget, 11 FieldSetsPerTargetRequest, 12 InjectDependenciesRequest, 13 InjectedDependencies, 14 Targets, 15 ) 16 from pants.engine.unions import UnionRule 17 18 19 class InjectDockerDependencies(InjectDependenciesRequest): 20 inject_for = DockerDependenciesField 21 22 23 @rule 24 async def inject_docker_dependencies(request: InjectDockerDependencies) -> InjectedDependencies: 25 """Inspects COPY instructions in the Dockerfile for references to known targets.""" 26 dockerfile_info = await Get( 27 DockerfileInfo, DockerfileInfoRequest(request.dependencies_field.address) 28 ) 29 targets = await Get( 30 Targets, 31 UnparsedAddressInputs( 32 dockerfile_info.putative_target_addresses, 33 owning_address=None, 34 ), 35 ) 36 package = await Get(FieldSetsPerTarget, FieldSetsPerTargetRequest(PackageFieldSet, targets)) 37 referenced_targets = ( 38 field_sets[0].address for field_sets in package.collection if len(field_sets) > 0 39 ) 40 return InjectedDependencies(Addresses(referenced_targets)) 41 42 43 def rules(): 44 return [ 45 *collect_rules(), 46 UnionRule(InjectDependenciesRequest, InjectDockerDependencies), 47 ] 48 [end of src/python/pants/backend/docker/util_rules/dependencies.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/python/pants/backend/docker/util_rules/dependencies.py b/src/python/pants/backend/docker/util_rules/dependencies.py --- a/src/python/pants/backend/docker/util_rules/dependencies.py +++ b/src/python/pants/backend/docker/util_rules/dependencies.py @@ -3,6 +3,7 @@ from pants.backend.docker.subsystems.dockerfile_parser import DockerfileInfo, DockerfileInfoRequest from pants.backend.docker.target_types import DockerDependenciesField +from pants.base.specs import AddressSpecs, MaybeEmptySiblingAddresses from pants.core.goals.package import PackageFieldSet from pants.engine.addresses import Addresses, UnparsedAddressInputs from pants.engine.rules import Get, collect_rules, rule @@ -22,17 +23,28 @@ @rule async def inject_docker_dependencies(request: InjectDockerDependencies) -> InjectedDependencies: - """Inspects COPY instructions in the Dockerfile for references to known targets.""" + """Inspects COPY instructions in the Dockerfile for references to known packagable targets.""" dockerfile_info = await Get( DockerfileInfo, DockerfileInfoRequest(request.dependencies_field.address) ) - targets = await Get( - Targets, + + # Parse all putative target addresses. + putative_addresses = await Get( + Addresses, UnparsedAddressInputs( dockerfile_info.putative_target_addresses, owning_address=None, ), ) + + # Get the target for those addresses that are known. + directories = {address.spec_path for address in putative_addresses} + all_addresses = await Get(Addresses, AddressSpecs(map(MaybeEmptySiblingAddresses, directories))) + targets = await Get( + Targets, Addresses((address for address in putative_addresses if address in all_addresses)) + ) + + # Only keep those targets that we can "package". package = await Get(FieldSetsPerTarget, FieldSetsPerTargetRequest(PackageFieldSet, targets)) referenced_targets = ( field_sets[0].address for field_sets in package.collection if len(field_sets) > 0
{"golden_diff": "diff --git a/src/python/pants/backend/docker/util_rules/dependencies.py b/src/python/pants/backend/docker/util_rules/dependencies.py\n--- a/src/python/pants/backend/docker/util_rules/dependencies.py\n+++ b/src/python/pants/backend/docker/util_rules/dependencies.py\n@@ -3,6 +3,7 @@\n \n from pants.backend.docker.subsystems.dockerfile_parser import DockerfileInfo, DockerfileInfoRequest\n from pants.backend.docker.target_types import DockerDependenciesField\n+from pants.base.specs import AddressSpecs, MaybeEmptySiblingAddresses\n from pants.core.goals.package import PackageFieldSet\n from pants.engine.addresses import Addresses, UnparsedAddressInputs\n from pants.engine.rules import Get, collect_rules, rule\n@@ -22,17 +23,28 @@\n \n @rule\n async def inject_docker_dependencies(request: InjectDockerDependencies) -> InjectedDependencies:\n- \"\"\"Inspects COPY instructions in the Dockerfile for references to known targets.\"\"\"\n+ \"\"\"Inspects COPY instructions in the Dockerfile for references to known packagable targets.\"\"\"\n dockerfile_info = await Get(\n DockerfileInfo, DockerfileInfoRequest(request.dependencies_field.address)\n )\n- targets = await Get(\n- Targets,\n+\n+ # Parse all putative target addresses.\n+ putative_addresses = await Get(\n+ Addresses,\n UnparsedAddressInputs(\n dockerfile_info.putative_target_addresses,\n owning_address=None,\n ),\n )\n+\n+ # Get the target for those addresses that are known.\n+ directories = {address.spec_path for address in putative_addresses}\n+ all_addresses = await Get(Addresses, AddressSpecs(map(MaybeEmptySiblingAddresses, directories)))\n+ targets = await Get(\n+ Targets, Addresses((address for address in putative_addresses if address in all_addresses))\n+ )\n+\n+ # Only keep those targets that we can \"package\".\n package = await Get(FieldSetsPerTarget, FieldSetsPerTargetRequest(PackageFieldSet, targets))\n referenced_targets = (\n field_sets[0].address for field_sets in package.collection if len(field_sets) > 0\n", "issue": "ResolveError: Directory '{mydir}' does not contain any BUILD files (when Dockerizing packages)\n**Describe the bug**\r\n\r\nCreated a repo at https://github.com/sureshjoshi/pantsbuild-14031 to help illustrate this problem. \r\n\r\nEssentially, I use custom output paths for my .pex files, and while testing out the `docker_image` target, I noticed some of my components fail with the error \r\n\r\n> ResolveError: Directory 'backend' does not contain any BUILD files\r\n\r\nAfter a lot of debugging, I only ran into this problem when my output folders were common to multiple `pex_binary` targets. 
\r\n\r\nFor example, in the repo above, I have 3 identical projects (A, B, C) - where they only differ by the `pex_binary` `output_path` (and this location updated in the associated Dockerfile), and one of the projects refuses to compile.\r\n\r\nAs per the README in the repo:\r\n\r\n```bash\r\n# Should create a pex at dist/backend/projecta/projecta.pex\r\n# Docker image created successfully as projecta-container:latest\r\n./pants package backend/projecta::\r\n\r\n# Should create a pex at dist/backend.projectc/projectc.pex\r\n# Docker image created successfully as projectc-container:latest\r\n./pants package backend/projectc::\r\n```\r\n\r\n```bash\r\n# Should create a pex at dist/backend/projectb.pex\r\n./pants package backend/projectb:projectb\r\n\r\n# FAILS: With ResolveError\r\n./pants package backend/projectb:projectb-container \r\n```\r\n\r\nSo, the difference above is that Project C uses no `output_path` and uses the dot-syntax for the dist folder. ProjectA places the pex file under a `backend/projecta` directory. The failing ProjectB places the pex file directly under `backend`.\r\n\r\nThis isn't a big issue, and easily worked around, and I'm guessing it has to do with namespacing or module/package semantics, but it's just a weird problem that is difficult to debug based on the error message.\r\n\r\n**Pants version**\r\n\r\n- 2.8.0\r\n- 2.9.0rc1\r\n\r\n**OS**\r\n\r\nmacOS 12.1\r\nUntested on Linux\r\n\n", "before_files": [{"content": "# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom pants.backend.docker.subsystems.dockerfile_parser import DockerfileInfo, DockerfileInfoRequest\nfrom pants.backend.docker.target_types import DockerDependenciesField\nfrom pants.core.goals.package import PackageFieldSet\nfrom pants.engine.addresses import Addresses, UnparsedAddressInputs\nfrom pants.engine.rules import Get, collect_rules, rule\nfrom pants.engine.target import (\n FieldSetsPerTarget,\n FieldSetsPerTargetRequest,\n InjectDependenciesRequest,\n InjectedDependencies,\n Targets,\n)\nfrom pants.engine.unions import UnionRule\n\n\nclass InjectDockerDependencies(InjectDependenciesRequest):\n inject_for = DockerDependenciesField\n\n\n@rule\nasync def inject_docker_dependencies(request: InjectDockerDependencies) -> InjectedDependencies:\n \"\"\"Inspects COPY instructions in the Dockerfile for references to known targets.\"\"\"\n dockerfile_info = await Get(\n DockerfileInfo, DockerfileInfoRequest(request.dependencies_field.address)\n )\n targets = await Get(\n Targets,\n UnparsedAddressInputs(\n dockerfile_info.putative_target_addresses,\n owning_address=None,\n ),\n )\n package = await Get(FieldSetsPerTarget, FieldSetsPerTargetRequest(PackageFieldSet, targets))\n referenced_targets = (\n field_sets[0].address for field_sets in package.collection if len(field_sets) > 0\n )\n return InjectedDependencies(Addresses(referenced_targets))\n\n\ndef rules():\n return [\n *collect_rules(),\n UnionRule(InjectDependenciesRequest, InjectDockerDependencies),\n ]\n", "path": "src/python/pants/backend/docker/util_rules/dependencies.py"}]}
1,452
449
gh_patches_debug_22472
rasdani/github-patches
git_diff
mozilla__kitsune-3198
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Remove all references to ethn.io and Treejack Confirm is ethn.io is something we still use, remove otherwise https://github.com/mozilla/kitsune/blob/master/kitsune/products/jinja2/products/product.html#L92 - [ ] remove treejack waffle.switch - [ ] remove waffle.flag('ethnio-all') Remove all references to ethn.io and Treejack Confirm is ethn.io is something we still use, remove otherwise https://github.com/mozilla/kitsune/blob/master/kitsune/products/jinja2/products/product.html#L92 - [ ] remove treejack waffle.switch - [ ] remove waffle.flag('ethnio-all') </issue> <code> [start of kitsune/sumo/migrations/0002_initial_data.py] 1 # -*- coding: utf-8 -*- 2 from __future__ import unicode_literals 3 4 from django.db import models, migrations 5 6 7 def create_ratelimit_bypass_perm(apps, schema_editor): 8 # First we get or create the content type. 9 ContentType = apps.get_model('contenttypes', 'ContentType') 10 global_permission_ct, created = ContentType.objects.get_or_create( 11 model='global_permission', app_label='sumo') 12 13 # Then we create a permission attached to that content type. 14 Permission = apps.get_model('auth', 'Permission') 15 perm = Permission.objects.create( 16 name='Bypass Ratelimits', 17 content_type=global_permission_ct, 18 codename='bypass_ratelimit') 19 20 21 def remove_ratelimit_bypass_perm(apps, schema_editor): 22 Permission = apps.get_model('auth', 'Permission') 23 perm = Permission.objects.filter(codename='bypass_ratelimit').delete() 24 25 26 def create_treejack_switch(apps, schema_editor): 27 Switch = apps.get_model('waffle', 'Switch') 28 Switch.objects.create( 29 name='treejack', 30 note='Enables/disables the Treejack snippet.', 31 active=False) 32 33 34 def remove_treejack_switch(apps, schema_editor): 35 Switch = apps.get_model('waffle', 'Switch') 36 Switch.objects.filter(name='treejack').delete() 37 38 39 def create_refresh_survey_flag(apps, schema_editor): 40 Sample = apps.get_model('waffle', 'Sample') 41 Sample.objects.get_or_create( 42 name='refresh-survey', 43 note='Samples users that refresh Firefox to give them a survey.', 44 percent=50.0) 45 46 47 def remove_refresh_survey_flag(apps, schema_editor): 48 Sample = apps.get_model('waffle', 'Sample') 49 Sample.objects.filter(name='refresh-survey').delete() 50 51 52 class Migration(migrations.Migration): 53 54 dependencies = [ 55 ('sumo', '0001_initial'), 56 ('auth', '0001_initial'), 57 ('contenttypes', '0001_initial'), 58 ('waffle', '0001_initial'), 59 ] 60 61 operations = [ 62 migrations.RunPython(create_ratelimit_bypass_perm, remove_ratelimit_bypass_perm), 63 migrations.RunPython(create_treejack_switch, remove_treejack_switch), 64 migrations.RunPython(create_refresh_survey_flag, remove_refresh_survey_flag), 65 ] 66 [end of kitsune/sumo/migrations/0002_initial_data.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/kitsune/sumo/migrations/0002_initial_data.py b/kitsune/sumo/migrations/0002_initial_data.py --- a/kitsune/sumo/migrations/0002_initial_data.py +++ b/kitsune/sumo/migrations/0002_initial_data.py @@ -23,19 +23,6 @@ perm = Permission.objects.filter(codename='bypass_ratelimit').delete() -def create_treejack_switch(apps, schema_editor): - Switch = apps.get_model('waffle', 'Switch') - Switch.objects.create( - name='treejack', - note='Enables/disables the Treejack snippet.', - active=False) - - -def remove_treejack_switch(apps, schema_editor): - Switch = apps.get_model('waffle', 'Switch') - Switch.objects.filter(name='treejack').delete() - - def create_refresh_survey_flag(apps, schema_editor): Sample = apps.get_model('waffle', 'Sample') Sample.objects.get_or_create( @@ -60,6 +47,5 @@ operations = [ migrations.RunPython(create_ratelimit_bypass_perm, remove_ratelimit_bypass_perm), - migrations.RunPython(create_treejack_switch, remove_treejack_switch), migrations.RunPython(create_refresh_survey_flag, remove_refresh_survey_flag), ]
{"golden_diff": "diff --git a/kitsune/sumo/migrations/0002_initial_data.py b/kitsune/sumo/migrations/0002_initial_data.py\n--- a/kitsune/sumo/migrations/0002_initial_data.py\n+++ b/kitsune/sumo/migrations/0002_initial_data.py\n@@ -23,19 +23,6 @@\n perm = Permission.objects.filter(codename='bypass_ratelimit').delete()\n \n \n-def create_treejack_switch(apps, schema_editor):\n- Switch = apps.get_model('waffle', 'Switch')\n- Switch.objects.create(\n- name='treejack',\n- note='Enables/disables the Treejack snippet.',\n- active=False)\n-\n-\n-def remove_treejack_switch(apps, schema_editor):\n- Switch = apps.get_model('waffle', 'Switch')\n- Switch.objects.filter(name='treejack').delete()\n-\n-\n def create_refresh_survey_flag(apps, schema_editor):\n Sample = apps.get_model('waffle', 'Sample')\n Sample.objects.get_or_create(\n@@ -60,6 +47,5 @@\n \n operations = [\n migrations.RunPython(create_ratelimit_bypass_perm, remove_ratelimit_bypass_perm),\n- migrations.RunPython(create_treejack_switch, remove_treejack_switch),\n migrations.RunPython(create_refresh_survey_flag, remove_refresh_survey_flag),\n ]\n", "issue": "Remove all references to ethn.io and Treejack\nConfirm is ethn.io is something we still use, remove otherwise\r\n\r\nhttps://github.com/mozilla/kitsune/blob/master/kitsune/products/jinja2/products/product.html#L92\r\n\r\n- [ ] remove treejack waffle.switch\r\n- [ ] remove waffle.flag('ethnio-all')\nRemove all references to ethn.io and Treejack\nConfirm is ethn.io is something we still use, remove otherwise\r\n\r\nhttps://github.com/mozilla/kitsune/blob/master/kitsune/products/jinja2/products/product.html#L92\r\n\r\n- [ ] remove treejack waffle.switch\r\n- [ ] remove waffle.flag('ethnio-all')\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\ndef create_ratelimit_bypass_perm(apps, schema_editor):\n # First we get or create the content type.\n ContentType = apps.get_model('contenttypes', 'ContentType')\n global_permission_ct, created = ContentType.objects.get_or_create(\n model='global_permission', app_label='sumo')\n\n # Then we create a permission attached to that content type.\n Permission = apps.get_model('auth', 'Permission')\n perm = Permission.objects.create(\n name='Bypass Ratelimits',\n content_type=global_permission_ct,\n codename='bypass_ratelimit')\n\n\ndef remove_ratelimit_bypass_perm(apps, schema_editor):\n Permission = apps.get_model('auth', 'Permission')\n perm = Permission.objects.filter(codename='bypass_ratelimit').delete()\n\n\ndef create_treejack_switch(apps, schema_editor):\n Switch = apps.get_model('waffle', 'Switch')\n Switch.objects.create(\n name='treejack',\n note='Enables/disables the Treejack snippet.',\n active=False)\n\n\ndef remove_treejack_switch(apps, schema_editor):\n Switch = apps.get_model('waffle', 'Switch')\n Switch.objects.filter(name='treejack').delete()\n\n\ndef create_refresh_survey_flag(apps, schema_editor):\n Sample = apps.get_model('waffle', 'Sample')\n Sample.objects.get_or_create(\n name='refresh-survey',\n note='Samples users that refresh Firefox to give them a survey.',\n percent=50.0)\n\n\ndef remove_refresh_survey_flag(apps, schema_editor):\n Sample = apps.get_model('waffle', 'Sample')\n Sample.objects.filter(name='refresh-survey').delete()\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('sumo', '0001_initial'),\n ('auth', '0001_initial'),\n ('contenttypes', '0001_initial'),\n ('waffle', '0001_initial'),\n ]\n\n operations = 
[\n migrations.RunPython(create_ratelimit_bypass_perm, remove_ratelimit_bypass_perm),\n migrations.RunPython(create_treejack_switch, remove_treejack_switch),\n migrations.RunPython(create_refresh_survey_flag, remove_refresh_survey_flag),\n ]\n", "path": "kitsune/sumo/migrations/0002_initial_data.py"}]}
1,327
305
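Across records, `verification_info` is a JSON object bundling the golden diff, the issue text, and the pre-patch file contents under `before_files`. A minimal sketch of unpacking it into a working directory, assuming the field shape visible in the rows above:

```python
# Minimal sketch: unpack verification_info and write out the pre-patch files.
# Assumption: the JSON shape matches what is visible in the rows above, i.e.
# {"golden_diff": ..., "issue": ..., "before_files": [{"content": ..., "path": ...}]}.
import json
import os

def materialize_before_files(verification_info: str, workdir: str) -> None:
    info = json.loads(verification_info)
    for entry in info["before_files"]:
        dest = os.path.join(workdir, entry["path"])
        os.makedirs(os.path.dirname(dest) or ".", exist_ok=True)
        with open(dest, "w") as f:
            f.write(entry["content"])
```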
gh_patches_debug_25252
rasdani/github-patches
git_diff
networkx__networkx-3016
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> `OrderedGraph.subgraph` does not maintain the order of the nodes A subgraph built from a `OrderedGraph` should keep the order of the nodes, yet nodes in the subgraph are neither in the order of the initial graph, nor in the order of the selection. The issue can be seen from the following snippet: ```python graph = nx.OrderedGraph() nodes = list(range(10)) random.shuffle(nodes) graph.add_nodes_from(nodes) # key order is (7, 2, 1, 9, 0, 8, 6, 4, 3, 5) # We create a selection in the same order as the initial graph keys to_keep = [key for key in graph if key % 2 == 0] # [2, 0, 8, 6, 4] subgraph = graph.subgraph(to_keep) # (0, 2, 4, 6, 8) # We create a selection in a different order subgraph = graph.subgraph([5, 3, 1]) # (1, 3, 5) ``` From what I see, the issue is due to `Graph.subgraph` passing the selection to `nx.filters.show_nodes` that transforms it to a set. The nodes in the subgraph are then in the order of the set; as sets do not preserve the order, the order of the initial graph if not preserved. </issue> <code> [start of networkx/classes/ordered.py] 1 """ 2 Consistently ordered variants of the default base classes. 3 4 The Ordered (Di/Multi/MultiDi) Graphs give a consistent order for reporting of 5 nodes and edges. The order of node reporting agrees with node adding, but for 6 edges, the order is not necessarily the order that the edges were added. 7 8 In general, you should use the default (i.e., unordered) graph classes. 9 However, there are times (e.g., when testing) when you may need the 10 order preserved. 11 """ 12 from collections import OrderedDict 13 14 from .graph import Graph 15 from .multigraph import MultiGraph 16 from .digraph import DiGraph 17 from .multidigraph import MultiDiGraph 18 19 __all__ = [] 20 21 __all__.extend([ 22 'OrderedGraph', 23 'OrderedDiGraph', 24 'OrderedMultiGraph', 25 'OrderedMultiDiGraph', 26 ]) 27 28 29 class OrderedGraph(Graph): 30 """Consistently ordered variant of :class:`~networkx.Graph`.""" 31 node_dict_factory = OrderedDict 32 adjlist_outer_dict_factory = OrderedDict 33 adjlist_inner_dict_factory = OrderedDict 34 edge_attr_dict_factory = OrderedDict 35 36 def fresh_copy(self): 37 """Return a fresh copy graph with the same data structure. 38 39 A fresh copy has no nodes, edges or graph attributes. It is 40 the same data structure as the current graph. This method is 41 typically used to create an empty version of the graph. 42 """ 43 return OrderedGraph() 44 45 46 class OrderedDiGraph(DiGraph): 47 """Consistently ordered variant of :class:`~networkx.DiGraph`.""" 48 node_dict_factory = OrderedDict 49 adjlist_outer_dict_factory = OrderedDict 50 adjlist_inner_dict_factory = OrderedDict 51 edge_attr_dict_factory = OrderedDict 52 53 def fresh_copy(self): 54 """Return a fresh copy graph with the same data structure. 55 56 A fresh copy has no nodes, edges or graph attributes. It is 57 the same data structure as the current graph. This method is 58 typically used to create an empty version of the graph. 59 """ 60 return OrderedDiGraph() 61 62 63 class OrderedMultiGraph(MultiGraph): 64 """Consistently ordered variant of :class:`~networkx.MultiGraph`.""" 65 node_dict_factory = OrderedDict 66 adjlist_outer_dict_factory = OrderedDict 67 adjlist_inner_dict_factory = OrderedDict 68 edge_key_dict_factory = OrderedDict 69 edge_attr_dict_factory = OrderedDict 70 71 def fresh_copy(self): 72 """Return a fresh copy graph with the same data structure. 
73 74 A fresh copy has no nodes, edges or graph attributes. It is 75 the same data structure as the current graph. This method is 76 typically used to create an empty version of the graph. 77 """ 78 return OrderedMultiGraph() 79 80 81 class OrderedMultiDiGraph(MultiDiGraph): 82 """Consistently ordered variant of :class:`~networkx.MultiDiGraph`.""" 83 node_dict_factory = OrderedDict 84 adjlist_outer_dict_factory = OrderedDict 85 adjlist_inner_dict_factory = OrderedDict 86 edge_key_dict_factory = OrderedDict 87 edge_attr_dict_factory = OrderedDict 88 89 def fresh_copy(self): 90 """Return a fresh copy graph with the same data structure. 91 92 A fresh copy has no nodes, edges or graph attributes. It is 93 the same data structure as the current graph. This method is 94 typically used to create an empty version of the graph. 95 """ 96 return OrderedMultiDiGraph() 97 [end of networkx/classes/ordered.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/networkx/classes/ordered.py b/networkx/classes/ordered.py --- a/networkx/classes/ordered.py +++ b/networkx/classes/ordered.py @@ -1,5 +1,10 @@ """ Consistently ordered variants of the default base classes. +Note that if you are using Python 3.6, you shouldn't need these classes +because the dicts in Python 3.6 are ordered. +Note also that there are many differing expectations for the word "ordered" +and that these classes may not provide the order you expect. +The intent here is to give a consistent order not a particular order. The Ordered (Di/Multi/MultiDi) Graphs give a consistent order for reporting of nodes and edges. The order of node reporting agrees with node adding, but for @@ -8,6 +13,17 @@ In general, you should use the default (i.e., unordered) graph classes. However, there are times (e.g., when testing) when you may need the order preserved. + +Special care is required when using subgraphs of the Ordered classes. +The order of nodes in the subclass is not necessarily the same order +as the original class. In general it is probably better to avoid using +subgraphs and replace with code similar to: + + # instead of SG = G.subgraph(ordered_nodes) + SG=nx.OrderedGraph() + SG.add_nodes_from(ordered_nodes) + SG.add_edges_from((u, v) for (u, v) in G.edges() if u in SG if v in SG) + """ from collections import OrderedDict
{"golden_diff": "diff --git a/networkx/classes/ordered.py b/networkx/classes/ordered.py\n--- a/networkx/classes/ordered.py\n+++ b/networkx/classes/ordered.py\n@@ -1,5 +1,10 @@\n \"\"\"\n Consistently ordered variants of the default base classes.\n+Note that if you are using Python 3.6, you shouldn't need these classes\n+because the dicts in Python 3.6 are ordered.\n+Note also that there are many differing expectations for the word \"ordered\"\n+and that these classes may not provide the order you expect.\n+The intent here is to give a consistent order not a particular order.\n \n The Ordered (Di/Multi/MultiDi) Graphs give a consistent order for reporting of\n nodes and edges. The order of node reporting agrees with node adding, but for\n@@ -8,6 +13,17 @@\n In general, you should use the default (i.e., unordered) graph classes.\n However, there are times (e.g., when testing) when you may need the\n order preserved.\n+\n+Special care is required when using subgraphs of the Ordered classes.\n+The order of nodes in the subclass is not necessarily the same order\n+as the original class. In general it is probably better to avoid using\n+subgraphs and replace with code similar to:\n+\n+ # instead of SG = G.subgraph(ordered_nodes)\n+ SG=nx.OrderedGraph()\n+ SG.add_nodes_from(ordered_nodes)\n+ SG.add_edges_from((u, v) for (u, v) in G.edges() if u in SG if v in SG)\n+\n \"\"\"\n from collections import OrderedDict\n", "issue": "`OrderedGraph.subgraph` does not maintain the order of the nodes\nA subgraph built from a `OrderedGraph` should keep the order of the nodes, yet nodes in the subgraph are neither in the order of the initial graph, nor in the order of the selection. The issue can be seen from the following snippet:\r\n\r\n```python\r\ngraph = nx.OrderedGraph()\r\nnodes = list(range(10))\r\nrandom.shuffle(nodes)\r\ngraph.add_nodes_from(nodes) # key order is (7, 2, 1, 9, 0, 8, 6, 4, 3, 5)\r\n\r\n# We create a selection in the same order as the initial graph keys\r\nto_keep = [key for key in graph if key % 2 == 0] # [2, 0, 8, 6, 4]\r\nsubgraph = graph.subgraph(to_keep) # (0, 2, 4, 6, 8)\r\n\r\n# We create a selection in a different order\r\nsubgraph = graph.subgraph([5, 3, 1]) # (1, 3, 5)\r\n```\r\n\r\nFrom what I see, the issue is due to `Graph.subgraph` passing the selection to `nx.filters.show_nodes` that transforms it to a set. The nodes in the subgraph are then in the order of the set; as sets do not preserve the order, the order of the initial graph if not preserved.\n", "before_files": [{"content": "\"\"\"\nConsistently ordered variants of the default base classes.\n\nThe Ordered (Di/Multi/MultiDi) Graphs give a consistent order for reporting of\nnodes and edges. 
The order of node reporting agrees with node adding, but for\nedges, the order is not necessarily the order that the edges were added.\n\nIn general, you should use the default (i.e., unordered) graph classes.\nHowever, there are times (e.g., when testing) when you may need the\norder preserved.\n\"\"\"\nfrom collections import OrderedDict\n\nfrom .graph import Graph\nfrom .multigraph import MultiGraph\nfrom .digraph import DiGraph\nfrom .multidigraph import MultiDiGraph\n\n__all__ = []\n\n__all__.extend([\n 'OrderedGraph',\n 'OrderedDiGraph',\n 'OrderedMultiGraph',\n 'OrderedMultiDiGraph',\n])\n\n\nclass OrderedGraph(Graph):\n \"\"\"Consistently ordered variant of :class:`~networkx.Graph`.\"\"\"\n node_dict_factory = OrderedDict\n adjlist_outer_dict_factory = OrderedDict\n adjlist_inner_dict_factory = OrderedDict\n edge_attr_dict_factory = OrderedDict\n\n def fresh_copy(self):\n \"\"\"Return a fresh copy graph with the same data structure.\n\n A fresh copy has no nodes, edges or graph attributes. It is\n the same data structure as the current graph. This method is\n typically used to create an empty version of the graph.\n \"\"\"\n return OrderedGraph()\n\n\nclass OrderedDiGraph(DiGraph):\n \"\"\"Consistently ordered variant of :class:`~networkx.DiGraph`.\"\"\"\n node_dict_factory = OrderedDict\n adjlist_outer_dict_factory = OrderedDict\n adjlist_inner_dict_factory = OrderedDict\n edge_attr_dict_factory = OrderedDict\n\n def fresh_copy(self):\n \"\"\"Return a fresh copy graph with the same data structure.\n\n A fresh copy has no nodes, edges or graph attributes. It is\n the same data structure as the current graph. This method is\n typically used to create an empty version of the graph.\n \"\"\"\n return OrderedDiGraph()\n\n\nclass OrderedMultiGraph(MultiGraph):\n \"\"\"Consistently ordered variant of :class:`~networkx.MultiGraph`.\"\"\"\n node_dict_factory = OrderedDict\n adjlist_outer_dict_factory = OrderedDict\n adjlist_inner_dict_factory = OrderedDict\n edge_key_dict_factory = OrderedDict\n edge_attr_dict_factory = OrderedDict\n\n def fresh_copy(self):\n \"\"\"Return a fresh copy graph with the same data structure.\n\n A fresh copy has no nodes, edges or graph attributes. It is\n the same data structure as the current graph. This method is\n typically used to create an empty version of the graph.\n \"\"\"\n return OrderedMultiGraph()\n\n\nclass OrderedMultiDiGraph(MultiDiGraph):\n \"\"\"Consistently ordered variant of :class:`~networkx.MultiDiGraph`.\"\"\"\n node_dict_factory = OrderedDict\n adjlist_outer_dict_factory = OrderedDict\n adjlist_inner_dict_factory = OrderedDict\n edge_key_dict_factory = OrderedDict\n edge_attr_dict_factory = OrderedDict\n\n def fresh_copy(self):\n \"\"\"Return a fresh copy graph with the same data structure.\n\n A fresh copy has no nodes, edges or graph attributes. It is\n the same data structure as the current graph. This method is\n typically used to create an empty version of the graph.\n \"\"\"\n return OrderedMultiDiGraph()\n", "path": "networkx/classes/ordered.py"}]}
1,752
353
gh_patches_debug_16045
rasdani/github-patches
git_diff
digitalfabrik__integreat-cms-1601
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Separate XLIFF export options ### Motivation <!-- A clear and concise description of what the motivation for the new feature is, and what problem it is solving. --> Apparently, the two options to export XLIFF files (either all pages or only public pages) are too similar and lead to accidental misclicks. ### Proposed Solution <!-- A clear and concise description of the feature you would like to add, and how it solves the motivating problem. --> - Move the public option to the top (since this is the most used) - Reword the "all pages" option to make it more obvious that this includes drafts - Maybe add an exclamation mark or similar to the "all pages" option ### Alternatives <!-- A clear and concise description of any alternative solutions or features you've considered, and why you're proposed solution is better. --> ### Additional Context <!-- Add any other information or screenshots about the feature request here. --> </issue> <code> [start of integreat_cms/cms/views/pages/page_bulk_actions.py] 1 import logging 2 3 from django.contrib import messages 4 from django.shortcuts import get_object_or_404 5 from django.utils.translation import ugettext as _ 6 from django.views.generic.list import MultipleObjectMixin 7 8 from ....xliff.utils import pages_to_xliff_file 9 from ...models import Page 10 from ...utils.pdf_utils import generate_pdf 11 from ...utils.translation_utils import ugettext_many_lazy as __ 12 from ..bulk_action_views import BulkActionView 13 14 logger = logging.getLogger(__name__) 15 16 17 class PageBulkActionMixin(MultipleObjectMixin): 18 """ 19 Mixin for page bulk actions 20 """ 21 22 #: The model of this :class:`~integreat_cms.cms.views.bulk_action_views.BulkActionView` 23 model = Page 24 25 26 # pylint: disable=too-many-ancestors 27 class GeneratePdfView(PageBulkActionMixin, BulkActionView): 28 """ 29 Bulk action for generating a PDF document of the content 30 """ 31 32 #: Whether the view requires change permissions 33 require_change_permission = False 34 #: Whether the public translation objects should be prefetched 35 prefetch_public_translations = True 36 37 def post(self, request, *args, **kwargs): 38 r""" 39 Apply the bulk action on every item in the queryset and redirect 40 41 :param request: The current request 42 :type request: ~django.http.HttpResponse 43 44 :param \*args: The supplied arguments 45 :type \*args: list 46 47 :param \**kwargs: The supplied keyword arguments 48 :type \**kwargs: dict 49 50 :return: The redirect 51 :rtype: ~django.http.HttpResponseRedirect 52 """ 53 # Generate PDF document and redirect to it 54 return generate_pdf( 55 request.region, 56 kwargs.get("language_slug"), 57 self.get_queryset(), 58 ) 59 60 61 # pylint: disable=too-many-ancestors 62 class ExportXliffView(PageBulkActionMixin, BulkActionView): 63 """ 64 Bulk action for generating XLIFF files for translations 65 """ 66 67 #: Whether only public translation should be exported 68 only_public = False 69 #: Whether the view requires change permissions 70 require_change_permission = False 71 72 def post(self, request, *args, **kwargs): 73 r""" 74 Function for handling a pdf export request for pages. 
75 The pages get extracted from request.GET attribute and the request is forwarded to :func:`~integreat_cms.cms.utils.pdf_utils.generate_pdf` 76 77 :param request: The current request 78 :type request: ~django.http.HttpResponse 79 80 :param \*args: The supplied arguments 81 :type \*args: list 82 83 :param \**kwargs: The supplied keyword arguments 84 :type \**kwargs: dict 85 86 :return: The redirect 87 :rtype: ~django.http.HttpResponseRedirect 88 """ 89 target_language = get_object_or_404( 90 self.request.region.language_tree_nodes, 91 language__slug=kwargs.get("language_slug"), 92 parent__isnull=False, 93 ).language 94 95 xliff_file_url = pages_to_xliff_file( 96 request, self.get_queryset(), target_language, only_public=self.only_public 97 ) 98 if xliff_file_url: 99 # Insert link with automatic download into success message 100 messages.success( 101 request, 102 __( 103 _("XLIFF file for translation to {} successfully created.").format( 104 target_language 105 ), 106 _( 107 "If the download does not start automatically, please click {}here{}." 108 ).format( 109 f"<a data-auto-download href='{xliff_file_url}' class='font-bold underline hover:no-underline' download>", 110 "</a>", 111 ), 112 ), 113 ) 114 115 # Let the base view handle the redirect 116 return super().post(request, *args, **kwargs) 117 [end of integreat_cms/cms/views/pages/page_bulk_actions.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/integreat_cms/cms/views/pages/page_bulk_actions.py b/integreat_cms/cms/views/pages/page_bulk_actions.py --- a/integreat_cms/cms/views/pages/page_bulk_actions.py +++ b/integreat_cms/cms/views/pages/page_bulk_actions.py @@ -100,9 +100,13 @@ messages.success( request, __( - _("XLIFF file for translation to {} successfully created.").format( - target_language - ), + _( + "XLIFF file with published pages only for translation to {} successfully created." + ).format(target_language) + if self.only_public + else _( + "XLIFF file with unpublished and published pages for translation to {} successfully created." + ).format(target_language), _( "If the download does not start automatically, please click {}here{}." ).format(
{"golden_diff": "diff --git a/integreat_cms/cms/views/pages/page_bulk_actions.py b/integreat_cms/cms/views/pages/page_bulk_actions.py\n--- a/integreat_cms/cms/views/pages/page_bulk_actions.py\n+++ b/integreat_cms/cms/views/pages/page_bulk_actions.py\n@@ -100,9 +100,13 @@\n messages.success(\n request,\n __(\n- _(\"XLIFF file for translation to {} successfully created.\").format(\n- target_language\n- ),\n+ _(\n+ \"XLIFF file with published pages only for translation to {} successfully created.\"\n+ ).format(target_language)\n+ if self.only_public\n+ else _(\n+ \"XLIFF file with unpublished and published pages for translation to {} successfully created.\"\n+ ).format(target_language),\n _(\n \"If the download does not start automatically, please click {}here{}.\"\n ).format(\n", "issue": "Separate XLIFF export options\n### Motivation\r\n<!-- A clear and concise description of what the motivation for the new feature is, and what problem it is solving. -->\r\nApparently, the two options to export XLIFF files (either all pages or only public pages) are too similar and lead to accidental misclicks.\r\n\r\n### Proposed Solution\r\n<!-- A clear and concise description of the feature you would like to add, and how it solves the motivating problem. -->\r\n- Move the public option to the top (since this is the most used)\r\n- Reword the \"all pages\" option to make it more obvious that this includes drafts\r\n- Maybe add an exclamation mark or similar to the \"all pages\" option\r\n\r\n### Alternatives\r\n<!-- A clear and concise description of any alternative solutions or features you've considered, and why you're proposed solution is better. -->\r\n\r\n\r\n### Additional Context\r\n<!-- Add any other information or screenshots about the feature request here. 
-->\r\n\r\n\n", "before_files": [{"content": "import logging\n\nfrom django.contrib import messages\nfrom django.shortcuts import get_object_or_404\nfrom django.utils.translation import ugettext as _\nfrom django.views.generic.list import MultipleObjectMixin\n\nfrom ....xliff.utils import pages_to_xliff_file\nfrom ...models import Page\nfrom ...utils.pdf_utils import generate_pdf\nfrom ...utils.translation_utils import ugettext_many_lazy as __\nfrom ..bulk_action_views import BulkActionView\n\nlogger = logging.getLogger(__name__)\n\n\nclass PageBulkActionMixin(MultipleObjectMixin):\n \"\"\"\n Mixin for page bulk actions\n \"\"\"\n\n #: The model of this :class:`~integreat_cms.cms.views.bulk_action_views.BulkActionView`\n model = Page\n\n\n# pylint: disable=too-many-ancestors\nclass GeneratePdfView(PageBulkActionMixin, BulkActionView):\n \"\"\"\n Bulk action for generating a PDF document of the content\n \"\"\"\n\n #: Whether the view requires change permissions\n require_change_permission = False\n #: Whether the public translation objects should be prefetched\n prefetch_public_translations = True\n\n def post(self, request, *args, **kwargs):\n r\"\"\"\n Apply the bulk action on every item in the queryset and redirect\n\n :param request: The current request\n :type request: ~django.http.HttpResponse\n\n :param \\*args: The supplied arguments\n :type \\*args: list\n\n :param \\**kwargs: The supplied keyword arguments\n :type \\**kwargs: dict\n\n :return: The redirect\n :rtype: ~django.http.HttpResponseRedirect\n \"\"\"\n # Generate PDF document and redirect to it\n return generate_pdf(\n request.region,\n kwargs.get(\"language_slug\"),\n self.get_queryset(),\n )\n\n\n# pylint: disable=too-many-ancestors\nclass ExportXliffView(PageBulkActionMixin, BulkActionView):\n \"\"\"\n Bulk action for generating XLIFF files for translations\n \"\"\"\n\n #: Whether only public translation should be exported\n only_public = False\n #: Whether the view requires change permissions\n require_change_permission = False\n\n def post(self, request, *args, **kwargs):\n r\"\"\"\n Function for handling a pdf export request for pages.\n The pages get extracted from request.GET attribute and the request is forwarded to :func:`~integreat_cms.cms.utils.pdf_utils.generate_pdf`\n\n :param request: The current request\n :type request: ~django.http.HttpResponse\n\n :param \\*args: The supplied arguments\n :type \\*args: list\n\n :param \\**kwargs: The supplied keyword arguments\n :type \\**kwargs: dict\n\n :return: The redirect\n :rtype: ~django.http.HttpResponseRedirect\n \"\"\"\n target_language = get_object_or_404(\n self.request.region.language_tree_nodes,\n language__slug=kwargs.get(\"language_slug\"),\n parent__isnull=False,\n ).language\n\n xliff_file_url = pages_to_xliff_file(\n request, self.get_queryset(), target_language, only_public=self.only_public\n )\n if xliff_file_url:\n # Insert link with automatic download into success message\n messages.success(\n request,\n __(\n _(\"XLIFF file for translation to {} successfully created.\").format(\n target_language\n ),\n _(\n \"If the download does not start automatically, please click {}here{}.\"\n ).format(\n f\"<a data-auto-download href='{xliff_file_url}' class='font-bold underline hover:no-underline' download>\",\n \"</a>\",\n ),\n ),\n )\n\n # Let the base view handle the redirect\n return super().post(request, *args, **kwargs)\n", "path": "integreat_cms/cms/views/pages/page_bulk_actions.py"}]}
1,800
200
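The trailing `num_tokens_prompt` and `num_tokens_diff` integers are token counts for the prompt and diff fields. Which tokenizer produced them is not stated anywhere in the dump, so the following recomputation sketch treats the tokenizer choice as a pure assumption:

```python
# Minimal sketch: recompute a record's token counts.
# Assumption: the dump does not say which tokenizer produced num_tokens_*;
# GPT-2's tokenizer is used here purely as an illustrative stand-in.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("gpt2")

def count_tokens(text: str) -> int:
    return len(tok.encode(text))

# A mismatch with the stored num_tokens_prompt / num_tokens_diff values would
# simply indicate that a different tokenizer was used upstream.
```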
gh_patches_debug_40548
rasdani/github-patches
git_diff
huggingface__huggingface_hub-285
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add sentence similarity for spaCy Computing similarity is simple with `spaCy` and there is an existing [template pipeline](https://github.com/huggingface/huggingface_hub/blob/main/api-inference-community/docker_images/common/app/pipelines/sentence_similarity.py) for sentence similarity at which can be modified for this. https://spacy.io/api/doc#similarity </issue> <code> [start of api-inference-community/docker_images/spacy/app/pipelines/sentence_similarity.py] 1 from typing import Dict, List, Union 2 3 from app.pipelines import Pipeline 4 5 6 class SentenceSimilarityPipeline(Pipeline): 7 def __init__( 8 self, 9 model_id: str, 10 ): 11 # IMPLEMENT_THIS 12 # Preload all the elements you are going to need at inference. 13 # For instance your model, processors, tokenizer that might be needed. 14 # This function is only called once, so do all the heavy processing I/O here 15 raise NotImplementedError( 16 "Please implement SentenceSimilarityPipeline __init__ function" 17 ) 18 19 def __call__(self, inputs: Dict[str, Union[str, List[str]]]) -> List[float]: 20 """ 21 Args: 22 inputs (:obj:`dict`): 23 a dictionary containing two keys, 'source_sentence' mapping 24 to the sentence that will be compared against all the others, 25 and 'sentences', mapping to a list of strings to which the 26 source will be compared. 27 Return: 28 A :obj:`list` of floats: Some similarity measure between `source_sentence` and each sentence from `sentences`. 29 """ 30 # IMPLEMENT_THIS 31 raise NotImplementedError( 32 "Please implement SentenceSimilarityPipeline __call__ function" 33 ) 34 [end of api-inference-community/docker_images/spacy/app/pipelines/sentence_similarity.py] [start of api-inference-community/docker_images/spacy/app/main.py] 1 import functools 2 import logging 3 import os 4 from typing import Dict, Type 5 6 from api_inference_community.routes import pipeline_route, status_ok 7 from app.pipelines import ( 8 Pipeline, 9 TextClassificationPipeline, 10 TokenClassificationPipeline, 11 ) 12 from starlette.applications import Starlette 13 from starlette.middleware import Middleware 14 from starlette.middleware.gzip import GZipMiddleware 15 from starlette.routing import Route 16 17 18 TASK = os.getenv("TASK") 19 MODEL_ID = os.getenv("MODEL_ID") 20 21 22 logger = logging.getLogger(__name__) 23 24 25 # Add the allowed tasks 26 # Supported tasks are: 27 # - text-generation 28 # - text-classification 29 # - token-classification 30 # - translation 31 # - summarization 32 # - automatic-speech-recognition 33 # - ... 34 # For instance 35 # from app.pipelines import AutomaticSpeechRecognitionPipeline 36 # ALLOWED_TASKS = {"automatic-speech-recognition": AutomaticSpeechRecognitionPipeline} 37 # You can check the requirements and expectations of each pipelines in their respective 38 # directories. Implement directly within the directories. 
39 ALLOWED_TASKS: Dict[str, Type[Pipeline]] = { 40 "token-classification": TokenClassificationPipeline, 41 "text-classification": TextClassificationPipeline, 42 } 43 44 45 @functools.lru_cache() 46 def get_pipeline() -> Pipeline: 47 task = os.environ["TASK"] 48 model_id = os.environ["MODEL_ID"] 49 if task not in ALLOWED_TASKS: 50 raise EnvironmentError(f"{task} is not a valid pipeline for model : {model_id}") 51 return ALLOWED_TASKS[task](model_id) 52 53 54 routes = [ 55 Route("/{whatever:path}", status_ok), 56 Route("/{whatever:path}", pipeline_route, methods=["POST"]), 57 ] 58 59 middleware = [Middleware(GZipMiddleware, minimum_size=1000)] 60 if os.environ.get("DEBUG", "") == "1": 61 from starlette.middleware.cors import CORSMiddleware 62 63 middleware.append( 64 Middleware( 65 CORSMiddleware, 66 allow_origins=["*"], 67 allow_headers=["*"], 68 allow_methods=["*"], 69 ) 70 ) 71 72 app = Starlette(routes=routes, middleware=middleware) 73 74 75 @app.on_event("startup") 76 async def startup_event(): 77 logger = logging.getLogger("uvicorn.access") 78 handler = logging.StreamHandler() 79 handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")) 80 logger.handlers = [handler] 81 82 # Link between `api-inference-community` and framework code. 83 app.get_pipeline = get_pipeline 84 try: 85 get_pipeline() 86 except Exception: 87 # We can fail so we can show exception later. 88 pass 89 90 91 if __name__ == "__main__": 92 try: 93 get_pipeline() 94 except Exception: 95 # We can fail so we can show exception later. 96 pass 97 [end of api-inference-community/docker_images/spacy/app/main.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/api-inference-community/docker_images/spacy/app/main.py b/api-inference-community/docker_images/spacy/app/main.py --- a/api-inference-community/docker_images/spacy/app/main.py +++ b/api-inference-community/docker_images/spacy/app/main.py @@ -6,6 +6,7 @@ from api_inference_community.routes import pipeline_route, status_ok from app.pipelines import ( Pipeline, + SentenceSimilarityPipeline, TextClassificationPipeline, TokenClassificationPipeline, ) @@ -30,6 +31,7 @@ # - translation # - summarization # - automatic-speech-recognition +# - sentence-similarity # - ... # For instance # from app.pipelines import AutomaticSpeechRecognitionPipeline @@ -39,6 +41,7 @@ ALLOWED_TASKS: Dict[str, Type[Pipeline]] = { "token-classification": TokenClassificationPipeline, "text-classification": TextClassificationPipeline, + "sentence-similarity": SentenceSimilarityPipeline, } diff --git a/api-inference-community/docker_images/spacy/app/pipelines/sentence_similarity.py b/api-inference-community/docker_images/spacy/app/pipelines/sentence_similarity.py --- a/api-inference-community/docker_images/spacy/app/pipelines/sentence_similarity.py +++ b/api-inference-community/docker_images/spacy/app/pipelines/sentence_similarity.py @@ -1,3 +1,6 @@ +import os +import subprocess +import sys from typing import Dict, List, Union from app.pipelines import Pipeline @@ -8,14 +11,23 @@ self, model_id: str, ): - # IMPLEMENT_THIS - # Preload all the elements you are going to need at inference. - # For instance your model, processors, tokenizer that might be needed. - # This function is only called once, so do all the heavy processing I/O here - raise NotImplementedError( - "Please implement SentenceSimilarityPipeline __init__ function" + # At the time, only public models from spaCy are allowed in the inference API. + full_model_path = model_id.split("/") + if len(full_model_path) != 2: + raise ValueError( + f"Invalid model_id: {model_id}. It should have a namespace (:namespace:/:model_name:)" + ) + namespace, model_name = full_model_path + package = f"https://huggingface.co/{namespace}/{model_name}/resolve/main/{model_name}-any-py3-none-any.whl" + cache_dir = os.environ["PIP_CACHE"] + subprocess.check_call( + [sys.executable, "-m", "pip", "install", "--cache-dir", cache_dir, package] ) + import spacy + + self.model = spacy.load(model_name) + def __call__(self, inputs: Dict[str, Union[str, List[str]]]) -> List[float]: """ Args: @@ -27,7 +39,12 @@ Return: A :obj:`list` of floats: Some similarity measure between `source_sentence` and each sentence from `sentences`. """ - # IMPLEMENT_THIS - raise NotImplementedError( - "Please implement SentenceSimilarityPipeline __call__ function" - ) + source_sentence = inputs["source_sentence"] + source_doc = self.model(source_sentence) + + similarities = [] + for sentence in inputs["sentences"]: + search_doc = self.model(sentence) + similarities.append(source_doc.similarity(search_doc)) + + return similarities
{"golden_diff": "diff --git a/api-inference-community/docker_images/spacy/app/main.py b/api-inference-community/docker_images/spacy/app/main.py\n--- a/api-inference-community/docker_images/spacy/app/main.py\n+++ b/api-inference-community/docker_images/spacy/app/main.py\n@@ -6,6 +6,7 @@\n from api_inference_community.routes import pipeline_route, status_ok\n from app.pipelines import (\n Pipeline,\n+ SentenceSimilarityPipeline,\n TextClassificationPipeline,\n TokenClassificationPipeline,\n )\n@@ -30,6 +31,7 @@\n # - translation\n # - summarization\n # - automatic-speech-recognition\n+# - sentence-similarity\n # - ...\n # For instance\n # from app.pipelines import AutomaticSpeechRecognitionPipeline\n@@ -39,6 +41,7 @@\n ALLOWED_TASKS: Dict[str, Type[Pipeline]] = {\n \"token-classification\": TokenClassificationPipeline,\n \"text-classification\": TextClassificationPipeline,\n+ \"sentence-similarity\": SentenceSimilarityPipeline,\n }\n \n \ndiff --git a/api-inference-community/docker_images/spacy/app/pipelines/sentence_similarity.py b/api-inference-community/docker_images/spacy/app/pipelines/sentence_similarity.py\n--- a/api-inference-community/docker_images/spacy/app/pipelines/sentence_similarity.py\n+++ b/api-inference-community/docker_images/spacy/app/pipelines/sentence_similarity.py\n@@ -1,3 +1,6 @@\n+import os\n+import subprocess\n+import sys\n from typing import Dict, List, Union\n \n from app.pipelines import Pipeline\n@@ -8,14 +11,23 @@\n self,\n model_id: str,\n ):\n- # IMPLEMENT_THIS\n- # Preload all the elements you are going to need at inference.\n- # For instance your model, processors, tokenizer that might be needed.\n- # This function is only called once, so do all the heavy processing I/O here\n- raise NotImplementedError(\n- \"Please implement SentenceSimilarityPipeline __init__ function\"\n+ # At the time, only public models from spaCy are allowed in the inference API.\n+ full_model_path = model_id.split(\"/\")\n+ if len(full_model_path) != 2:\n+ raise ValueError(\n+ f\"Invalid model_id: {model_id}. It should have a namespace (:namespace:/:model_name:)\"\n+ )\n+ namespace, model_name = full_model_path\n+ package = f\"https://huggingface.co/{namespace}/{model_name}/resolve/main/{model_name}-any-py3-none-any.whl\"\n+ cache_dir = os.environ[\"PIP_CACHE\"]\n+ subprocess.check_call(\n+ [sys.executable, \"-m\", \"pip\", \"install\", \"--cache-dir\", cache_dir, package]\n )\n \n+ import spacy\n+\n+ self.model = spacy.load(model_name)\n+\n def __call__(self, inputs: Dict[str, Union[str, List[str]]]) -> List[float]:\n \"\"\"\n Args:\n@@ -27,7 +39,12 @@\n Return:\n A :obj:`list` of floats: Some similarity measure between `source_sentence` and each sentence from `sentences`.\n \"\"\"\n- # IMPLEMENT_THIS\n- raise NotImplementedError(\n- \"Please implement SentenceSimilarityPipeline __call__ function\"\n- )\n+ source_sentence = inputs[\"source_sentence\"]\n+ source_doc = self.model(source_sentence)\n+\n+ similarities = []\n+ for sentence in inputs[\"sentences\"]:\n+ search_doc = self.model(sentence)\n+ similarities.append(source_doc.similarity(search_doc))\n+\n+ return similarities\n", "issue": "Add sentence similarity for spaCy\nComputing similarity is simple with `spaCy` and there is an existing [template pipeline](https://github.com/huggingface/huggingface_hub/blob/main/api-inference-community/docker_images/common/app/pipelines/sentence_similarity.py) for sentence similarity at which can be modified for this. 
\r\n\r\nhttps://spacy.io/api/doc#similarity\n", "before_files": [{"content": "from typing import Dict, List, Union\n\nfrom app.pipelines import Pipeline\n\n\nclass SentenceSimilarityPipeline(Pipeline):\n def __init__(\n self,\n model_id: str,\n ):\n # IMPLEMENT_THIS\n # Preload all the elements you are going to need at inference.\n # For instance your model, processors, tokenizer that might be needed.\n # This function is only called once, so do all the heavy processing I/O here\n raise NotImplementedError(\n \"Please implement SentenceSimilarityPipeline __init__ function\"\n )\n\n def __call__(self, inputs: Dict[str, Union[str, List[str]]]) -> List[float]:\n \"\"\"\n Args:\n inputs (:obj:`dict`):\n a dictionary containing two keys, 'source_sentence' mapping\n to the sentence that will be compared against all the others,\n and 'sentences', mapping to a list of strings to which the\n source will be compared.\n Return:\n A :obj:`list` of floats: Some similarity measure between `source_sentence` and each sentence from `sentences`.\n \"\"\"\n # IMPLEMENT_THIS\n raise NotImplementedError(\n \"Please implement SentenceSimilarityPipeline __call__ function\"\n )\n", "path": "api-inference-community/docker_images/spacy/app/pipelines/sentence_similarity.py"}, {"content": "import functools\nimport logging\nimport os\nfrom typing import Dict, Type\n\nfrom api_inference_community.routes import pipeline_route, status_ok\nfrom app.pipelines import (\n Pipeline,\n TextClassificationPipeline,\n TokenClassificationPipeline,\n)\nfrom starlette.applications import Starlette\nfrom starlette.middleware import Middleware\nfrom starlette.middleware.gzip import GZipMiddleware\nfrom starlette.routing import Route\n\n\nTASK = os.getenv(\"TASK\")\nMODEL_ID = os.getenv(\"MODEL_ID\")\n\n\nlogger = logging.getLogger(__name__)\n\n\n# Add the allowed tasks\n# Supported tasks are:\n# - text-generation\n# - text-classification\n# - token-classification\n# - translation\n# - summarization\n# - automatic-speech-recognition\n# - ...\n# For instance\n# from app.pipelines import AutomaticSpeechRecognitionPipeline\n# ALLOWED_TASKS = {\"automatic-speech-recognition\": AutomaticSpeechRecognitionPipeline}\n# You can check the requirements and expectations of each pipelines in their respective\n# directories. 
Implement directly within the directories.\nALLOWED_TASKS: Dict[str, Type[Pipeline]] = {\n \"token-classification\": TokenClassificationPipeline,\n \"text-classification\": TextClassificationPipeline,\n}\n\n\[email protected]_cache()\ndef get_pipeline() -> Pipeline:\n task = os.environ[\"TASK\"]\n model_id = os.environ[\"MODEL_ID\"]\n if task not in ALLOWED_TASKS:\n raise EnvironmentError(f\"{task} is not a valid pipeline for model : {model_id}\")\n return ALLOWED_TASKS[task](model_id)\n\n\nroutes = [\n Route(\"/{whatever:path}\", status_ok),\n Route(\"/{whatever:path}\", pipeline_route, methods=[\"POST\"]),\n]\n\nmiddleware = [Middleware(GZipMiddleware, minimum_size=1000)]\nif os.environ.get(\"DEBUG\", \"\") == \"1\":\n from starlette.middleware.cors import CORSMiddleware\n\n middleware.append(\n Middleware(\n CORSMiddleware,\n allow_origins=[\"*\"],\n allow_headers=[\"*\"],\n allow_methods=[\"*\"],\n )\n )\n\napp = Starlette(routes=routes, middleware=middleware)\n\n\[email protected]_event(\"startup\")\nasync def startup_event():\n logger = logging.getLogger(\"uvicorn.access\")\n handler = logging.StreamHandler()\n handler.setFormatter(logging.Formatter(\"%(asctime)s - %(levelname)s - %(message)s\"))\n logger.handlers = [handler]\n\n # Link between `api-inference-community` and framework code.\n app.get_pipeline = get_pipeline\n try:\n get_pipeline()\n except Exception:\n # We can fail so we can show exception later.\n pass\n\n\nif __name__ == \"__main__\":\n try:\n get_pipeline()\n except Exception:\n # We can fail so we can show exception later.\n pass\n", "path": "api-inference-community/docker_images/spacy/app/main.py"}]}
1,752
777
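
The golden diff above defers to spaCy's `Doc.similarity` after pip-installing the model wheel. A minimal sketch of the similarity call itself, assuming a vectors-capable model such as `en_core_web_md` is already installed (the small `*_sm` models ship no word vectors, so their scores are far less meaningful):

```python
import spacy

# Assumes `python -m spacy download en_core_web_md` was run beforehand;
# Doc.similarity() compares averaged token vectors from md/lg models.
nlp = spacy.load("en_core_web_md")

source = nlp("I like salty fries and hamburgers.")
sentences = ["Fast food tastes very good.", "The stock market crashed today."]

# One cosine-similarity float per candidate sentence, as in the pipeline.
similarities = [source.similarity(nlp(text)) for text in sentences]
print(similarities)
```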
gh_patches_debug_29797
rasdani/github-patches
git_diff
mathesar-foundation__mathesar-1953
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Error handler should not capture exceptions in DEBUG mode ## Description Based on the [conclusion from the discussion](https://github.com/centerofci/mathesar/discussions/1150). Improve backend's debugability via two changes: 1. when backend is in debug-mode, add a pretty-printed stacktrace alongside the current error JSON; 2. stop wrapping unhandled exceptions in 4999 errors. For some context, we're catching backend exceptions and wrapping them in custom errors so as to provide a uniform error interface to API clients. As it is now it has detrimental effects on debugging, because the resulting error messages don't include stacktraces. Above changes aim to address that. </issue> <code> [start of mathesar/exception_handlers.py] 1 import warnings 2 3 from django.conf import settings 4 from django.db import IntegrityError as DjangoIntegrityError 5 from django.utils.encoding import force_str 6 from rest_framework.views import exception_handler 7 from rest_framework_friendly_errors.settings import FRIENDLY_EXCEPTION_DICT 8 from sqlalchemy.exc import IntegrityError, ProgrammingError 9 10 from db.types.exceptions import UnsupportedTypeException 11 from mathesar.api.exceptions.database_exceptions import ( 12 base_exceptions as base_api_exceptions, 13 exceptions as database_api_exceptions, 14 ) 15 from mathesar.api.exceptions.data_import_exceptions import exceptions as data_import_api_exceptions 16 from mathesar.api.exceptions.error_codes import ErrorCodes 17 from mathesar.api.exceptions.exception_mappers import integrity_error_mapper 18 from mathesar.api.exceptions.generic_exceptions.base_exceptions import get_default_api_exception 19 from mathesar.errors import URLDownloadError, URLNotReachable, URLInvalidContentTypeError 20 21 exception_map = { 22 IntegrityError: integrity_error_mapper, 23 DjangoIntegrityError: integrity_error_mapper, 24 UnsupportedTypeException: lambda exc: database_api_exceptions.UnsupportedTypeAPIException(exc), 25 ProgrammingError: lambda exc: base_api_exceptions.ProgrammingAPIException(exc), 26 URLDownloadError: lambda exc: data_import_api_exceptions.URLDownloadErrorAPIException(exc), 27 URLNotReachable: lambda exc: data_import_api_exceptions.URLNotReachableAPIException(exc), 28 URLInvalidContentTypeError: lambda exc: data_import_api_exceptions.URLInvalidContentTypeAPIException(exc) 29 } 30 31 32 def standardize_error_response(data): 33 for index, error in enumerate(data): 34 if 'code' in error: 35 if error['code'] is not None and str(error['code']) != 'None': 36 data[index]['code'] = int(error['code']) 37 else: 38 data[index]['code'] = ErrorCodes.UnknownError.value 39 if 'detail' not in error: 40 data[index]['detail'] = error.pop('details', {}) 41 return data 42 43 44 def mathesar_exception_handler(exc, context): 45 response = exception_handler(exc, context) 46 # DRF default exception handler does not handle non API errors, 47 # So we convert it to proper API response 48 if not response: 49 # Check if we have an equivalent API exception that is able to convert the exception to proper error 50 mapped_exception_class = exception_map.get(exc.__class__) 51 if mapped_exception_class is None: 52 if getattr(settings, 'MATHESAR_CAPTURE_UNHANDLED_EXCEPTION', False): 53 mapped_exception_class = get_default_api_exception 54 else: 55 raise exc 56 api_exception = mapped_exception_class(exc) 57 response = exception_handler(api_exception, context) 58 59 if response is not None: 60 
# Check if conforms to the api spec 61 if is_pretty(response.data): 62 # Validation exception converts error_codes from integer to string, we need to convert it back into 63 response.data = standardize_error_response(response.data) 64 return response 65 # Certain error raised by drf automatically don't follow the api error spec, 66 # so we convert those into proper format 67 else: 68 warnings.warn("Error Response does not conform to the api spec. Please handle the exception properly") 69 error_code = FRIENDLY_EXCEPTION_DICT.get( 70 exc.__class__.__name__, None 71 ) 72 if error_code is None and settings.MATHESAR_MODE != "PRODUCTION": 73 raise Exception("Error Response does not conform to the api spec. Please handle the exception properly") 74 75 # Errors thrown by drf automatically 76 if isinstance(response.data, dict): 77 error_message = response.data.pop('detail', '') 78 79 response_data = {} 80 response_data['code'] = error_code 81 response_data['message'] = error_message 82 response_data['details'] = {'exception': force_str(exc)} 83 response.data = [response_data] 84 return response 85 86 87 def is_pretty(data): 88 if not isinstance(data, list): 89 return False 90 else: 91 for error_details in data: 92 if ( 93 not isinstance(error_details, dict) 94 or 'code' not in error_details 95 or 'message' not in error_details 96 ): 97 return False 98 return True 99 [end of mathesar/exception_handlers.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mathesar/exception_handlers.py b/mathesar/exception_handlers.py --- a/mathesar/exception_handlers.py +++ b/mathesar/exception_handlers.py @@ -1,4 +1,5 @@ import warnings +import traceback from django.conf import settings from django.db import IntegrityError as DjangoIntegrityError @@ -38,6 +39,9 @@ data[index]['code'] = ErrorCodes.UnknownError.value if 'detail' not in error: data[index]['detail'] = error.pop('details', {}) + # Adds a Stack-trace of the error for better debugging + if settings.MATHESAR_MODE == 'DEVELOPMENT': + data[index]['stacktrace'] = reformat_stacktrace(traceback.format_exc()) return data @@ -80,6 +84,9 @@ response_data['code'] = error_code response_data['message'] = error_message response_data['details'] = {'exception': force_str(exc)} + # Stacktrace should only be returned if MATHESAR_MODE is set to DEVELOPMENT + if settings.MATHESAR_MODE == 'DEVELOPMENT': + response_data['stacktrace'] = reformat_stacktrace(traceback.format_exc()) response.data = [response_data] return response @@ -96,3 +103,8 @@ ): return False return True + + +def reformat_stacktrace(stacktrace): + stacktrace_list = stacktrace.splitlines()[1:] + return [f'{i + 1}. {line.strip()}' for i, line in enumerate(stacktrace_list)]
{"golden_diff": "diff --git a/mathesar/exception_handlers.py b/mathesar/exception_handlers.py\n--- a/mathesar/exception_handlers.py\n+++ b/mathesar/exception_handlers.py\n@@ -1,4 +1,5 @@\n import warnings\n+import traceback\n \n from django.conf import settings\n from django.db import IntegrityError as DjangoIntegrityError\n@@ -38,6 +39,9 @@\n data[index]['code'] = ErrorCodes.UnknownError.value\n if 'detail' not in error:\n data[index]['detail'] = error.pop('details', {})\n+ # Adds a Stack-trace of the error for better debugging\n+ if settings.MATHESAR_MODE == 'DEVELOPMENT':\n+ data[index]['stacktrace'] = reformat_stacktrace(traceback.format_exc())\n return data\n \n \n@@ -80,6 +84,9 @@\n response_data['code'] = error_code\n response_data['message'] = error_message\n response_data['details'] = {'exception': force_str(exc)}\n+ # Stacktrace should only be returned if MATHESAR_MODE is set to DEVELOPMENT\n+ if settings.MATHESAR_MODE == 'DEVELOPMENT':\n+ response_data['stacktrace'] = reformat_stacktrace(traceback.format_exc())\n response.data = [response_data]\n return response\n \n@@ -96,3 +103,8 @@\n ):\n return False\n return True\n+\n+\n+def reformat_stacktrace(stacktrace):\n+ stacktrace_list = stacktrace.splitlines()[1:]\n+ return [f'{i + 1}. {line.strip()}' for i, line in enumerate(stacktrace_list)]\n", "issue": "Error handler should not capture exceptions in DEBUG mode\n## Description\r\nBased on the [conclusion from the discussion](https://github.com/centerofci/mathesar/discussions/1150).\r\n\r\nImprove backend's debugability via two changes:\r\n1. when backend is in debug-mode, add a pretty-printed stacktrace alongside the current error JSON;\r\n2. stop wrapping unhandled exceptions in 4999 errors.\r\n\r\nFor some context, we're catching backend exceptions and wrapping them in custom errors so as to provide a uniform error interface to API clients. As it is now it has detrimental effects on debugging, because the resulting error messages don't include stacktraces. 
Above changes aim to address that.\n", "before_files": [{"content": "import warnings\n\nfrom django.conf import settings\nfrom django.db import IntegrityError as DjangoIntegrityError\nfrom django.utils.encoding import force_str\nfrom rest_framework.views import exception_handler\nfrom rest_framework_friendly_errors.settings import FRIENDLY_EXCEPTION_DICT\nfrom sqlalchemy.exc import IntegrityError, ProgrammingError\n\nfrom db.types.exceptions import UnsupportedTypeException\nfrom mathesar.api.exceptions.database_exceptions import (\n base_exceptions as base_api_exceptions,\n exceptions as database_api_exceptions,\n)\nfrom mathesar.api.exceptions.data_import_exceptions import exceptions as data_import_api_exceptions\nfrom mathesar.api.exceptions.error_codes import ErrorCodes\nfrom mathesar.api.exceptions.exception_mappers import integrity_error_mapper\nfrom mathesar.api.exceptions.generic_exceptions.base_exceptions import get_default_api_exception\nfrom mathesar.errors import URLDownloadError, URLNotReachable, URLInvalidContentTypeError\n\nexception_map = {\n IntegrityError: integrity_error_mapper,\n DjangoIntegrityError: integrity_error_mapper,\n UnsupportedTypeException: lambda exc: database_api_exceptions.UnsupportedTypeAPIException(exc),\n ProgrammingError: lambda exc: base_api_exceptions.ProgrammingAPIException(exc),\n URLDownloadError: lambda exc: data_import_api_exceptions.URLDownloadErrorAPIException(exc),\n URLNotReachable: lambda exc: data_import_api_exceptions.URLNotReachableAPIException(exc),\n URLInvalidContentTypeError: lambda exc: data_import_api_exceptions.URLInvalidContentTypeAPIException(exc)\n}\n\n\ndef standardize_error_response(data):\n for index, error in enumerate(data):\n if 'code' in error:\n if error['code'] is not None and str(error['code']) != 'None':\n data[index]['code'] = int(error['code'])\n else:\n data[index]['code'] = ErrorCodes.UnknownError.value\n if 'detail' not in error:\n data[index]['detail'] = error.pop('details', {})\n return data\n\n\ndef mathesar_exception_handler(exc, context):\n response = exception_handler(exc, context)\n # DRF default exception handler does not handle non API errors,\n # So we convert it to proper API response\n if not response:\n # Check if we have an equivalent API exception that is able to convert the exception to proper error\n mapped_exception_class = exception_map.get(exc.__class__)\n if mapped_exception_class is None:\n if getattr(settings, 'MATHESAR_CAPTURE_UNHANDLED_EXCEPTION', False):\n mapped_exception_class = get_default_api_exception\n else:\n raise exc\n api_exception = mapped_exception_class(exc)\n response = exception_handler(api_exception, context)\n\n if response is not None:\n # Check if conforms to the api spec\n if is_pretty(response.data):\n # Validation exception converts error_codes from integer to string, we need to convert it back into\n response.data = standardize_error_response(response.data)\n return response\n # Certain error raised by drf automatically don't follow the api error spec,\n # so we convert those into proper format\n else:\n warnings.warn(\"Error Response does not conform to the api spec. Please handle the exception properly\")\n error_code = FRIENDLY_EXCEPTION_DICT.get(\n exc.__class__.__name__, None\n )\n if error_code is None and settings.MATHESAR_MODE != \"PRODUCTION\":\n raise Exception(\"Error Response does not conform to the api spec. 
Please handle the exception properly\")\n\n # Errors thrown by drf automatically\n if isinstance(response.data, dict):\n error_message = response.data.pop('detail', '')\n\n response_data = {}\n response_data['code'] = error_code\n response_data['message'] = error_message\n response_data['details'] = {'exception': force_str(exc)}\n response.data = [response_data]\n return response\n\n\ndef is_pretty(data):\n if not isinstance(data, list):\n return False\n else:\n for error_details in data:\n if (\n not isinstance(error_details, dict)\n or 'code' not in error_details\n or 'message' not in error_details\n ):\n return False\n return True\n", "path": "mathesar/exception_handlers.py"}]}
1,726
357
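
The accepted patch above only attaches the stacktrace when `settings.MATHESAR_MODE == 'DEVELOPMENT'`, and it relies on `traceback.format_exc()` being called while an exception is being handled. A standalone sketch of that helper and its constraint:

```python
import traceback


def reformat_stacktrace(stacktrace):
    # Drop the "Traceback (most recent call last):" header and number
    # the remaining frames so they serialize cleanly as a JSON list.
    lines = stacktrace.splitlines()[1:]
    return [f"{i + 1}. {line.strip()}" for i, line in enumerate(lines)]


try:
    1 / 0
except ZeroDivisionError:
    # format_exc() only returns a useful trace inside an except block,
    # where the interpreter still holds the active exception.
    for entry in reformat_stacktrace(traceback.format_exc()):
        print(entry)
```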
gh_patches_debug_3010
rasdani/github-patches
git_diff
cocotb__cocotb-208
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Redhat 6.5 can no longer raise a TestError
Regressions report pass but number of tests has gone down on some simulators. Icarus for instance shows this.

```
0.00ns INFO     cocotb.gpi                 gpi_embed.c:213  in embed_sim_init    Running on Icarus Verilog version 0.10.0 (devel)
0.00ns INFO     cocotb.gpi                 gpi_embed.c:214  in embed_sim_init    Python interpreter initialised and cocotb loaded!
0.00ns INFO     cocotb.gpi                 __init__.py:96   in _initialise_testbench  Seeding Python random module with 1421853826
0.00ns INFO     cocotb.gpi                 __init__.py:110  in _initialise_testbench  Running tests with Cocotb v0.5a from /var/lib/jenkins/workspace/cocotb_icarus_x86_64
0.00ns ERROR    cocotb.coroutine.fail      decorators.py:99 in __init__          test_duplicate_yield isn't a value coroutine! Did you use the yield keyword?
Traceback (most recent call last):
  File "/var/lib/jenkins/workspace/cocotb_icarus_x86_64/cocotb/__init__.py", line 128, in _initialise_testbench
    regression.initialise()
  File "/var/lib/jenkins/workspace/cocotb_icarus_x86_64/cocotb/regression.py", line 123, in initialise
    test = thing(self._dut)
  File "/var/lib/jenkins/workspace/cocotb_icarus_x86_64/cocotb/decorators.py", line 356, in _wrapped_test
    raise_error(self, str(e))
  File "/var/lib/jenkins/workspace/cocotb_icarus_x86_64/cocotb/result.py", line 42, in raise_error
    if sys.version_info.major >= 3:
AttributeError: 'tuple' object has no attribute 'major'
```

</issue>
<code>
[start of cocotb/result.py]
1 ''' Copyright (c) 2013 Potential Ventures Ltd
2 Copyright (c) 2013 SolarFlare Communications Inc
3 All rights reserved.
4 
5 Redistribution and use in source and binary forms, with or without
6 modification, are permitted provided that the following conditions are met:
7     * Redistributions of source code must retain the above copyright
8       notice, this list of conditions and the following disclaimer.
9     * Redistributions in binary form must reproduce the above copyright
10       notice, this list of conditions and the following disclaimer in the
11       documentation and/or other materials provided with the distribution.
12     * Neither the name of Potential Ventures Ltd,
13       SolarFlare Communications Inc nor the
14       names of its contributors may be used to endorse or promote products
15       derived from this software without specific prior written permission.
16 
17 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
18 ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
19 WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
20 DISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY
21 DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
22 (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
23 LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
24 ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
26 SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. '''
27 
28 # TODO: Coule use cStringIO?
29 import traceback 30 import sys 31 #from StringIO import StringIO 32 from io import StringIO, BytesIO 33 34 def raise_error(obj, msg): 35 """ 36 Creates a TestError exception and raises it after printing a traceback 37 38 obj has a log method 39 msg is a string 40 """ 41 exc_type, exc_value, exc_traceback = sys.exc_info() 42 if sys.version_info.major >= 3: 43 buff = StringIO() 44 traceback.print_tb(exc_traceback, file=buff) 45 else: 46 buff_bytes = BytesIO() 47 traceback.print_tb(exc_traceback, file=buff_bytes) 48 buff = StringIO(buff_bytes.getvalue().decode("UTF-8")) 49 obj.log.error("%s\n%s" % (msg, buff.getvalue())) 50 exception = TestError(msg) 51 exception.stderr.write(buff.getvalue()) 52 raise exception 53 54 def create_error(obj, msg): 55 """ 56 As above, but return the exception rather than raise it, simply to avoid 57 too many levels of nested try/except blocks 58 """ 59 try: 60 raise_error(obj, msg) 61 except TestError as error: 62 return error 63 return TestError("Creating error traceback failed") 64 65 66 class ReturnValue(StopIteration): 67 def __init__(self, retval): 68 self.retval = retval 69 70 class TestComplete(StopIteration): 71 """ 72 Exceptions are used to pass test results around. 73 """ 74 def __init__(self, *args, **kwargs): 75 super(TestComplete, self).__init__(*args, **kwargs) 76 self.stdout = StringIO() 77 self.stderr = StringIO() 78 79 class TestError(TestComplete): pass 80 81 class TestFailure(TestComplete): pass 82 83 class TestSuccess(TestComplete): pass 84 85 class SimFailure(TestComplete): pass 86 [end of cocotb/result.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/cocotb/result.py b/cocotb/result.py --- a/cocotb/result.py +++ b/cocotb/result.py @@ -39,7 +39,8 @@ msg is a string """ exc_type, exc_value, exc_traceback = sys.exc_info() - if sys.version_info.major >= 3: + # 2.6 cannot use named access + if sys.version_info[0] >= 3: buff = StringIO() traceback.print_tb(exc_traceback, file=buff) else:
{"golden_diff": "diff --git a/cocotb/result.py b/cocotb/result.py\n--- a/cocotb/result.py\n+++ b/cocotb/result.py\n@@ -39,7 +39,8 @@\n msg is a string\n \"\"\"\n exc_type, exc_value, exc_traceback = sys.exc_info()\n- if sys.version_info.major >= 3:\n+ # 2.6 cannot use named access\n+ if sys.version_info[0] >= 3:\n buff = StringIO()\n traceback.print_tb(exc_traceback, file=buff)\n else:\n", "issue": "Redhat 6.5 can no longer raise a TestError\nRegressions report pass but number of tests has gone done on some simulators. Icarus for instance shows this.\n\n```\n0.00ns \u001b[34mINFO \u001b[39m cocotb.gpi gpi_embed.c:213 in embed_sim_init \u001b[34mRunning on Icarus Verilog version 0.10.0 (devel)\u001b[39m\n 0.00ns \u001b[34mINFO \u001b[39m cocotb.gpi gpi_embed.c:214 in embed_sim_init \u001b[34mPython interpreter initialised and cocotb loaded!\u001b[39m\n 0.00ns \u001b[34mINFO \u001b[39m cocotb.gpi __init__.py:96 in _initialise_testbench \u001b[34mSeeding Python random module with 1421853826\u001b[39m\n 0.00ns \u001b[34mINFO \u001b[39m cocotb.gpi __init__.py:110 in _initialise_testbench \u001b[34mRunning tests with Cocotb v0.5a from /var/lib/jenkins/workspace/cocotb_icarus_x86_64\u001b[39m\n 0.00ns \u001b[31mERROR \u001b[39m cocotb.coroutine.fail decorators.py:99 in __init__ \u001b[31mtest_duplicate_yield isn't a value coroutine! Did you use the yield keyword?\u001b[39m\nTraceback (most recent call last):\n File \"/var/lib/jenkins/workspace/cocotb_icarus_x86_64/cocotb/__init__.py\", line 128, in _initialise_testbench\n regression.initialise()\n File \"/var/lib/jenkins/workspace/cocotb_icarus_x86_64/cocotb/regression.py\", line 123, in initialise\n test = thing(self._dut)\n File \"/var/lib/jenkins/workspace/cocotb_icarus_x86_64/cocotb/decorators.py\", line 356, in _wrapped_test\n raise_error(self, str(e))\n File \"/var/lib/jenkins/workspace/cocotb_icarus_x86_64/cocotb/result.py\", line 42, in raise_error\n if sys.version_info.major >= 3:\nAttributeError: 'tuple' object has no attribute 'major'\n```\n\n", "before_files": [{"content": "''' Copyright (c) 2013 Potential Ventures Ltd\nCopyright (c) 2013 SolarFlare Communications Inc\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n * Neither the name of Potential Ventures Ltd,\n SolarFlare Communications Inc nor the\n names of its contributors may be used to endorse or promote products\n derived from this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. 
IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY\nDIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\nLOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\nON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. '''\n\n# TODO: Coule use cStringIO?\nimport traceback\nimport sys\n#from StringIO import StringIO\nfrom io import StringIO, BytesIO\n\ndef raise_error(obj, msg):\n \"\"\"\n Creates a TestError exception and raises it after printing a traceback\n\n obj has a log method\n msg is a string\n \"\"\"\n exc_type, exc_value, exc_traceback = sys.exc_info()\n if sys.version_info.major >= 3:\n buff = StringIO()\n traceback.print_tb(exc_traceback, file=buff)\n else:\n buff_bytes = BytesIO()\n traceback.print_tb(exc_traceback, file=buff_bytes)\n buff = StringIO(buff_bytes.getvalue().decode(\"UTF-8\"))\n obj.log.error(\"%s\\n%s\" % (msg, buff.getvalue()))\n exception = TestError(msg)\n exception.stderr.write(buff.getvalue())\n raise exception\n\ndef create_error(obj, msg):\n \"\"\"\n As above, but return the exception rather than raise it, simply to avoid\n too many levels of nested try/except blocks\n \"\"\"\n try:\n raise_error(obj, msg)\n except TestError as error:\n return error\n return TestError(\"Creating error traceback failed\")\n\n\nclass ReturnValue(StopIteration):\n def __init__(self, retval):\n self.retval = retval\n\nclass TestComplete(StopIteration):\n \"\"\"\n Exceptions are used to pass test results around.\n \"\"\"\n def __init__(self, *args, **kwargs):\n super(TestComplete, self).__init__(*args, **kwargs)\n self.stdout = StringIO()\n self.stderr = StringIO()\n\nclass TestError(TestComplete): pass\n\nclass TestFailure(TestComplete): pass\n\nclass TestSuccess(TestComplete): pass\n\nclass SimFailure(TestComplete): pass\n", "path": "cocotb/result.py"}]}
1,978
128
gh_patches_debug_19252
rasdani/github-patches
git_diff
Kinto__kinto-658
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [cliquet] Add a way to change the port used when using kinto start. original: https://github.com/mozilla-services/cliquet/issues/686 All in title </issue> <code> [start of kinto/__main__.py] 1 from __future__ import print_function 2 import argparse 3 import os 4 import sys 5 6 from six.moves import input 7 from kinto.core import scripts 8 from pyramid.scripts import pserve 9 from pyramid.paster import bootstrap 10 from kinto import __version__ 11 from kinto.config import init 12 13 CONFIG_FILE = 'config/kinto.ini' 14 15 16 def main(args=None): 17 """The main routine.""" 18 if args is None: 19 args = sys.argv[1:] 20 21 parser = argparse.ArgumentParser(description="Kinto commands") 22 parser.add_argument('--ini', 23 help='Application configuration file', 24 dest='ini_file', 25 required=False, 26 default=CONFIG_FILE) 27 parser.add_argument('--backend', 28 help='Specify backend', 29 dest='backend', 30 required=False, 31 default=None) 32 33 parser.add_argument('-v', '--version', 34 action='version', version=__version__, 35 help='Print the Kinto version and exit.') 36 37 subparsers = parser.add_subparsers(title='subcommands', 38 description='valid subcommands', 39 dest='subcommand', 40 help='init/start/migrate') 41 subparsers.required = True 42 43 parser_init = subparsers.add_parser('init') 44 parser_init.set_defaults(which='init') 45 46 parser_migrate = subparsers.add_parser('migrate') 47 parser_migrate.set_defaults(which='migrate') 48 49 parser_start = subparsers.add_parser('start') 50 parser_start.add_argument('--reload', 51 action='store_true', 52 help='Restart when code or config changes', 53 required=False, 54 default=False) 55 parser_start.set_defaults(which='start') 56 57 parsed_args = vars(parser.parse_args(args)) 58 59 config_file = parsed_args['ini_file'] 60 61 if parsed_args['which'] == 'init': 62 if os.path.exists(config_file): 63 print("%s already exists." % config_file, file=sys.stderr) 64 return 1 65 66 backend = parsed_args['backend'] 67 if not backend: 68 while True: 69 prompt = ("Select the backend you would like to use: " 70 "(1 - postgresql, 2 - redis, default - memory) ") 71 answer = input(prompt).strip() 72 try: 73 backends = {"1": "postgresql", "2": "redis", "": "memory"} 74 backend = backends[answer] 75 break 76 except KeyError: 77 pass 78 79 init(config_file, backend) 80 81 # Install postgresql libraries if necessary 82 if backend == "postgresql": 83 try: 84 import psycopg2 # NOQA 85 except ImportError: 86 import pip 87 pip.main(['install', "kinto[postgresql]"]) 88 89 elif parsed_args['which'] == 'migrate': 90 env = bootstrap(config_file) 91 scripts.migrate(env) 92 93 elif parsed_args['which'] == 'start': 94 pserve_argv = ['pserve', config_file] 95 if parsed_args['reload']: 96 pserve_argv.append('--reload') 97 pserve.main(pserve_argv) 98 99 return 0 100 [end of kinto/__main__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/kinto/__main__.py b/kinto/__main__.py --- a/kinto/__main__.py +++ b/kinto/__main__.py @@ -52,6 +52,11 @@ help='Restart when code or config changes', required=False, default=False) + parser_start.add_argument('--port', + type=int, + help='Listening port number', + required=False, + default=8888) parser_start.set_defaults(which='start') parsed_args = vars(parser.parse_args(args)) @@ -94,6 +99,7 @@ pserve_argv = ['pserve', config_file] if parsed_args['reload']: pserve_argv.append('--reload') + pserve_argv.append('http_port=%s' % parsed_args['port']) pserve.main(pserve_argv) return 0
{"golden_diff": "diff --git a/kinto/__main__.py b/kinto/__main__.py\n--- a/kinto/__main__.py\n+++ b/kinto/__main__.py\n@@ -52,6 +52,11 @@\n help='Restart when code or config changes',\n required=False,\n default=False)\n+ parser_start.add_argument('--port',\n+ type=int,\n+ help='Listening port number',\n+ required=False,\n+ default=8888)\n parser_start.set_defaults(which='start')\n \n parsed_args = vars(parser.parse_args(args))\n@@ -94,6 +99,7 @@\n pserve_argv = ['pserve', config_file]\n if parsed_args['reload']:\n pserve_argv.append('--reload')\n+ pserve_argv.append('http_port=%s' % parsed_args['port'])\n pserve.main(pserve_argv)\n \n return 0\n", "issue": "[cliquet] Add a way to change the port used when using kinto start.\noriginal: https://github.com/mozilla-services/cliquet/issues/686\n\nAll in title\n\n", "before_files": [{"content": "from __future__ import print_function\nimport argparse\nimport os\nimport sys\n\nfrom six.moves import input\nfrom kinto.core import scripts\nfrom pyramid.scripts import pserve\nfrom pyramid.paster import bootstrap\nfrom kinto import __version__\nfrom kinto.config import init\n\nCONFIG_FILE = 'config/kinto.ini'\n\n\ndef main(args=None):\n \"\"\"The main routine.\"\"\"\n if args is None:\n args = sys.argv[1:]\n\n parser = argparse.ArgumentParser(description=\"Kinto commands\")\n parser.add_argument('--ini',\n help='Application configuration file',\n dest='ini_file',\n required=False,\n default=CONFIG_FILE)\n parser.add_argument('--backend',\n help='Specify backend',\n dest='backend',\n required=False,\n default=None)\n\n parser.add_argument('-v', '--version',\n action='version', version=__version__,\n help='Print the Kinto version and exit.')\n\n subparsers = parser.add_subparsers(title='subcommands',\n description='valid subcommands',\n dest='subcommand',\n help='init/start/migrate')\n subparsers.required = True\n\n parser_init = subparsers.add_parser('init')\n parser_init.set_defaults(which='init')\n\n parser_migrate = subparsers.add_parser('migrate')\n parser_migrate.set_defaults(which='migrate')\n\n parser_start = subparsers.add_parser('start')\n parser_start.add_argument('--reload',\n action='store_true',\n help='Restart when code or config changes',\n required=False,\n default=False)\n parser_start.set_defaults(which='start')\n\n parsed_args = vars(parser.parse_args(args))\n\n config_file = parsed_args['ini_file']\n\n if parsed_args['which'] == 'init':\n if os.path.exists(config_file):\n print(\"%s already exists.\" % config_file, file=sys.stderr)\n return 1\n\n backend = parsed_args['backend']\n if not backend:\n while True:\n prompt = (\"Select the backend you would like to use: \"\n \"(1 - postgresql, 2 - redis, default - memory) \")\n answer = input(prompt).strip()\n try:\n backends = {\"1\": \"postgresql\", \"2\": \"redis\", \"\": \"memory\"}\n backend = backends[answer]\n break\n except KeyError:\n pass\n\n init(config_file, backend)\n\n # Install postgresql libraries if necessary\n if backend == \"postgresql\":\n try:\n import psycopg2 # NOQA\n except ImportError:\n import pip\n pip.main(['install', \"kinto[postgresql]\"])\n\n elif parsed_args['which'] == 'migrate':\n env = bootstrap(config_file)\n scripts.migrate(env)\n\n elif parsed_args['which'] == 'start':\n pserve_argv = ['pserve', config_file]\n if parsed_args['reload']:\n pserve_argv.append('--reload')\n pserve.main(pserve_argv)\n\n return 0\n", "path": "kinto/__main__.py"}]}
1,406
195
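
The kinto diff forwards the port as a trailing `http_port=...` assignment, which `pserve` substitutes into `%(http_port)s` placeholders in the ini file — so it only takes effect if the server section references that variable. A sketch of how the argument list is assembled; the config path here is illustrative:

```python
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--port", type=int, default=8888)
parser.add_argument("--reload", action="store_true")
args = parser.parse_args(["--port", "9999"])

# pserve accepts trailing NAME=VALUE pairs and exposes them to the ini
# file as %(NAME)s substitutions (PasteDeploy global variables).
pserve_argv = ["pserve", "config/kinto.ini"]
if args.reload:
    pserve_argv.append("--reload")
pserve_argv.append("http_port=%s" % args.port)
print(pserve_argv)  # ['pserve', 'config/kinto.ini', 'http_port=9999']
```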
gh_patches_debug_11000
rasdani/github-patches
git_diff
sotetsuk__pgx-907
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add `pgx.__version__` </issue> <code> [start of setup.py] 1 from setuptools import find_packages, setup 2 from pathlib import Path 3 4 long_description = (Path(__file__).parent / "README.md").read_text() 5 6 setup( 7 name="pgx", 8 version="0.7.4", 9 description="GPU/TPU-accelerated parallel game simulators for reinforcement learning (RL)", 10 long_description=long_description, 11 long_description_content_type="text/markdown", 12 url="https://github.com/sotetsuk/pgx", 13 author="Sotetsu KOYAMADA", 14 author_email="[email protected]", 15 keywords="", 16 packages=find_packages(), 17 package_data={ 18 "": ["LICENSE", "*.svg", "_src/assets/*.npy", "_mahjong/*.json"] 19 }, 20 include_package_data=True, 21 install_requires=[ 22 "jax>=0.3.25", # JAX version on Colab (TPU) 23 "svgwrite", 24 "typing_extensions", 25 ], 26 classifiers=[ 27 "Programming Language :: Python :: 3.8", 28 "Programming Language :: Python :: 3.9", 29 "Programming Language :: Python :: 3.10", 30 ], 31 ) 32 [end of setup.py] [start of pgx/__init__.py] 1 from pgx._src.api_test import v1_api_test 2 from pgx._src.visualizer import ( 3 save_svg, 4 save_svg_animation, 5 set_visualization_config, 6 ) 7 from pgx.v1 import Env, EnvId, State, available_envs, make 8 9 __all__ = [ 10 # v1 api components 11 "State", 12 "Env", 13 "EnvId", 14 "make", 15 "available_envs", 16 # visualization 17 "set_visualization_config", 18 "save_svg", 19 "save_svg_animation", 20 # api tests 21 "v1_api_test", 22 ] 23 [end of pgx/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pgx/__init__.py b/pgx/__init__.py --- a/pgx/__init__.py +++ b/pgx/__init__.py @@ -6,6 +6,8 @@ ) from pgx.v1 import Env, EnvId, State, available_envs, make +__version__ = "0.8.0" + __all__ = [ # v1 api components "State", diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -5,7 +5,6 @@ setup( name="pgx", - version="0.7.4", description="GPU/TPU-accelerated parallel game simulators for reinforcement learning (RL)", long_description=long_description, long_description_content_type="text/markdown",
{"golden_diff": "diff --git a/pgx/__init__.py b/pgx/__init__.py\n--- a/pgx/__init__.py\n+++ b/pgx/__init__.py\n@@ -6,6 +6,8 @@\n )\n from pgx.v1 import Env, EnvId, State, available_envs, make\n \n+__version__ = \"0.8.0\"\n+\n __all__ = [\n # v1 api components\n \"State\",\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -5,7 +5,6 @@\n \n setup(\n name=\"pgx\",\n- version=\"0.7.4\",\n description=\"GPU/TPU-accelerated parallel game simulators for reinforcement learning (RL)\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n", "issue": "Add `pgx.__version__`\n\n", "before_files": [{"content": "from setuptools import find_packages, setup\nfrom pathlib import Path\n\nlong_description = (Path(__file__).parent / \"README.md\").read_text()\n\nsetup(\n name=\"pgx\",\n version=\"0.7.4\",\n description=\"GPU/TPU-accelerated parallel game simulators for reinforcement learning (RL)\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/sotetsuk/pgx\",\n author=\"Sotetsu KOYAMADA\",\n author_email=\"[email protected]\",\n keywords=\"\",\n packages=find_packages(),\n package_data={\n \"\": [\"LICENSE\", \"*.svg\", \"_src/assets/*.npy\", \"_mahjong/*.json\"]\n },\n include_package_data=True,\n install_requires=[\n \"jax>=0.3.25\", # JAX version on Colab (TPU)\n \"svgwrite\",\n \"typing_extensions\",\n ],\n classifiers=[\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n ],\n)\n", "path": "setup.py"}, {"content": "from pgx._src.api_test import v1_api_test\nfrom pgx._src.visualizer import (\n save_svg,\n save_svg_animation,\n set_visualization_config,\n)\nfrom pgx.v1 import Env, EnvId, State, available_envs, make\n\n__all__ = [\n # v1 api components\n \"State\",\n \"Env\",\n \"EnvId\",\n \"make\",\n \"available_envs\",\n # visualization\n \"set_visualization_config\",\n \"save_svg\",\n \"save_svg_animation\",\n # api tests\n \"v1_api_test\",\n]\n", "path": "pgx/__init__.py"}]}
1,031
183
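
The pgx change single-sources the version in `pgx/__init__.py` and removes the literal from `setup.py`; modern setuptools can then pick it up declaratively (e.g. via `attr:` in `setup.cfg`). Another common pattern, sketched here under the assumption that `__version__` is assigned as a one-line string literal, is to regex-parse the file so the build never has to import the package:

```python
import re
from pathlib import Path


def read_version(init_path="pgx/__init__.py"):
    # Assumes a literal line like: __version__ = "0.8.0"
    text = Path(init_path).read_text()
    match = re.search(r'^__version__\s*=\s*"([^"]+)"', text, re.MULTILINE)
    if match is None:
        raise RuntimeError(f"__version__ not found in {init_path}")
    return match.group(1)


# In setup.py one could then write: setup(name="pgx", version=read_version(), ...)
```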
gh_patches_debug_36196
rasdani/github-patches
git_diff
conda__conda-3969
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Invalid requirement while trying to use pip options
Hi!

I have in my pip section inside environment.yaml file this line

```- rep --install-option='--no-deps'```

while I am trying to update my environment I am getting this error

```Invalid requirement: 'rep --install-option='--no-deps''```

if I run `pip install -r requirements.txt` and I have that line as it is in requirements.txt it works.
</issue>
<code>
[start of conda_env/installers/pip.py]
1 from __future__ import absolute_import
2 import subprocess
3 from conda_env.pip_util import pip_args
4 from conda.exceptions import CondaValueError
5 
6 
7 def install(prefix, specs, args, env, prune=False):
8     pip_cmd = pip_args(prefix) + ['install', ] + specs
9     process = subprocess.Popen(pip_cmd, universal_newlines=True)
10     process.communicate()
11 
12     if process.returncode != 0:
13         raise CondaValueError("pip returned an error.")
[end of conda_env/installers/pip.py]
[start of conda/egg_info.py]
1 """
2 Functions related to core conda functionality that relates to manually
3 installed Python packages, e.g. using "python setup.py install", or "pip".
4 """
5 from __future__ import absolute_import, division, print_function, unicode_literals
6 
7 from io import open
8 import os
9 from os.path import isdir, isfile, join
10 import re
11 import sys
12 
13 from .common.compat import itervalues, on_win
14 from .core.linked_data import linked_data
15 from .misc import rel_path
16 from .models.dist import Dist
17 
18 
19 def get_site_packages_dir(installed_pkgs):
20     for info in itervalues(installed_pkgs):
21         if info['name'] == 'python':
22             if on_win:
23                 stdlib_dir = 'Lib'
24             else:
25                 py_ver = info['version'][:3]
26                 stdlib_dir = 'lib/python%s' % py_ver
27             return join(stdlib_dir, 'site-packages')
28     return None
29 
30 
31 def get_egg_info_files(sp_dir):
32     for fn in os.listdir(sp_dir):
33         if not fn.endswith(('.egg', '.egg-info', '.dist-info')):
34             continue
35         path = join(sp_dir, fn)
36         if isfile(path):
37             yield path
38         elif isdir(path):
39             for path2 in [join(path, 'PKG-INFO'),
40                           join(path, 'EGG-INFO', 'PKG-INFO'),
41                           join(path, 'METADATA')]:
42                 if isfile(path2):
43                     yield path2
44 
45 
46 pat = re.compile(r'(\w+):\s*(\S+)', re.I)
47 def parse_egg_info(path):
48     """
49     Parse an .egg-info file and return its canonical distribution name
50     """
51     info = {}
52     for line in open(path, encoding='utf-8'):
53         line = line.strip()
54         m = pat.match(line)
55         if m:
56             key = m.group(1).lower()
57             info[key] = m.group(2)
58     try:
59         return '%(name)s-%(version)s-<pip>' % info
60     except KeyError:
61         pass
62     return None
63 
64 
65 def get_egg_info(prefix, all_pkgs=False):
66     """
67     Return a set of canonical names of all Python packages (in `prefix`),
68     by inspecting the .egg-info files inside site-packages.
69     By default, only untracked (not conda installed) .egg-info files are
70     considered. Setting `all_pkgs` to True changes this.
71 """ 72 installed_pkgs = linked_data(prefix) 73 sp_dir = get_site_packages_dir(installed_pkgs) 74 if sp_dir is None: 75 return set() 76 77 conda_files = set() 78 for info in itervalues(installed_pkgs): 79 conda_files.update(info.get('files', [])) 80 81 res = set() 82 for path in get_egg_info_files(join(prefix, sp_dir)): 83 f = rel_path(prefix, path) 84 if all_pkgs or f not in conda_files: 85 try: 86 dist = parse_egg_info(path) 87 except UnicodeDecodeError: 88 dist = None 89 if dist: 90 res.add(Dist(dist)) 91 return res 92 93 94 if __name__ == '__main__': 95 from pprint import pprint 96 pprint(get_egg_info(sys.prefix)) 97 [end of conda/egg_info.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/conda/egg_info.py b/conda/egg_info.py --- a/conda/egg_info.py +++ b/conda/egg_info.py @@ -30,6 +30,10 @@ def get_egg_info_files(sp_dir): for fn in os.listdir(sp_dir): + if fn.endswith('.egg-link'): + with open(join(sp_dir, fn), 'r') as reader: + for egg in get_egg_info_files(reader.readline().strip()): + yield egg if not fn.endswith(('.egg', '.egg-info', '.dist-info')): continue path = join(sp_dir, fn) diff --git a/conda_env/installers/pip.py b/conda_env/installers/pip.py --- a/conda_env/installers/pip.py +++ b/conda_env/installers/pip.py @@ -1,13 +1,56 @@ from __future__ import absolute_import + +import os +import os.path as op import subprocess +import tempfile from conda_env.pip_util import pip_args from conda.exceptions import CondaValueError -def install(prefix, specs, args, env, prune=False): - pip_cmd = pip_args(prefix) + ['install', ] + specs - process = subprocess.Popen(pip_cmd, universal_newlines=True) - process.communicate() +def _pip_install_via_requirements(prefix, specs, args, *_): + """ + Installs the pip dependencies in specs using a temporary pip requirements file. + + Args + ---- + prefix: string + The path to the python and pip executables. + + specs: iterable of strings + Each element should be a valid pip dependency. + See: https://pip.pypa.io/en/stable/user_guide/#requirements-files + https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format + """ + try: + pip_workdir = op.dirname(op.abspath(args.file)) + except AttributeError: + pip_workdir = None + requirements = None + try: + # Generate the temporary requirements file + requirements = tempfile.NamedTemporaryFile(mode='w', + prefix='condaenv.', + suffix='.requirements.txt', + dir=pip_workdir, + delete=False) + requirements.write('\n'.join(specs)) + requirements.close() + # pip command line... + pip_cmd = pip_args(prefix) + ['install', '-r', requirements.name] + # ...run it + process = subprocess.Popen(pip_cmd, + cwd=pip_workdir, + universal_newlines=True) + process.communicate() + if process.returncode != 0: + raise CondaValueError("pip returned an error") + finally: + # Win/Appveyor does not like it if we use context manager + delete=True. + # So we delete the temporary file in a finally block. + if requirements is not None and op.isfile(requirements.name): + os.remove(requirements.name) + - if process.returncode != 0: - raise CondaValueError("pip returned an error.") +# Conform to Installers API +install = _pip_install_via_requirements
{"golden_diff": "diff --git a/conda/egg_info.py b/conda/egg_info.py\n--- a/conda/egg_info.py\n+++ b/conda/egg_info.py\n@@ -30,6 +30,10 @@\n \n def get_egg_info_files(sp_dir):\n for fn in os.listdir(sp_dir):\n+ if fn.endswith('.egg-link'):\n+ with open(join(sp_dir, fn), 'r') as reader:\n+ for egg in get_egg_info_files(reader.readline().strip()):\n+ yield egg\n if not fn.endswith(('.egg', '.egg-info', '.dist-info')):\n continue\n path = join(sp_dir, fn)\ndiff --git a/conda_env/installers/pip.py b/conda_env/installers/pip.py\n--- a/conda_env/installers/pip.py\n+++ b/conda_env/installers/pip.py\n@@ -1,13 +1,56 @@\n from __future__ import absolute_import\n+\n+import os\n+import os.path as op\n import subprocess\n+import tempfile\n from conda_env.pip_util import pip_args\n from conda.exceptions import CondaValueError\n \n \n-def install(prefix, specs, args, env, prune=False):\n- pip_cmd = pip_args(prefix) + ['install', ] + specs\n- process = subprocess.Popen(pip_cmd, universal_newlines=True)\n- process.communicate()\n+def _pip_install_via_requirements(prefix, specs, args, *_):\n+ \"\"\"\n+ Installs the pip dependencies in specs using a temporary pip requirements file.\n+\n+ Args\n+ ----\n+ prefix: string\n+ The path to the python and pip executables.\n+\n+ specs: iterable of strings\n+ Each element should be a valid pip dependency.\n+ See: https://pip.pypa.io/en/stable/user_guide/#requirements-files\n+ https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format\n+ \"\"\"\n+ try:\n+ pip_workdir = op.dirname(op.abspath(args.file))\n+ except AttributeError:\n+ pip_workdir = None\n+ requirements = None\n+ try:\n+ # Generate the temporary requirements file\n+ requirements = tempfile.NamedTemporaryFile(mode='w',\n+ prefix='condaenv.',\n+ suffix='.requirements.txt',\n+ dir=pip_workdir,\n+ delete=False)\n+ requirements.write('\\n'.join(specs))\n+ requirements.close()\n+ # pip command line...\n+ pip_cmd = pip_args(prefix) + ['install', '-r', requirements.name]\n+ # ...run it\n+ process = subprocess.Popen(pip_cmd,\n+ cwd=pip_workdir,\n+ universal_newlines=True)\n+ process.communicate()\n+ if process.returncode != 0:\n+ raise CondaValueError(\"pip returned an error\")\n+ finally:\n+ # Win/Appveyor does not like it if we use context manager + delete=True.\n+ # So we delete the temporary file in a finally block.\n+ if requirements is not None and op.isfile(requirements.name):\n+ os.remove(requirements.name)\n+\n \n- if process.returncode != 0:\n- raise CondaValueError(\"pip returned an error.\")\n+# Conform to Installers API\n+install = _pip_install_via_requirements\n", "issue": "Invalid requirement while trying to use pip options\nHi!\r\n\r\nI have in my pip section inside envrionment.yaml file this line\r\n```- rep --install-option='--no-deps'```\r\n\r\nwhile I am trying to update my environment I am getting this error\r\n\r\n```Invalid requirement: 'rep --install-option='--no-deps''```\r\n\r\nif I do pip -r requirements.txt and I have that line as it is in requirements.txt it works.\r\n\n", "before_files": [{"content": "from __future__ import absolute_import\nimport subprocess\nfrom conda_env.pip_util import pip_args\nfrom conda.exceptions import CondaValueError\n\n\ndef install(prefix, specs, args, env, prune=False):\n pip_cmd = pip_args(prefix) + ['install', ] + specs\n process = subprocess.Popen(pip_cmd, universal_newlines=True)\n process.communicate()\n\n if process.returncode != 0:\n raise CondaValueError(\"pip returned an error.\")\n", "path": "conda_env/installers/pip.py"}, 
{"content": "\"\"\"\nFunctions related to core conda functionality that relates to manually\ninstalled Python packages, e.g. using \"python setup.py install\", or \"pip\".\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom io import open\nimport os\nfrom os.path import isdir, isfile, join\nimport re\nimport sys\n\nfrom .common.compat import itervalues, on_win\nfrom .core.linked_data import linked_data\nfrom .misc import rel_path\nfrom .models.dist import Dist\n\n\ndef get_site_packages_dir(installed_pkgs):\n for info in itervalues(installed_pkgs):\n if info['name'] == 'python':\n if on_win:\n stdlib_dir = 'Lib'\n else:\n py_ver = info['version'][:3]\n stdlib_dir = 'lib/python%s' % py_ver\n return join(stdlib_dir, 'site-packages')\n return None\n\n\ndef get_egg_info_files(sp_dir):\n for fn in os.listdir(sp_dir):\n if not fn.endswith(('.egg', '.egg-info', '.dist-info')):\n continue\n path = join(sp_dir, fn)\n if isfile(path):\n yield path\n elif isdir(path):\n for path2 in [join(path, 'PKG-INFO'),\n join(path, 'EGG-INFO', 'PKG-INFO'),\n join(path, 'METADATA')]:\n if isfile(path2):\n yield path2\n\n\npat = re.compile(r'(\\w+):\\s*(\\S+)', re.I)\ndef parse_egg_info(path):\n \"\"\"\n Parse an .egg-info file and return its canonical distribution name\n \"\"\"\n info = {}\n for line in open(path, encoding='utf-8'):\n line = line.strip()\n m = pat.match(line)\n if m:\n key = m.group(1).lower()\n info[key] = m.group(2)\n try:\n return '%(name)s-%(version)s-<pip>' % info\n except KeyError:\n pass\n return None\n\n\ndef get_egg_info(prefix, all_pkgs=False):\n \"\"\"\n Return a set of canonical names of all Python packages (in `prefix`),\n by inspecting the .egg-info files inside site-packages.\n By default, only untracked (not conda installed) .egg-info files are\n considered. Setting `all_pkgs` to True changes this.\n \"\"\"\n installed_pkgs = linked_data(prefix)\n sp_dir = get_site_packages_dir(installed_pkgs)\n if sp_dir is None:\n return set()\n\n conda_files = set()\n for info in itervalues(installed_pkgs):\n conda_files.update(info.get('files', []))\n\n res = set()\n for path in get_egg_info_files(join(prefix, sp_dir)):\n f = rel_path(prefix, path)\n if all_pkgs or f not in conda_files:\n try:\n dist = parse_egg_info(path)\n except UnicodeDecodeError:\n dist = None\n if dist:\n res.add(Dist(dist))\n return res\n\n\nif __name__ == '__main__':\n from pprint import pprint\n pprint(get_egg_info(sys.prefix))\n", "path": "conda/egg_info.py"}]}
1,666
716
gh_patches_debug_52514
rasdani/github-patches
git_diff
rucio__rucio-2079
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Upgrade of dependencies for 1.19.0 Modification ------------ # pip-requires SQLAlchemy 1.2.7 --> 1.2.16 alembic 0.9.9 --> 1.0.6 python-dateutil 2.7.2 --> 2.7.5 s3cmd 2.0.1 --> 2.0.2 stomp.py 4.1.20 --> 4.1.21 dnspython 1.15.0 --> 1.16.0 geoip2 2.8.0 --> 2.9.0 maxminddb 1.3.0 --> 1.4.1 pycparser 2.18 --> 2.19 googleapis-common-protos 1.5.3 --> 1.5.6 httplib2 0.11.3 --> 0.12.0 pyOpenSSL 18.0.0 --> 19.0.0 cryptography 2.3.1 --> 2.4.2 oauth2client 4.1.2 --> 4.1.3 protobuf 3.5.2.post1 --> 3.6.1 grpcio 1.11.0 --> 1.18.0 pyasn1 0.4.2 --> 0.4.5 pyasn1-modules 0.2.1 --> 0.2.3 rsa 3.4.2 --> 4.0 redis 2.10.6 --> 3.0.1 numpy 1.14.2 --> 1.16.0 paramiko 2.4.1 --> 2.4.2 Flask 0.12.4 --> 1.0.2 idna 2.6 --> 2.8 MyProxyClient 2.0.1 --> 2.1.0 # pip-requires-test Paste 2.0.3 --> 3.0.6 coverage 4.4.2 --> 4.5.2 Sphinx 1.6.5 --> 1.8.3 sphinx-rtd-theme 0.2.4 --> 0.4.2 sphinxcontrib-httpdomain 1.6.0 --> 1.7.0 Pygments 2.2.0 --> 2.3.1 pyflakes 1.6.0 --> 2.0.0 flake8 3.5.0 --> 3.6.0 pylint 1.7.4 --> 2.2.2 virtualenv 15.1.0 --> 16.2.0 tox 2.9.1 --> 3.7.0 pytest 3.2.5 --> 4.1.1 pytest-xdist 1.20.1 --> 1.26.0 pytz 2017.3 --> 2018.9 Babel 2.5.1 --> 2.6.0 subprocess32 3.2.7 --> 3.5.3 pycodestyle 2.3.1 --> 2.4.0 </issue> <code> [start of lib/rucio/db/sqla/migrate_repo/versions/3345511706b8_replicas_table_pk_definition_is_in_.py] 1 # Copyright 2013-2019 CERN for the benefit of the ATLAS collaboration. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 # 15 # Authors: 16 # - Martin Barisits <[email protected]>, 2019 17 # 18 # Topic: Replicas table PK definition is in wrong order 19 # Revision ID: 3345511706b8 20 # Revises: 9eb936a81eb1 21 # Creation Date: 2019-01-30 14:20:35.058889 22 23 from alembic.op import (create_primary_key, drop_constraint) 24 25 from alembic import context 26 27 28 # revision identifiers used by alembic 29 revision = '3345511706b8' # pylint: disable=invalid-name 30 down_revision = 'bf3baa1c1474' # pylint: disable=invalid-name 31 32 33 def upgrade(): 34 ''' 35 Upgrade the database to this revision 36 ''' 37 if context.get_context().dialect.name != 'sqlite': 38 drop_constraint('REPLICAS_PK', 'replicas') 39 create_primary_key('REPLICAS_PK', 'replicas', ['scope', 'name', 'rse_id']) 40 41 42 def downgrade(): 43 ''' 44 Downgrade the database to the previous revision 45 ''' 46 if context.get_context().dialect.name != 'sqlite': 47 drop_constraint('REPLICAS_PK', 'replicas') 48 create_primary_key('REPLICAS_PK', 'replicas', ['rse_id', 'scope', 'name']) 49 [end of lib/rucio/db/sqla/migrate_repo/versions/3345511706b8_replicas_table_pk_definition_is_in_.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/lib/rucio/db/sqla/migrate_repo/versions/3345511706b8_replicas_table_pk_definition_is_in_.py b/lib/rucio/db/sqla/migrate_repo/versions/3345511706b8_replicas_table_pk_definition_is_in_.py --- a/lib/rucio/db/sqla/migrate_repo/versions/3345511706b8_replicas_table_pk_definition_is_in_.py +++ b/lib/rucio/db/sqla/migrate_repo/versions/3345511706b8_replicas_table_pk_definition_is_in_.py @@ -27,7 +27,7 @@ # revision identifiers used by alembic revision = '3345511706b8' # pylint: disable=invalid-name -down_revision = 'bf3baa1c1474' # pylint: disable=invalid-name +down_revision = '01eaf73ab656' # pylint: disable=invalid-name def upgrade():
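The fix is a single re-pointed `down_revision`: alembic orders migrations by following these parent links, so a wrong parent forks or breaks the chain. A toy walker showing why the corrected pointer yields a linear history (the root revision below is assumed purely for illustration):

```python
def order_revisions(parents):
    """Order revisions by following down_revision links from the root.

    `parents` maps revision id -> down_revision id (None for the root).
    Toy version: assumes a strictly linear chain, one child per parent.
    """
    children = {down: rev for rev, down in parents.items()}
    chain, current = [], None
    while current in children:
        current = children[current]
        chain.append(current)
    return chain

print(order_revisions({
    "01eaf73ab656": None,            # assumed root, for illustration only
    "3345511706b8": "01eaf73ab656",  # was "bf3baa1c1474", off-chain
}))
# -> ['01eaf73ab656', '3345511706b8']
```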
{"golden_diff": "diff --git a/lib/rucio/db/sqla/migrate_repo/versions/3345511706b8_replicas_table_pk_definition_is_in_.py b/lib/rucio/db/sqla/migrate_repo/versions/3345511706b8_replicas_table_pk_definition_is_in_.py\n--- a/lib/rucio/db/sqla/migrate_repo/versions/3345511706b8_replicas_table_pk_definition_is_in_.py\n+++ b/lib/rucio/db/sqla/migrate_repo/versions/3345511706b8_replicas_table_pk_definition_is_in_.py\n@@ -27,7 +27,7 @@\n \n # revision identifiers used by alembic\n revision = '3345511706b8' # pylint: disable=invalid-name\n-down_revision = 'bf3baa1c1474' # pylint: disable=invalid-name\n+down_revision = '01eaf73ab656' # pylint: disable=invalid-name\n \n \n def upgrade():\n", "issue": "Upgrade of dependencies for 1.19.0\nModification\r\n------------\r\n# pip-requires\r\nSQLAlchemy 1.2.7 --> 1.2.16\r\nalembic 0.9.9 --> 1.0.6\r\npython-dateutil 2.7.2 --> 2.7.5\r\ns3cmd 2.0.1 --> 2.0.2\r\nstomp.py 4.1.20 --> 4.1.21\r\ndnspython 1.15.0 --> 1.16.0\r\ngeoip2 2.8.0 --> 2.9.0\r\nmaxminddb 1.3.0 --> 1.4.1\r\npycparser 2.18 --> 2.19\r\ngoogleapis-common-protos 1.5.3 --> 1.5.6\r\nhttplib2 0.11.3 --> 0.12.0\r\npyOpenSSL 18.0.0 --> 19.0.0\r\ncryptography 2.3.1 --> 2.4.2\r\noauth2client 4.1.2 --> 4.1.3\r\nprotobuf 3.5.2.post1 --> 3.6.1\r\ngrpcio 1.11.0 --> 1.18.0\r\npyasn1 0.4.2 --> 0.4.5\r\npyasn1-modules 0.2.1 --> 0.2.3\r\nrsa 3.4.2 --> 4.0\r\nredis 2.10.6 --> 3.0.1\r\nnumpy 1.14.2 --> 1.16.0\r\nparamiko 2.4.1 --> 2.4.2\r\nFlask 0.12.4 --> 1.0.2\r\nidna 2.6 --> 2.8\r\nMyProxyClient 2.0.1 --> 2.1.0\r\n\r\n# pip-requires-test\r\nPaste 2.0.3 --> 3.0.6\r\ncoverage 4.4.2 --> 4.5.2\r\nSphinx 1.6.5 --> 1.8.3\r\nsphinx-rtd-theme 0.2.4 --> 0.4.2\r\nsphinxcontrib-httpdomain 1.6.0 --> 1.7.0\r\nPygments 2.2.0 --> 2.3.1\r\npyflakes 1.6.0 --> 2.0.0\r\nflake8 3.5.0 --> 3.6.0\r\npylint 1.7.4 --> 2.2.2\r\nvirtualenv 15.1.0 --> 16.2.0\r\ntox 2.9.1 --> 3.7.0\r\npytest 3.2.5 --> 4.1.1\r\npytest-xdist 1.20.1 --> 1.26.0\r\npytz 2017.3 --> 2018.9\r\nBabel 2.5.1 --> 2.6.0\r\nsubprocess32 3.2.7 --> 3.5.3\r\npycodestyle 2.3.1 --> 2.4.0\n", "before_files": [{"content": "# Copyright 2013-2019 CERN for the benefit of the ATLAS collaboration.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Authors:\n# - Martin Barisits <[email protected]>, 2019\n#\n# Topic: Replicas table PK definition is in wrong order\n# Revision ID: 3345511706b8\n# Revises: 9eb936a81eb1\n# Creation Date: 2019-01-30 14:20:35.058889\n\nfrom alembic.op import (create_primary_key, drop_constraint)\n\nfrom alembic import context\n\n\n# revision identifiers used by alembic\nrevision = '3345511706b8' # pylint: disable=invalid-name\ndown_revision = 'bf3baa1c1474' # pylint: disable=invalid-name\n\n\ndef upgrade():\n '''\n Upgrade the database to this revision\n '''\n if context.get_context().dialect.name != 'sqlite':\n drop_constraint('REPLICAS_PK', 'replicas')\n create_primary_key('REPLICAS_PK', 'replicas', ['scope', 'name', 'rse_id'])\n\n\ndef downgrade():\n '''\n Downgrade the database to the previous revision\n '''\n if context.get_context().dialect.name 
!= 'sqlite':\n drop_constraint('REPLICAS_PK', 'replicas')\n create_primary_key('REPLICAS_PK', 'replicas', ['rse_id', 'scope', 'name'])\n", "path": "lib/rucio/db/sqla/migrate_repo/versions/3345511706b8_replicas_table_pk_definition_is_in_.py"}]}
1,891
249
gh_patches_debug_1132
rasdani/github-patches
git_diff
scikit-hep__pyhf-1790
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Guard SCHEMA_VERSION from version bumps I don't think it is going to be possible to guard the `SCHEMA_VERSION` from `bump2version` so we might need to look for a replacement for `bump2version` that gives guard support. This is going to be a problem when https://github.com/scikit-hep/pyhf/blob/6b0a9317b14da2a452f51d089cb9e493c8f19347/.bumpversion.cfg#L1-L2 hits `1.0.0` and conflicts with https://github.com/scikit-hep/pyhf/blob/f824afe77d9e48e90651931700ccfc3d3c268c18/src/pyhf/utils.py#L13 and also has to properly pick up the multiple correct instances in https://github.com/scikit-hep/pyhf/blob/f824afe77d9e48e90651931700ccfc3d3c268c18/src/pyhf/utils.py#L145 _Originally posted by @matthewfeickert in https://github.com/scikit-hep/pyhf/issues/1218#issuecomment-744590434_ </issue> <code> [start of setup.py] 1 from setuptools import setup 2 3 extras_require = { 4 'shellcomplete': ['click_completion'], 5 'tensorflow': [ 6 'tensorflow>=2.3.1', # c.f. https://github.com/tensorflow/tensorflow/pull/40789 7 'tensorflow-probability>=0.11.0', # c.f. PR #1657 8 ], 9 'torch': ['torch>=1.10.0'], # c.f. PR #1657 10 'jax': ['jax>=0.2.10', 'jaxlib>=0.1.60,!=0.1.68'], # c.f. Issue 1501 11 'xmlio': ['uproot>=4.1.1'], # c.f. PR #1567 12 'minuit': ['iminuit>=2.4.0'], # c.f. PR #1306 13 } 14 extras_require['backends'] = sorted( 15 set( 16 extras_require['tensorflow'] 17 + extras_require['torch'] 18 + extras_require['jax'] 19 + extras_require['minuit'] 20 ) 21 ) 22 extras_require['contrib'] = sorted({'matplotlib', 'requests'}) 23 extras_require['lint'] = sorted({'flake8', 'black>=22.1.0'}) 24 25 extras_require['test'] = sorted( 26 set( 27 extras_require['backends'] 28 + extras_require['xmlio'] 29 + extras_require['contrib'] 30 + extras_require['shellcomplete'] 31 + [ 32 'scikit-hep-testdata>=0.4.11', 33 'pytest>=6.0', 34 'pytest-cov>=2.5.1', 35 'pytest-mock', 36 'requests-mock>=1.9.0', 37 'pytest-benchmark[histogram]', 38 'pytest-console-scripts', 39 'pytest-mpl', 40 'pydocstyle', 41 'papermill~=2.0', 42 'nteract-scrapbook~=0.2', 43 'jupyter', 44 'graphviz', 45 ] 46 ) 47 ) 48 extras_require['docs'] = sorted( 49 set( 50 extras_require['xmlio'] 51 + extras_require['contrib'] 52 + [ 53 'sphinx>=4.0.0', 54 'sphinxcontrib-bibtex~=2.1', 55 'sphinx-click', 56 'sphinx_rtd_theme', 57 'nbsphinx!=0.8.8', # c.f. https://github.com/spatialaudio/nbsphinx/issues/620 58 'ipywidgets', 59 'sphinx-issues', 60 'sphinx-copybutton>=0.3.2', 61 ] 62 ) 63 ) 64 extras_require['develop'] = sorted( 65 set( 66 extras_require['docs'] 67 + extras_require['lint'] 68 + extras_require['test'] 69 + [ 70 'nbdime', 71 'bump2version', 72 'ipython', 73 'pre-commit', 74 'check-manifest', 75 'codemetapy>=0.3.4', 76 'twine', 77 ] 78 ) 79 ) 80 extras_require['complete'] = sorted(set(sum(extras_require.values(), []))) 81 82 83 setup( 84 extras_require=extras_require, 85 use_scm_version=lambda: {'local_scheme': lambda version: ''}, 86 ) 87 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -68,7 +68,7 @@ + extras_require['test'] + [ 'nbdime', - 'bump2version', + 'tbump>=6.7.0', 'ipython', 'pre-commit', 'check-manifest',
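The dependency swap (`bump2version` to `tbump>=6.7.0`) exists because a blind search-and-replace bump collides with `SCHEMA_VERSION` once the package version reaches `1.0.0`. A toy illustration of the collision and of an assignment-anchored substitution (tbump's actual configuration mechanism differs; this only shows the targeting idea):

```python
import re

SRC = '__version__ = "1.0.0"\nSCHEMA_VERSION = "1.0.0"\n'

# Naive bump: rewrites every occurrence, clobbering the schema constant.
naive = SRC.replace("1.0.0", "1.0.1")
assert 'SCHEMA_VERSION = "1.0.1"' in naive  # the bug

# Guarded bump: anchored to the __version__ assignment only.
guarded = re.sub(r'(__version__ = ")[^"]+(")', r"\g<1>1.0.1\g<2>", SRC)
assert 'SCHEMA_VERSION = "1.0.0"' in guarded  # schema constant untouched
```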
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -68,7 +68,7 @@\n + extras_require['test']\n + [\n 'nbdime',\n- 'bump2version',\n+ 'tbump>=6.7.0',\n 'ipython',\n 'pre-commit',\n 'check-manifest',\n", "issue": "Guard SCHEMA_VERSION from version bumps\nI don't think it is going to be possible to guard the `SCHEMA_VERSION` from `bump2version` so we might need to look for a replacement for `bump2version` that gives guard support.\r\n\r\nThis is going to be a problem when \r\n\r\nhttps://github.com/scikit-hep/pyhf/blob/6b0a9317b14da2a452f51d089cb9e493c8f19347/.bumpversion.cfg#L1-L2\r\n\r\nhits `1.0.0` and conflicts with \r\n\r\nhttps://github.com/scikit-hep/pyhf/blob/f824afe77d9e48e90651931700ccfc3d3c268c18/src/pyhf/utils.py#L13\r\n\r\nand also has to properly pick up the multiple correct instances in \r\n\r\nhttps://github.com/scikit-hep/pyhf/blob/f824afe77d9e48e90651931700ccfc3d3c268c18/src/pyhf/utils.py#L145\r\n\r\n_Originally posted by @matthewfeickert in https://github.com/scikit-hep/pyhf/issues/1218#issuecomment-744590434_\n", "before_files": [{"content": "from setuptools import setup\n\nextras_require = {\n 'shellcomplete': ['click_completion'],\n 'tensorflow': [\n 'tensorflow>=2.3.1', # c.f. https://github.com/tensorflow/tensorflow/pull/40789\n 'tensorflow-probability>=0.11.0', # c.f. PR #1657\n ],\n 'torch': ['torch>=1.10.0'], # c.f. PR #1657\n 'jax': ['jax>=0.2.10', 'jaxlib>=0.1.60,!=0.1.68'], # c.f. Issue 1501\n 'xmlio': ['uproot>=4.1.1'], # c.f. PR #1567\n 'minuit': ['iminuit>=2.4.0'], # c.f. PR #1306\n}\nextras_require['backends'] = sorted(\n set(\n extras_require['tensorflow']\n + extras_require['torch']\n + extras_require['jax']\n + extras_require['minuit']\n )\n)\nextras_require['contrib'] = sorted({'matplotlib', 'requests'})\nextras_require['lint'] = sorted({'flake8', 'black>=22.1.0'})\n\nextras_require['test'] = sorted(\n set(\n extras_require['backends']\n + extras_require['xmlio']\n + extras_require['contrib']\n + extras_require['shellcomplete']\n + [\n 'scikit-hep-testdata>=0.4.11',\n 'pytest>=6.0',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'requests-mock>=1.9.0',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n 'papermill~=2.0',\n 'nteract-scrapbook~=0.2',\n 'jupyter',\n 'graphviz',\n ]\n )\n)\nextras_require['docs'] = sorted(\n set(\n extras_require['xmlio']\n + extras_require['contrib']\n + [\n 'sphinx>=4.0.0',\n 'sphinxcontrib-bibtex~=2.1',\n 'sphinx-click',\n 'sphinx_rtd_theme',\n 'nbsphinx!=0.8.8', # c.f. https://github.com/spatialaudio/nbsphinx/issues/620\n 'ipywidgets',\n 'sphinx-issues',\n 'sphinx-copybutton>=0.3.2',\n ]\n )\n)\nextras_require['develop'] = sorted(\n set(\n extras_require['docs']\n + extras_require['lint']\n + extras_require['test']\n + [\n 'nbdime',\n 'bump2version',\n 'ipython',\n 'pre-commit',\n 'check-manifest',\n 'codemetapy>=0.3.4',\n 'twine',\n ]\n )\n)\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\nsetup(\n extras_require=extras_require,\n use_scm_version=lambda: {'local_scheme': lambda version: ''},\n)\n", "path": "setup.py"}]}
1,710
85
gh_patches_debug_2914
rasdani/github-patches
git_diff
gratipay__gratipay.com-2792
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> broken facebook link when no user_name If all we have is a user_id, we construct the URL improperly. In that case we need: `http://facebook.com/profile.php?id=$ID` But we have: `http://facebook.com/None` broken facebook link when no user_name If all we have is a user_id, we construct the URL improperly. In that case we need: `http://facebook.com/profile.php?id=$ID` But we have: `http://facebook.com/None` </issue> <code> [start of gratipay/elsewhere/facebook.py] 1 from __future__ import absolute_import, division, print_function, unicode_literals 2 3 from gratipay.elsewhere import PlatformOAuth2 4 from gratipay.elsewhere._extractors import key 5 6 7 class Facebook(PlatformOAuth2): 8 9 # Platform attributes 10 name = 'facebook' 11 display_name = 'Facebook' 12 account_url = 'https://www.facebook.com/{user_name}' 13 14 # Auth attributes 15 auth_url = 'https://www.facebook.com/dialog/oauth' 16 access_token_url = 'https://graph.facebook.com/oauth/access_token' 17 oauth_default_scope = ['public_profile,email'] 18 19 # API attributes 20 api_format = 'json' 21 api_url = 'https://graph.facebook.com' 22 api_user_info_path = '/{user_name}' 23 api_user_self_info_path = '/me' 24 25 # User info extractors 26 x_user_id = key('id') 27 x_user_name = key('username') 28 x_display_name = key('name') 29 x_email = key('email') 30 31 def x_avatar_url(self, extracted, info, default): 32 return 'https://graph.facebook.com/' + extracted.user_id + '/picture?width=256&height=256' 33 [end of gratipay/elsewhere/facebook.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/gratipay/elsewhere/facebook.py b/gratipay/elsewhere/facebook.py --- a/gratipay/elsewhere/facebook.py +++ b/gratipay/elsewhere/facebook.py @@ -9,7 +9,7 @@ # Platform attributes name = 'facebook' display_name = 'Facebook' - account_url = 'https://www.facebook.com/{user_name}' + account_url = 'https://www.facebook.com/profile.php?id={user_id}' # Auth attributes auth_url = 'https://www.facebook.com/dialog/oauth'
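The underlying bug is string formatting with a `None` username. A minimal reproduction plus a fallback variant (the merged fix simply switches the template to the id-based form unconditionally):

```python
def account_url(user_id, user_name=None):
    # The broken version, 'https://www.facebook.com/{}'.format(user_name),
    # renders the literal path '/None' when no username exists.
    if user_name:
        return "https://www.facebook.com/{}".format(user_name)
    return "https://www.facebook.com/profile.php?id={}".format(user_id)

assert account_url("12345") == "https://www.facebook.com/profile.php?id=12345"
assert account_url("12345", "alice") == "https://www.facebook.com/alice"
```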
{"golden_diff": "diff --git a/gratipay/elsewhere/facebook.py b/gratipay/elsewhere/facebook.py\n--- a/gratipay/elsewhere/facebook.py\n+++ b/gratipay/elsewhere/facebook.py\n@@ -9,7 +9,7 @@\n # Platform attributes\n name = 'facebook'\n display_name = 'Facebook'\n- account_url = 'https://www.facebook.com/{user_name}'\n+ account_url = 'https://www.facebook.com/profile.php?id={user_id}'\n \n # Auth attributes\n auth_url = 'https://www.facebook.com/dialog/oauth'\n", "issue": "broken facebook link when no user_name\nIf all we have is a user_id, we construct the URL improperly. In that case we need:\n\n`http://facebook.com/profile.php?id=$ID`\n\nBut we have:\n\n`http://facebook.com/None`\n\nbroken facebook link when no user_name\nIf all we have is a user_id, we construct the URL improperly. In that case we need:\n\n`http://facebook.com/profile.php?id=$ID`\n\nBut we have:\n\n`http://facebook.com/None`\n\n", "before_files": [{"content": "from __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom gratipay.elsewhere import PlatformOAuth2\nfrom gratipay.elsewhere._extractors import key\n\n\nclass Facebook(PlatformOAuth2):\n\n # Platform attributes\n name = 'facebook'\n display_name = 'Facebook'\n account_url = 'https://www.facebook.com/{user_name}'\n\n # Auth attributes\n auth_url = 'https://www.facebook.com/dialog/oauth'\n access_token_url = 'https://graph.facebook.com/oauth/access_token'\n oauth_default_scope = ['public_profile,email']\n\n # API attributes\n api_format = 'json'\n api_url = 'https://graph.facebook.com'\n api_user_info_path = '/{user_name}'\n api_user_self_info_path = '/me'\n\n # User info extractors\n x_user_id = key('id')\n x_user_name = key('username')\n x_display_name = key('name')\n x_email = key('email')\n\n def x_avatar_url(self, extracted, info, default):\n return 'https://graph.facebook.com/' + extracted.user_id + '/picture?width=256&height=256'\n", "path": "gratipay/elsewhere/facebook.py"}]}
959
125
gh_patches_debug_28821
rasdani/github-patches
git_diff
huggingface__text-generation-inference-114
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add typical sampling HF has TypicalWarper and `typical_p`. Would be nice to be able to pass that. </issue> <code> [start of server/text_generation_server/utils/tokens.py] 1 import re 2 import torch 3 4 from transformers import ( 5 LogitsProcessorList, 6 TemperatureLogitsWarper, 7 TopKLogitsWarper, 8 TopPLogitsWarper, 9 RepetitionPenaltyLogitsProcessor, 10 PreTrainedTokenizerBase, 11 ) 12 from typing import List, Tuple, Optional 13 14 from text_generation_server.pb import generate_pb2 15 from text_generation_server.pb.generate_pb2 import FinishReason 16 from text_generation_server.utils.watermark import WatermarkLogitsProcessor 17 18 19 class Sampling: 20 def __init__(self, seed: int, device: str = "cpu"): 21 self.generator = torch.Generator(device) 22 self.generator.manual_seed(seed) 23 self.seed = seed 24 25 def __call__(self, logits): 26 probs = torch.nn.functional.softmax(logits) 27 next_tokens = torch.multinomial(probs, num_samples=1, generator=self.generator) 28 return next_tokens 29 30 31 class Greedy: 32 def __call__(self, logits): 33 return logits.argmax() 34 35 36 class NextTokenChooser: 37 def __init__( 38 self, 39 watermark=False, 40 temperature=1.0, 41 repetition_penalty=1.0, 42 top_k=None, 43 top_p=None, 44 do_sample=False, 45 seed=0, 46 device="cpu", 47 ): 48 warpers = LogitsProcessorList() 49 # the following idea is largely copied from this PR: https://github.com/huggingface/transformers/pull/5420/files 50 # all samplers can be found in `generation_utils_samplers.py` 51 sampling = do_sample 52 53 if watermark: 54 warpers.append(WatermarkLogitsProcessor(device=device)) 55 if repetition_penalty is not None and repetition_penalty != 1.0: 56 warpers.append(RepetitionPenaltyLogitsProcessor(penalty=repetition_penalty)) 57 if temperature is not None and temperature != 1.0: 58 temperature = float(temperature) 59 warpers.append(TemperatureLogitsWarper(temperature)) 60 sampling = True 61 if top_k is not None and top_k != 0: 62 warpers.append(TopKLogitsWarper(top_k=top_k)) 63 sampling = True 64 if top_p is not None and top_p < 1.0: 65 warpers.append(TopPLogitsWarper(top_p=top_p)) 66 sampling = True 67 68 self.warpers = warpers 69 self.choice = Sampling(seed, device) if sampling else Greedy() 70 71 def __call__(self, input_ids, scores): 72 # Warp logits 73 scores = self.warpers(input_ids, scores) 74 75 # Compute logprobs 76 logprobs = torch.log_softmax(scores, -1) 77 78 # Choose tokens 79 next_id = self.choice(scores[-1]) 80 81 return next_id.view(1, 1), logprobs 82 83 @classmethod 84 def from_pb( 85 cls, 86 pb: generate_pb2.NextTokenChooserParameters, 87 device: torch.device, 88 ) -> "NextTokenChooser": 89 return NextTokenChooser( 90 watermark=pb.watermark, 91 temperature=pb.temperature, 92 repetition_penalty=pb.repetition_penalty, 93 top_k=pb.top_k, 94 top_p=pb.top_p, 95 do_sample=pb.do_sample, 96 seed=pb.seed, 97 device=device, 98 ) 99 100 101 class StopSequenceCriteria: 102 def __init__(self, stop_sequence: str): 103 self.regex = re.compile(f".*{stop_sequence}$") 104 105 def __call__(self, output: str) -> bool: 106 if self.regex.findall(output): 107 return True 108 return False 109 110 111 class StoppingCriteria: 112 def __init__( 113 self, 114 eos_token_id: int, 115 stop_sequence_criterias: List[StopSequenceCriteria], 116 max_new_tokens=20, 117 ): 118 self.eos_token_id = eos_token_id 119 self.stop_sequence_criterias = stop_sequence_criterias 120 self.max_new_tokens = 
max_new_tokens 121 self.current_tokens = 0 122 self.current_output = "" 123 124 def __call__(self, last_token: int, last_output: str) -> Tuple[bool, Optional[str]]: 125 self.current_tokens += 1 126 if self.current_tokens >= self.max_new_tokens: 127 return True, FinishReason.FINISH_REASON_LENGTH 128 129 if last_token == self.eos_token_id: 130 return True, FinishReason.FINISH_REASON_EOS_TOKEN 131 132 self.current_output += last_output 133 for stop_sequence_criteria in self.stop_sequence_criterias: 134 if stop_sequence_criteria(self.current_output): 135 return True, FinishReason.FINISH_REASON_STOP_SEQUENCE 136 137 return False, None 138 139 @classmethod 140 def from_pb( 141 cls, 142 pb: generate_pb2.StoppingCriteriaParameters, 143 tokenizer: PreTrainedTokenizerBase, 144 ) -> "StoppingCriteria": 145 stop_sequence_criterias = [ 146 StopSequenceCriteria(sequence) for sequence in pb.stop_sequences 147 ] 148 return StoppingCriteria( 149 tokenizer.eos_token_id, stop_sequence_criterias, pb.max_new_tokens 150 ) 151 [end of server/text_generation_server/utils/tokens.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/server/text_generation_server/utils/tokens.py b/server/text_generation_server/utils/tokens.py --- a/server/text_generation_server/utils/tokens.py +++ b/server/text_generation_server/utils/tokens.py @@ -6,6 +6,7 @@ TemperatureLogitsWarper, TopKLogitsWarper, TopPLogitsWarper, + TypicalLogitsWarper, RepetitionPenaltyLogitsProcessor, PreTrainedTokenizerBase, ) @@ -41,6 +42,7 @@ repetition_penalty=1.0, top_k=None, top_p=None, + typical_p=None, do_sample=False, seed=0, device="cpu", @@ -64,6 +66,9 @@ if top_p is not None and top_p < 1.0: warpers.append(TopPLogitsWarper(top_p=top_p)) sampling = True + if typical_p is not None and typical_p < 1.0: + warpers.append(TypicalLogitsWarper(mass=typical_p)) + sampling = True self.warpers = warpers self.choice = Sampling(seed, device) if sampling else Greedy() @@ -92,6 +97,7 @@ repetition_penalty=pb.repetition_penalty, top_k=pb.top_k, top_p=pb.top_p, + typical_p=pb.typical_p, do_sample=pb.do_sample, seed=pb.seed, device=device,
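With the warper wired in, typical-p filtering can be exercised directly. A small sketch against a toy logit row (the numbers are arbitrary, and this needs a transformers version that ships `TypicalLogitsWarper`):

```python
import torch
from transformers import LogitsProcessorList, TypicalLogitsWarper

scores = torch.tensor([[2.0, 1.5, 0.5, -1.0, -3.0]])  # batch of 1, vocab of 5
input_ids = torch.tensor([[0]])  # dummy context; the warper only needs scores

warpers = LogitsProcessorList([TypicalLogitsWarper(mass=0.9)])
warped = warpers(input_ids, scores)

# Tokens outside the typical set are pushed to -inf before sampling.
print(warped)
```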
{"golden_diff": "diff --git a/server/text_generation_server/utils/tokens.py b/server/text_generation_server/utils/tokens.py\n--- a/server/text_generation_server/utils/tokens.py\n+++ b/server/text_generation_server/utils/tokens.py\n@@ -6,6 +6,7 @@\n TemperatureLogitsWarper,\n TopKLogitsWarper,\n TopPLogitsWarper,\n+ TypicalLogitsWarper,\n RepetitionPenaltyLogitsProcessor,\n PreTrainedTokenizerBase,\n )\n@@ -41,6 +42,7 @@\n repetition_penalty=1.0,\n top_k=None,\n top_p=None,\n+ typical_p=None,\n do_sample=False,\n seed=0,\n device=\"cpu\",\n@@ -64,6 +66,9 @@\n if top_p is not None and top_p < 1.0:\n warpers.append(TopPLogitsWarper(top_p=top_p))\n sampling = True\n+ if typical_p is not None and typical_p < 1.0:\n+ warpers.append(TypicalLogitsWarper(mass=typical_p))\n+ sampling = True\n \n self.warpers = warpers\n self.choice = Sampling(seed, device) if sampling else Greedy()\n@@ -92,6 +97,7 @@\n repetition_penalty=pb.repetition_penalty,\n top_k=pb.top_k,\n top_p=pb.top_p,\n+ typical_p=pb.typical_p,\n do_sample=pb.do_sample,\n seed=pb.seed,\n device=device,\n", "issue": "Add typical sampling\nHF has TypicalWarper and `typical_p`. Would be nice to be able to pass that.\n", "before_files": [{"content": "import re\nimport torch\n\nfrom transformers import (\n LogitsProcessorList,\n TemperatureLogitsWarper,\n TopKLogitsWarper,\n TopPLogitsWarper,\n RepetitionPenaltyLogitsProcessor,\n PreTrainedTokenizerBase,\n)\nfrom typing import List, Tuple, Optional\n\nfrom text_generation_server.pb import generate_pb2\nfrom text_generation_server.pb.generate_pb2 import FinishReason\nfrom text_generation_server.utils.watermark import WatermarkLogitsProcessor\n\n\nclass Sampling:\n def __init__(self, seed: int, device: str = \"cpu\"):\n self.generator = torch.Generator(device)\n self.generator.manual_seed(seed)\n self.seed = seed\n\n def __call__(self, logits):\n probs = torch.nn.functional.softmax(logits)\n next_tokens = torch.multinomial(probs, num_samples=1, generator=self.generator)\n return next_tokens\n\n\nclass Greedy:\n def __call__(self, logits):\n return logits.argmax()\n\n\nclass NextTokenChooser:\n def __init__(\n self,\n watermark=False,\n temperature=1.0,\n repetition_penalty=1.0,\n top_k=None,\n top_p=None,\n do_sample=False,\n seed=0,\n device=\"cpu\",\n ):\n warpers = LogitsProcessorList()\n # the following idea is largely copied from this PR: https://github.com/huggingface/transformers/pull/5420/files\n # all samplers can be found in `generation_utils_samplers.py`\n sampling = do_sample\n\n if watermark:\n warpers.append(WatermarkLogitsProcessor(device=device))\n if repetition_penalty is not None and repetition_penalty != 1.0:\n warpers.append(RepetitionPenaltyLogitsProcessor(penalty=repetition_penalty))\n if temperature is not None and temperature != 1.0:\n temperature = float(temperature)\n warpers.append(TemperatureLogitsWarper(temperature))\n sampling = True\n if top_k is not None and top_k != 0:\n warpers.append(TopKLogitsWarper(top_k=top_k))\n sampling = True\n if top_p is not None and top_p < 1.0:\n warpers.append(TopPLogitsWarper(top_p=top_p))\n sampling = True\n\n self.warpers = warpers\n self.choice = Sampling(seed, device) if sampling else Greedy()\n\n def __call__(self, input_ids, scores):\n # Warp logits\n scores = self.warpers(input_ids, scores)\n\n # Compute logprobs\n logprobs = torch.log_softmax(scores, -1)\n\n # Choose tokens\n next_id = self.choice(scores[-1])\n\n return next_id.view(1, 1), logprobs\n\n @classmethod\n def from_pb(\n cls,\n pb: 
generate_pb2.NextTokenChooserParameters,\n device: torch.device,\n ) -> \"NextTokenChooser\":\n return NextTokenChooser(\n watermark=pb.watermark,\n temperature=pb.temperature,\n repetition_penalty=pb.repetition_penalty,\n top_k=pb.top_k,\n top_p=pb.top_p,\n do_sample=pb.do_sample,\n seed=pb.seed,\n device=device,\n )\n\n\nclass StopSequenceCriteria:\n def __init__(self, stop_sequence: str):\n self.regex = re.compile(f\".*{stop_sequence}$\")\n\n def __call__(self, output: str) -> bool:\n if self.regex.findall(output):\n return True\n return False\n\n\nclass StoppingCriteria:\n def __init__(\n self,\n eos_token_id: int,\n stop_sequence_criterias: List[StopSequenceCriteria],\n max_new_tokens=20,\n ):\n self.eos_token_id = eos_token_id\n self.stop_sequence_criterias = stop_sequence_criterias\n self.max_new_tokens = max_new_tokens\n self.current_tokens = 0\n self.current_output = \"\"\n\n def __call__(self, last_token: int, last_output: str) -> Tuple[bool, Optional[str]]:\n self.current_tokens += 1\n if self.current_tokens >= self.max_new_tokens:\n return True, FinishReason.FINISH_REASON_LENGTH\n\n if last_token == self.eos_token_id:\n return True, FinishReason.FINISH_REASON_EOS_TOKEN\n\n self.current_output += last_output\n for stop_sequence_criteria in self.stop_sequence_criterias:\n if stop_sequence_criteria(self.current_output):\n return True, FinishReason.FINISH_REASON_STOP_SEQUENCE\n\n return False, None\n\n @classmethod\n def from_pb(\n cls,\n pb: generate_pb2.StoppingCriteriaParameters,\n tokenizer: PreTrainedTokenizerBase,\n ) -> \"StoppingCriteria\":\n stop_sequence_criterias = [\n StopSequenceCriteria(sequence) for sequence in pb.stop_sequences\n ]\n return StoppingCriteria(\n tokenizer.eos_token_id, stop_sequence_criterias, pb.max_new_tokens\n )\n", "path": "server/text_generation_server/utils/tokens.py"}]}
2,003
334
gh_patches_debug_4842
rasdani/github-patches
git_diff
plone__Products.CMFPlone-3972
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> TypeError thrown when during groups lookup The offending code is [here](https://github.com/plone/Products.CMFPlone/blob/308aa4d03ee6c0ce9d8119ce4c37955153f0bc6f/Products/CMFPlone/controlpanel/browser/usergroups_usermembership.py#L66). The traceback looks like this: ``` Traceback (innermost last): Module ZPublisher.WSGIPublisher, line 176, in transaction_pubevents Module ZPublisher.WSGIPublisher, line 385, in publish_module Module ZPublisher.WSGIPublisher, line 280, in publish Module ZPublisher.mapply, line 85, in mapply Module ZPublisher.WSGIPublisher, line 63, in call_object Module Products.CMFPlone.controlpanel.browser.usergroups_usermembership, line 57, in __call__ Module Products.CMFPlone.controlpanel.browser.usergroups_usermembership, line 54, in update Module Products.CMFPlone.controlpanel.browser.usergroups_usermembership, line 63, in getGroups TypeError: '<' not supported between instances of 'bool' and 'str' ``` The issue is that when there's a `None` value in the `groupResults` (which is anticipated in the sort code) the lambda returns `False` which fails to compare against the group title/name strings under Python 3. The list comprehension that defines `groupResults` should probably just filter out `None` values to avoid this issue. I'm not entirely sure what circumstances result in a `None` group value, but I am seeing it occur in a real world use case. </issue> <code> [start of Products/CMFPlone/controlpanel/browser/usergroups_usermembership.py] 1 from plone.base import PloneMessageFactory as _ 2 from Products.CMFCore.utils import getToolByName 3 from Products.CMFPlone.controlpanel.browser.usergroups import ( 4 UsersGroupsControlPanelView, 5 ) 6 from Products.CMFPlone.utils import normalizeString 7 from zExceptions import Forbidden 8 9 10 class UserMembershipControlPanel(UsersGroupsControlPanelView): 11 def update(self): 12 self.userid = getattr(self.request, "userid") 13 self.gtool = getToolByName(self, "portal_groups") 14 self.mtool = getToolByName(self, "portal_membership") 15 self.member = self.mtool.getMemberById(self.userid) 16 17 form = self.request.form 18 19 self.searchResults = [] 20 self.searchString = "" 21 self.newSearch = False 22 23 if form.get("form.submitted", False): 24 delete = form.get("delete", []) 25 if delete: 26 for groupname in delete: 27 self.gtool.removePrincipalFromGroup( 28 self.userid, groupname, self.request 29 ) 30 self.context.plone_utils.addPortalMessage(_("Changes made.")) 31 32 add = form.get("add", []) 33 if add: 34 for groupname in add: 35 group = self.gtool.getGroupById(groupname) 36 if "Manager" in group.getRoles() and not self.is_zope_manager: 37 raise Forbidden 38 39 self.gtool.addPrincipalToGroup(self.userid, groupname, self.request) 40 self.context.plone_utils.addPortalMessage(_("Changes made.")) 41 42 search = form.get("form.button.Search", None) is not None 43 findAll = ( 44 form.get("form.button.FindAll", None) is not None and not self.many_groups 45 ) 46 self.searchString = not findAll and form.get("searchstring", "") or "" 47 48 if findAll or not self.many_groups or self.searchString != "": 49 self.searchResults = self.getPotentialGroups(self.searchString) 50 51 if search or findAll: 52 self.newSearch = True 53 54 self.groups = self.getGroups() 55 56 def __call__(self): 57 self.update() 58 return self.index() 59 60 def getGroups(self): 61 groupResults = [ 62 self.gtool.getGroupById(m) 63 for m in 
self.gtool.getGroupsForPrincipal(self.member) 64 ] 65 groupResults.sort( 66 key=lambda x: x is not None and normalizeString(x.getGroupTitleOrName()) 67 ) 68 return [i for i in groupResults if i] 69 70 def getPotentialGroups(self, searchString): 71 ignoredGroups = [x.id for x in self.getGroups() if x is not None] 72 return self.membershipSearch( 73 searchString, searchUsers=False, ignore=ignoredGroups 74 ) 75 [end of Products/CMFPlone/controlpanel/browser/usergroups_usermembership.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/Products/CMFPlone/controlpanel/browser/usergroups_usermembership.py b/Products/CMFPlone/controlpanel/browser/usergroups_usermembership.py --- a/Products/CMFPlone/controlpanel/browser/usergroups_usermembership.py +++ b/Products/CMFPlone/controlpanel/browser/usergroups_usermembership.py @@ -63,7 +63,7 @@ for m in self.gtool.getGroupsForPrincipal(self.member) ] groupResults.sort( - key=lambda x: x is not None and normalizeString(x.getGroupTitleOrName()) + key=lambda x: normalizeString(x.getGroupTitleOrName()) if x else '' ) return [i for i in groupResults if i]
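The failure and the fix reduce to how the sort key handles `None`. A self-contained reproduction (`str.lower` stands in for Plone's `normalizeString` here):

```python
groups = ["beta", None, "Alpha"]

# Old key: returns False for None, and Python 3 refuses to order bool vs str.
try:
    sorted(groups, key=lambda x: x is not None and x.lower())
except TypeError as exc:
    print(exc)  # '<' not supported between instances of ...

# Fixed key: map None to '' so every key is a string.
print(sorted(groups, key=lambda x: x.lower() if x else ""))
# -> [None, 'Alpha', 'beta']
```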
{"golden_diff": "diff --git a/Products/CMFPlone/controlpanel/browser/usergroups_usermembership.py b/Products/CMFPlone/controlpanel/browser/usergroups_usermembership.py\n--- a/Products/CMFPlone/controlpanel/browser/usergroups_usermembership.py\n+++ b/Products/CMFPlone/controlpanel/browser/usergroups_usermembership.py\n@@ -63,7 +63,7 @@\n for m in self.gtool.getGroupsForPrincipal(self.member)\n ]\n groupResults.sort(\n- key=lambda x: x is not None and normalizeString(x.getGroupTitleOrName())\n+ key=lambda x: normalizeString(x.getGroupTitleOrName()) if x else ''\n )\n return [i for i in groupResults if i]\n", "issue": "TypeError thrown when during groups lookup\nThe offending code is [here](https://github.com/plone/Products.CMFPlone/blob/308aa4d03ee6c0ce9d8119ce4c37955153f0bc6f/Products/CMFPlone/controlpanel/browser/usergroups_usermembership.py#L66). The traceback looks like this:\r\n```\r\nTraceback (innermost last):\r\n Module ZPublisher.WSGIPublisher, line 176, in transaction_pubevents\r\n Module ZPublisher.WSGIPublisher, line 385, in publish_module\r\n Module ZPublisher.WSGIPublisher, line 280, in publish\r\n Module ZPublisher.mapply, line 85, in mapply\r\n Module ZPublisher.WSGIPublisher, line 63, in call_object\r\n Module Products.CMFPlone.controlpanel.browser.usergroups_usermembership, line 57, in __call__\r\n Module Products.CMFPlone.controlpanel.browser.usergroups_usermembership, line 54, in update\r\n Module Products.CMFPlone.controlpanel.browser.usergroups_usermembership, line 63, in getGroups\r\nTypeError: '<' not supported between instances of 'bool' and 'str'\r\n```\r\n\r\nThe issue is that when there's a `None` value in the `groupResults` (which is anticipated in the sort code) the lambda returns `False` which fails to compare against the group title/name strings under Python 3. The list comprehension that defines `groupResults` should probably just filter out `None` values to avoid this issue. 
I'm not entirely sure what circumstances result in a `None` group value, but I am seeing it occur in a real world use case.\n", "before_files": [{"content": "from plone.base import PloneMessageFactory as _\nfrom Products.CMFCore.utils import getToolByName\nfrom Products.CMFPlone.controlpanel.browser.usergroups import (\n UsersGroupsControlPanelView,\n)\nfrom Products.CMFPlone.utils import normalizeString\nfrom zExceptions import Forbidden\n\n\nclass UserMembershipControlPanel(UsersGroupsControlPanelView):\n def update(self):\n self.userid = getattr(self.request, \"userid\")\n self.gtool = getToolByName(self, \"portal_groups\")\n self.mtool = getToolByName(self, \"portal_membership\")\n self.member = self.mtool.getMemberById(self.userid)\n\n form = self.request.form\n\n self.searchResults = []\n self.searchString = \"\"\n self.newSearch = False\n\n if form.get(\"form.submitted\", False):\n delete = form.get(\"delete\", [])\n if delete:\n for groupname in delete:\n self.gtool.removePrincipalFromGroup(\n self.userid, groupname, self.request\n )\n self.context.plone_utils.addPortalMessage(_(\"Changes made.\"))\n\n add = form.get(\"add\", [])\n if add:\n for groupname in add:\n group = self.gtool.getGroupById(groupname)\n if \"Manager\" in group.getRoles() and not self.is_zope_manager:\n raise Forbidden\n\n self.gtool.addPrincipalToGroup(self.userid, groupname, self.request)\n self.context.plone_utils.addPortalMessage(_(\"Changes made.\"))\n\n search = form.get(\"form.button.Search\", None) is not None\n findAll = (\n form.get(\"form.button.FindAll\", None) is not None and not self.many_groups\n )\n self.searchString = not findAll and form.get(\"searchstring\", \"\") or \"\"\n\n if findAll or not self.many_groups or self.searchString != \"\":\n self.searchResults = self.getPotentialGroups(self.searchString)\n\n if search or findAll:\n self.newSearch = True\n\n self.groups = self.getGroups()\n\n def __call__(self):\n self.update()\n return self.index()\n\n def getGroups(self):\n groupResults = [\n self.gtool.getGroupById(m)\n for m in self.gtool.getGroupsForPrincipal(self.member)\n ]\n groupResults.sort(\n key=lambda x: x is not None and normalizeString(x.getGroupTitleOrName())\n )\n return [i for i in groupResults if i]\n\n def getPotentialGroups(self, searchString):\n ignoredGroups = [x.id for x in self.getGroups() if x is not None]\n return self.membershipSearch(\n searchString, searchUsers=False, ignore=ignoredGroups\n )\n", "path": "Products/CMFPlone/controlpanel/browser/usergroups_usermembership.py"}]}
1,642
159
gh_patches_debug_649
rasdani/github-patches
git_diff
pex-tool__pex-1997
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Release 2.1.116 On the docket: + [x] The --resolve-local-platforms option does not work with --complete-platforms #1899 </issue> <code> [start of pex/version.py] 1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). 2 # Licensed under the Apache License, Version 2.0 (see LICENSE). 3 4 __version__ = "2.1.115" 5 [end of pex/version.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pex/version.py b/pex/version.py --- a/pex/version.py +++ b/pex/version.py @@ -1,4 +1,4 @@ # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). -__version__ = "2.1.115" +__version__ = "2.1.116"
{"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.115\"\n+__version__ = \"2.1.116\"\n", "issue": "Release 2.1.116\nOn the docket:\r\n+ [x] The --resolve-local-platforms option does not work with --complete-platforms #1899\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.115\"\n", "path": "pex/version.py"}]}
624
99
gh_patches_debug_14330
rasdani/github-patches
git_diff
pyca__cryptography-5825
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Version Scheme Change We've talked on and off for quite a few years about our versioning scheme for `cryptography`, but #5771 made it very clear that despite our [versioning documentation](https://cryptography.io/en/latest/api-stability.html#versioning) some users still assume it's [SemVer](https://semver.org) and are caught off guard by changes. I propose that we **switch to a [CalVer](https://calver.org) scheme** for the thirty fifth feature release (which we currently would call 3.5). This has the advantage of being unambiguously not semantic versioning but encoding some vaguely useful information (the year of release) in it. ### Alternate Choices **Stay the course** We've successfully used our versioning scheme for a bit over 7 years now and the probability of another monumental shift like this is low. There is, however, a constant (but low) background radiation of people who are tripped up by making assumptions about our versioning so I would argue against this choice. **SemVer** Switch to using an actual semantic versioning scheme. Without re-litigating years of conversations, I don't believe either @alex or myself are interested in this option. (See https://github.com/pyca/cryptography/issues/5801#issuecomment-776067787 for a bit of elaboration on our position) **Firefox/Chrome Versioning** (From @alex) "Its merits are that it's technically semver compatible, it's fairly predictable, and it communicates at least a little info on the relative age of the project. Its demerits are that it probably won't solve the real problem here, which was that folks were surprised a major change happened at all." </issue> <code> [start of src/cryptography/__about__.py] 1 # This file is dual licensed under the terms of the Apache License, Version 2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository 3 # for complete details. 4 5 6 __all__ = [ 7 "__title__", 8 "__summary__", 9 "__uri__", 10 "__version__", 11 "__author__", 12 "__email__", 13 "__license__", 14 "__copyright__", 15 ] 16 17 __title__ = "cryptography" 18 __summary__ = ( 19 "cryptography is a package which provides cryptographic recipes" 20 " and primitives to Python developers." 21 ) 22 __uri__ = "https://github.com/pyca/cryptography" 23 24 __version__ = "3.5.dev1" 25 26 __author__ = "The Python Cryptographic Authority and individual contributors" 27 __email__ = "[email protected]" 28 29 __license__ = "BSD or Apache License, Version 2.0" 30 __copyright__ = "Copyright 2013-2021 {}".format(__author__) 31 [end of src/cryptography/__about__.py] [start of vectors/cryptography_vectors/__about__.py] 1 # This file is dual licensed under the terms of the Apache License, Version 2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository 3 # for complete details. 4 5 __all__ = [ 6 "__title__", 7 "__summary__", 8 "__uri__", 9 "__version__", 10 "__author__", 11 "__email__", 12 "__license__", 13 "__copyright__", 14 ] 15 16 __title__ = "cryptography_vectors" 17 __summary__ = "Test vectors for the cryptography package." 
18 19 __uri__ = "https://github.com/pyca/cryptography" 20 21 __version__ = "3.5.dev1" 22 23 __author__ = "The Python Cryptographic Authority and individual contributors" 24 __email__ = "[email protected]" 25 26 __license__ = "BSD or Apache License, Version 2.0" 27 __copyright__ = "Copyright 2013-2021 %s" % __author__ 28 [end of vectors/cryptography_vectors/__about__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/cryptography/__about__.py b/src/cryptography/__about__.py --- a/src/cryptography/__about__.py +++ b/src/cryptography/__about__.py @@ -21,7 +21,7 @@ ) __uri__ = "https://github.com/pyca/cryptography" -__version__ = "3.5.dev1" +__version__ = "35.0.0.dev1" __author__ = "The Python Cryptographic Authority and individual contributors" __email__ = "[email protected]" diff --git a/vectors/cryptography_vectors/__about__.py b/vectors/cryptography_vectors/__about__.py --- a/vectors/cryptography_vectors/__about__.py +++ b/vectors/cryptography_vectors/__about__.py @@ -18,7 +18,7 @@ __uri__ = "https://github.com/pyca/cryptography" -__version__ = "3.5.dev1" +__version__ = "35.0.0.dev1" __author__ = "The Python Cryptographic Authority and individual contributors" __email__ = "[email protected]"
{"golden_diff": "diff --git a/src/cryptography/__about__.py b/src/cryptography/__about__.py\n--- a/src/cryptography/__about__.py\n+++ b/src/cryptography/__about__.py\n@@ -21,7 +21,7 @@\n )\n __uri__ = \"https://github.com/pyca/cryptography\"\n \n-__version__ = \"3.5.dev1\"\n+__version__ = \"35.0.0.dev1\"\n \n __author__ = \"The Python Cryptographic Authority and individual contributors\"\n __email__ = \"[email protected]\"\ndiff --git a/vectors/cryptography_vectors/__about__.py b/vectors/cryptography_vectors/__about__.py\n--- a/vectors/cryptography_vectors/__about__.py\n+++ b/vectors/cryptography_vectors/__about__.py\n@@ -18,7 +18,7 @@\n \n __uri__ = \"https://github.com/pyca/cryptography\"\n \n-__version__ = \"3.5.dev1\"\n+__version__ = \"35.0.0.dev1\"\n \n __author__ = \"The Python Cryptographic Authority and individual contributors\"\n __email__ = \"[email protected]\"\n", "issue": "Version Scheme Change\nWe've talked on and off for quite a few years about our versioning scheme for `cryptography`, but #5771 made it very clear that despite our [versioning documentation](https://cryptography.io/en/latest/api-stability.html#versioning) some users still assume it's [SemVer](https://semver.org) and are caught off guard by changes. I propose that we **switch to a [CalVer](https://calver.org) scheme** for the thirty fifth feature release (which we currently would call 3.5). This has the advantage of being unambiguously not semantic versioning but encoding some vaguely useful information (the year of release) in it.\r\n\r\n### Alternate Choices\r\n**Stay the course**\r\nWe've successfully used our versioning scheme for a bit over 7 years now and the probability of another monumental shift like this is low. There is, however, a constant (but low) background radiation of people who are tripped up by making assumptions about our versioning so I would argue against this choice.\r\n\r\n**SemVer**\r\nSwitch to using an actual semantic versioning scheme. Without re-litigating years of conversations, I don't believe either @alex or myself are interested in this option. (See https://github.com/pyca/cryptography/issues/5801#issuecomment-776067787 for a bit of elaboration on our position)\r\n\r\n**Firefox/Chrome Versioning**\r\n(From @alex)\r\n\"Its merits are that it's technically semver compatible, it's fairly predictable, and it communicates at least a little info on the relative age of the project.\r\n\r\nIts demerits are that it probably won't solve the real problem here, which was that folks were surprised a major change happened at all.\"\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. 
See the LICENSE file in the root of this repository\n# for complete details.\n\n\n__all__ = [\n \"__title__\",\n \"__summary__\",\n \"__uri__\",\n \"__version__\",\n \"__author__\",\n \"__email__\",\n \"__license__\",\n \"__copyright__\",\n]\n\n__title__ = \"cryptography\"\n__summary__ = (\n \"cryptography is a package which provides cryptographic recipes\"\n \" and primitives to Python developers.\"\n)\n__uri__ = \"https://github.com/pyca/cryptography\"\n\n__version__ = \"3.5.dev1\"\n\n__author__ = \"The Python Cryptographic Authority and individual contributors\"\n__email__ = \"[email protected]\"\n\n__license__ = \"BSD or Apache License, Version 2.0\"\n__copyright__ = \"Copyright 2013-2021 {}\".format(__author__)\n", "path": "src/cryptography/__about__.py"}, {"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\n__all__ = [\n \"__title__\",\n \"__summary__\",\n \"__uri__\",\n \"__version__\",\n \"__author__\",\n \"__email__\",\n \"__license__\",\n \"__copyright__\",\n]\n\n__title__ = \"cryptography_vectors\"\n__summary__ = \"Test vectors for the cryptography package.\"\n\n__uri__ = \"https://github.com/pyca/cryptography\"\n\n__version__ = \"3.5.dev1\"\n\n__author__ = \"The Python Cryptographic Authority and individual contributors\"\n__email__ = \"[email protected]\"\n\n__license__ = \"BSD or Apache License, Version 2.0\"\n__copyright__ = \"Copyright 2013-2021 %s\" % __author__\n", "path": "vectors/cryptography_vectors/__about__.py"}]}
1,458
253
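A quick way to sanity-check the scheme switch discussed in the record above is to confirm that the new string is still a valid PEP 440 version and sorts after the old one. This is an illustrative sketch, not part of the record: the two version strings are taken from the golden diff, and it assumes the `packaging` library is installed.

```python
# Verify the CalVer-flavoured string from the golden diff is valid PEP 440
# and orders after the old scheme, so pip-style tooling keeps working.
from packaging.version import Version

old = Version("3.5.dev1")     # last development version under the old scheme
new = Version("35.0.0.dev1")  # replacement introduced by the golden diff

assert new > old  # PEP 440 comparison places the new scheme after the old one
print(f"{old} -> {new}")
```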
gh_patches_debug_25209
rasdani/github-patches
git_diff
urllib3__urllib3-516
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Support body param in RequestMethods.request Easiest way to do this is by avoiding defining a body kw when no fields are given, then if both are given it will naturally raise a "passed twice" error. </issue> <code> [start of urllib3/request.py] 1 try: 2 from urllib.parse import urlencode 3 except ImportError: 4 from urllib import urlencode 5 6 from .filepost import encode_multipart_formdata 7 8 9 __all__ = ['RequestMethods'] 10 11 12 class RequestMethods(object): 13 """ 14 Convenience mixin for classes who implement a :meth:`urlopen` method, such 15 as :class:`~urllib3.connectionpool.HTTPConnectionPool` and 16 :class:`~urllib3.poolmanager.PoolManager`. 17 18 Provides behavior for making common types of HTTP request methods and 19 decides which type of request field encoding to use. 20 21 Specifically, 22 23 :meth:`.request_encode_url` is for sending requests whose fields are 24 encoded in the URL (such as GET, HEAD, DELETE). 25 26 :meth:`.request_encode_body` is for sending requests whose fields are 27 encoded in the *body* of the request using multipart or www-form-urlencoded 28 (such as for POST, PUT, PATCH). 29 30 :meth:`.request` is for making any kind of request, it will look up the 31 appropriate encoding format and use one of the above two methods to make 32 the request. 33 34 Initializer parameters: 35 36 :param headers: 37 Headers to include with all requests, unless other headers are given 38 explicitly. 39 """ 40 41 _encode_url_methods = set(['DELETE', 'GET', 'HEAD', 'OPTIONS']) 42 43 def __init__(self, headers=None): 44 self.headers = headers or {} 45 46 def urlopen(self, method, url, body=None, headers=None, 47 encode_multipart=True, multipart_boundary=None, 48 **kw): # Abstract 49 raise NotImplemented("Classes extending RequestMethods must implement " 50 "their own ``urlopen`` method.") 51 52 def request(self, method, url, fields=None, headers=None, **urlopen_kw): 53 """ 54 Make a request using :meth:`urlopen` with the appropriate encoding of 55 ``fields`` based on the ``method`` used. 56 57 This is a convenience method that requires the least amount of manual 58 effort. It can be used in most situations, while still having the 59 option to drop down to more specific methods when necessary, such as 60 :meth:`request_encode_url`, :meth:`request_encode_body`, 61 or even the lowest level :meth:`urlopen`. 62 """ 63 method = method.upper() 64 65 if method in self._encode_url_methods: 66 return self.request_encode_url(method, url, fields=fields, 67 headers=headers, 68 **urlopen_kw) 69 else: 70 return self.request_encode_body(method, url, fields=fields, 71 headers=headers, 72 **urlopen_kw) 73 74 def request_encode_url(self, method, url, fields=None, **urlopen_kw): 75 """ 76 Make a request using :meth:`urlopen` with the ``fields`` encoded in 77 the url. This is useful for request methods like GET, HEAD, DELETE, etc. 78 """ 79 if fields: 80 url += '?' + urlencode(fields) 81 return self.urlopen(method, url, **urlopen_kw) 82 83 def request_encode_body(self, method, url, fields=None, headers=None, 84 encode_multipart=True, multipart_boundary=None, 85 **urlopen_kw): 86 """ 87 Make a request using :meth:`urlopen` with the ``fields`` encoded in 88 the body. This is useful for request methods like POST, PUT, PATCH, etc. 
89 90 When ``encode_multipart=True`` (default), then 91 :meth:`urllib3.filepost.encode_multipart_formdata` is used to encode 92 the payload with the appropriate content type. Otherwise 93 :meth:`urllib.urlencode` is used with the 94 'application/x-www-form-urlencoded' content type. 95 96 Multipart encoding must be used when posting files, and it's reasonably 97 safe to use it in other times too. However, it may break request 98 signing, such as with OAuth. 99 100 Supports an optional ``fields`` parameter of key/value strings AND 101 key/filetuple. A filetuple is a (filename, data, MIME type) tuple where 102 the MIME type is optional. For example:: 103 104 fields = { 105 'foo': 'bar', 106 'fakefile': ('foofile.txt', 'contents of foofile'), 107 'realfile': ('barfile.txt', open('realfile').read()), 108 'typedfile': ('bazfile.bin', open('bazfile').read(), 109 'image/jpeg'), 110 'nonamefile': 'contents of nonamefile field', 111 } 112 113 When uploading a file, providing a filename (the first parameter of the 114 tuple) is optional but recommended to best mimick behavior of browsers. 115 116 Note that if ``headers`` are supplied, the 'Content-Type' header will 117 be overwritten because it depends on the dynamic random boundary string 118 which is used to compose the body of the request. The random boundary 119 string can be explicitly set with the ``multipart_boundary`` parameter. 120 """ 121 if encode_multipart: 122 body, content_type = encode_multipart_formdata( 123 fields or {}, boundary=multipart_boundary) 124 else: 125 body, content_type = (urlencode(fields or {}), 126 'application/x-www-form-urlencoded') 127 128 if headers is None: 129 headers = self.headers 130 131 headers_ = {'Content-Type': content_type} 132 headers_.update(headers) 133 134 return self.urlopen(method, url, body=body, headers=headers_, 135 **urlopen_kw) 136 [end of urllib3/request.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/urllib3/request.py b/urllib3/request.py --- a/urllib3/request.py +++ b/urllib3/request.py @@ -118,18 +118,24 @@ which is used to compose the body of the request. The random boundary string can be explicitly set with the ``multipart_boundary`` parameter. """ - if encode_multipart: - body, content_type = encode_multipart_formdata( - fields or {}, boundary=multipart_boundary) - else: - body, content_type = (urlencode(fields or {}), - 'application/x-www-form-urlencoded') - if headers is None: headers = self.headers - headers_ = {'Content-Type': content_type} - headers_.update(headers) + extra_kw = {'headers': {}} + + if fields: + if 'body' in urlopen_kw: + raise TypeError('request got values for both \'fields\' and \'body\', can only specify one.') + + if encode_multipart: + body, content_type = encode_multipart_formdata(fields, boundary=multipart_boundary) + else: + body, content_type = urlencode(fields), 'application/x-www-form-urlencoded' + + extra_kw['body'] = body + extra_kw['headers'] = {'Content-Type': content_type} + + extra_kw['headers'].update(headers) + extra_kw.update(urlopen_kw) - return self.urlopen(method, url, body=body, headers=headers_, - **urlopen_kw) + return self.urlopen(method, url, **extra_kw)
{"golden_diff": "diff --git a/urllib3/request.py b/urllib3/request.py\n--- a/urllib3/request.py\n+++ b/urllib3/request.py\n@@ -118,18 +118,24 @@\n which is used to compose the body of the request. The random boundary\n string can be explicitly set with the ``multipart_boundary`` parameter.\n \"\"\"\n- if encode_multipart:\n- body, content_type = encode_multipart_formdata(\n- fields or {}, boundary=multipart_boundary)\n- else:\n- body, content_type = (urlencode(fields or {}),\n- 'application/x-www-form-urlencoded')\n-\n if headers is None:\n headers = self.headers\n \n- headers_ = {'Content-Type': content_type}\n- headers_.update(headers)\n+ extra_kw = {'headers': {}}\n+\n+ if fields:\n+ if 'body' in urlopen_kw:\n+ raise TypeError('request got values for both \\'fields\\' and \\'body\\', can only specify one.')\n+\n+ if encode_multipart:\n+ body, content_type = encode_multipart_formdata(fields, boundary=multipart_boundary)\n+ else:\n+ body, content_type = urlencode(fields), 'application/x-www-form-urlencoded'\n+\n+ extra_kw['body'] = body\n+ extra_kw['headers'] = {'Content-Type': content_type}\n+\n+ extra_kw['headers'].update(headers)\n+ extra_kw.update(urlopen_kw)\n \n- return self.urlopen(method, url, body=body, headers=headers_,\n- **urlopen_kw)\n+ return self.urlopen(method, url, **extra_kw)\n", "issue": "Support body param in RequestMethods.request\nEasiest way to do this is by avoiding defining a body kw when no fields are given, then if both are given it will naturally raise a \"passed twice\" error.\n\n", "before_files": [{"content": "try:\n from urllib.parse import urlencode\nexcept ImportError:\n from urllib import urlencode\n\nfrom .filepost import encode_multipart_formdata\n\n\n__all__ = ['RequestMethods']\n\n\nclass RequestMethods(object):\n \"\"\"\n Convenience mixin for classes who implement a :meth:`urlopen` method, such\n as :class:`~urllib3.connectionpool.HTTPConnectionPool` and\n :class:`~urllib3.poolmanager.PoolManager`.\n\n Provides behavior for making common types of HTTP request methods and\n decides which type of request field encoding to use.\n\n Specifically,\n\n :meth:`.request_encode_url` is for sending requests whose fields are\n encoded in the URL (such as GET, HEAD, DELETE).\n\n :meth:`.request_encode_body` is for sending requests whose fields are\n encoded in the *body* of the request using multipart or www-form-urlencoded\n (such as for POST, PUT, PATCH).\n\n :meth:`.request` is for making any kind of request, it will look up the\n appropriate encoding format and use one of the above two methods to make\n the request.\n\n Initializer parameters:\n\n :param headers:\n Headers to include with all requests, unless other headers are given\n explicitly.\n \"\"\"\n\n _encode_url_methods = set(['DELETE', 'GET', 'HEAD', 'OPTIONS'])\n\n def __init__(self, headers=None):\n self.headers = headers or {}\n\n def urlopen(self, method, url, body=None, headers=None,\n encode_multipart=True, multipart_boundary=None,\n **kw): # Abstract\n raise NotImplemented(\"Classes extending RequestMethods must implement \"\n \"their own ``urlopen`` method.\")\n\n def request(self, method, url, fields=None, headers=None, **urlopen_kw):\n \"\"\"\n Make a request using :meth:`urlopen` with the appropriate encoding of\n ``fields`` based on the ``method`` used.\n\n This is a convenience method that requires the least amount of manual\n effort. 
It can be used in most situations, while still having the\n option to drop down to more specific methods when necessary, such as\n :meth:`request_encode_url`, :meth:`request_encode_body`,\n or even the lowest level :meth:`urlopen`.\n \"\"\"\n method = method.upper()\n\n if method in self._encode_url_methods:\n return self.request_encode_url(method, url, fields=fields,\n headers=headers,\n **urlopen_kw)\n else:\n return self.request_encode_body(method, url, fields=fields,\n headers=headers,\n **urlopen_kw)\n\n def request_encode_url(self, method, url, fields=None, **urlopen_kw):\n \"\"\"\n Make a request using :meth:`urlopen` with the ``fields`` encoded in\n the url. This is useful for request methods like GET, HEAD, DELETE, etc.\n \"\"\"\n if fields:\n url += '?' + urlencode(fields)\n return self.urlopen(method, url, **urlopen_kw)\n\n def request_encode_body(self, method, url, fields=None, headers=None,\n encode_multipart=True, multipart_boundary=None,\n **urlopen_kw):\n \"\"\"\n Make a request using :meth:`urlopen` with the ``fields`` encoded in\n the body. This is useful for request methods like POST, PUT, PATCH, etc.\n\n When ``encode_multipart=True`` (default), then\n :meth:`urllib3.filepost.encode_multipart_formdata` is used to encode\n the payload with the appropriate content type. Otherwise\n :meth:`urllib.urlencode` is used with the\n 'application/x-www-form-urlencoded' content type.\n\n Multipart encoding must be used when posting files, and it's reasonably\n safe to use it in other times too. However, it may break request\n signing, such as with OAuth.\n\n Supports an optional ``fields`` parameter of key/value strings AND\n key/filetuple. A filetuple is a (filename, data, MIME type) tuple where\n the MIME type is optional. For example::\n\n fields = {\n 'foo': 'bar',\n 'fakefile': ('foofile.txt', 'contents of foofile'),\n 'realfile': ('barfile.txt', open('realfile').read()),\n 'typedfile': ('bazfile.bin', open('bazfile').read(),\n 'image/jpeg'),\n 'nonamefile': 'contents of nonamefile field',\n }\n\n When uploading a file, providing a filename (the first parameter of the\n tuple) is optional but recommended to best mimick behavior of browsers.\n\n Note that if ``headers`` are supplied, the 'Content-Type' header will\n be overwritten because it depends on the dynamic random boundary string\n which is used to compose the body of the request. The random boundary\n string can be explicitly set with the ``multipart_boundary`` parameter.\n \"\"\"\n if encode_multipart:\n body, content_type = encode_multipart_formdata(\n fields or {}, boundary=multipart_boundary)\n else:\n body, content_type = (urlencode(fields or {}),\n 'application/x-www-form-urlencoded')\n\n if headers is None:\n headers = self.headers\n\n headers_ = {'Content-Type': content_type}\n headers_.update(headers)\n\n return self.urlopen(method, url, body=body, headers=headers_,\n **urlopen_kw)\n", "path": "urllib3/request.py"}]}
2,041
352
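The behaviour the urllib3 record is after can be seen in isolation with a small stand-in for the merged-kwargs logic. `build_urlopen_kwargs` below is a hypothetical helper name, not urllib3's actual API; it mirrors the `extra_kw` handling from the golden diff using only the standard library, with the multipart branch omitted.

```python
# Sketch of the golden diff's kwargs merging: fields are only encoded into a
# body when given, so passing both 'fields' and 'body' is rejected explicitly.
from urllib.parse import urlencode

def build_urlopen_kwargs(fields=None, headers=None, **urlopen_kw):
    extra_kw = {"headers": {}}
    if fields:
        if "body" in urlopen_kw:
            raise TypeError(
                "request got values for both 'fields' and 'body', "
                "can only specify one."
            )
        # urlencoded form only; the encode_multipart branch is left out here
        extra_kw["body"] = urlencode(fields)
        extra_kw["headers"] = {"Content-Type": "application/x-www-form-urlencoded"}
    extra_kw["headers"].update(headers or {})
    extra_kw.update(urlopen_kw)
    return extra_kw

print(build_urlopen_kwargs(body=b"raw payload"))          # body passes through untouched
print(build_urlopen_kwargs(fields={"q": "1"}))            # fields become an encoded body
try:
    build_urlopen_kwargs(fields={"q": "1"}, body=b"raw")  # both at once -> TypeError
except TypeError as exc:
    print("rejected:", exc)
```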
gh_patches_debug_23722
rasdani/github-patches
git_diff
intel__dffml-585
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> docs: source: New File source tutorial The question of how to make a new data source came up [recently](https://gitter.im/dffml/community?at=5e0e4bfbfd580457e7b3e26a). We should have multiple tutorials for sources. We're going to use https://github.com/intel/dffml/issues/551 as the example for this tutorial, so that issue needs to be closed first. You'll want to make a `docs/tutorials/sources/index.rst` Then take this existing tutorial and move it under `docs/tutorials/sources/complex.rst` Your new one will go under `sources/file.rst` The idea here is just to explain how you can write a source which reads data from some new filetype, with our example being the `.ini` Try to follow the new model tutorial format, where we do `literalinclude` and `:lines:` to talk about individual sections of the files we're writing. </issue> <code> [start of dffml/source/ini.py] 1 from configparser import ConfigParser 2 3 from ..base import config 4 from ..record import Record 5 from .file import FileSource 6 from .memory import MemorySource 7 from ..util.data import parser_helper 8 from ..util.entrypoint import entrypoint 9 10 11 @config 12 class INISourceConfig: 13     filename: str 14     readwrite: bool = False 15     allowempty: bool = False 16 17 18 @entrypoint("ini") 19 class INISource(FileSource, MemorySource): 20     """ 21     Source to read files in .ini format. 22     """ 23 24     CONFIG = INISourceConfig 25 26     async def load_fd(self, ifile): 27         # Creating an instance of configparser 28         parser = ConfigParser() 29         # Read from a file object 30         parser.read_file(ifile) 31         # Get all the sections present in the file 32         sections = parser.sections() 33 34         self.mem = {} 35 36         # Go over each section 37         for section in sections: 38             # Get data under each section as a dict 39             temp_dict = {} 40             for k, v in parser.items(section): 41                 temp_dict[k] = parser_helper(v) 42             # Each section used as a record 43             self.mem[str(section)] = Record( 44                 str(section), data={"features": temp_dict}, 45             ) 46 47         self.logger.debug("%r loaded %d sections", self, len(self.mem)) 48 49     async def dump_fd(self, fd): 50         # Create an instance of configparser 51         parser = ConfigParser() 52 53         # Go over each section and record in mem 54         for section, record in self.mem.items(): 55             # Get each section data as a dict 56             section_data = record.features() 57             if section not in parser.keys(): 58                 # If section does not exist add new section 59                 parser.add_section(section) 60             # Set section data 61             parser[section] = section_data 62 63         # Write to the fileobject 64         parser.write(fd) 65 66         self.logger.debug("%r saved %d sections", self, len(self.mem)) 67 [end of dffml/source/ini.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/dffml/source/ini.py b/dffml/source/ini.py --- a/dffml/source/ini.py +++ b/dffml/source/ini.py @@ -23,11 +23,11 @@ CONFIG = INISourceConfig - async def load_fd(self, ifile): + async def load_fd(self, fileobj): # Creating an instance of configparser parser = ConfigParser() # Read from a file object - parser.read_file(ifile) + parser.read_file(fileobj) # Get all the sections present in the file sections = parser.sections() @@ -46,7 +46,7 @@ self.logger.debug("%r loaded %d sections", self, len(self.mem)) - async def dump_fd(self, fd): + async def dump_fd(self, fileobj): # Create an instance of configparser parser = ConfigParser() @@ -61,6 +61,6 @@ parser[section] = section_data # Write to the fileobject - parser.write(fd) + parser.write(fileobj) self.logger.debug("%r saved %d sections", self, len(self.mem))
{"golden_diff": "diff --git a/dffml/source/ini.py b/dffml/source/ini.py\n--- a/dffml/source/ini.py\n+++ b/dffml/source/ini.py\n@@ -23,11 +23,11 @@\n \n CONFIG = INISourceConfig\n \n- async def load_fd(self, ifile):\n+ async def load_fd(self, fileobj):\n # Creating an instance of configparser\n parser = ConfigParser()\n # Read from a file object\n- parser.read_file(ifile)\n+ parser.read_file(fileobj)\n # Get all the sections present in the file\n sections = parser.sections()\n \n@@ -46,7 +46,7 @@\n \n self.logger.debug(\"%r loaded %d sections\", self, len(self.mem))\n \n- async def dump_fd(self, fd):\n+ async def dump_fd(self, fileobj):\n # Create an instance of configparser\n parser = ConfigParser()\n \n@@ -61,6 +61,6 @@\n parser[section] = section_data\n \n # Write to the fileobject\n- parser.write(fd)\n+ parser.write(fileobj)\n \n self.logger.debug(\"%r saved %d sections\", self, len(self.mem))\n", "issue": "docs: source: New File source tutorial\nThe question of how to make a new data source came up [recently](https://gitter.im/dffml/community?at=5e0e4bfbfd580457e7b3e26a). We should have multiple tutorials for sources.\r\n\r\nWe're going to use https://github.com/intel/dffml/issues/551 as the example for this tutorial, so that issue needs to be closed first.\r\n\r\nYou'll want to make a\r\n\r\n`docs/tutorials/sources/index.rst`\r\n\r\nThen take this exist tutorial and move it under\r\n\r\n`docs/tutoirials/sources/complex.rst`\r\n\r\nYou're new one will go under \r\n\r\n`sources/file.rst`\r\n\r\nTe idea here is just to explain how you can write a source which reads data from some new filetype, with our example being the `.ini`\r\n\r\nTry to follow the new model tutorial format, where we do `literalinclude` and `:lines:` to talk about individual sections of the files we're writing.\n", "before_files": [{"content": "from configparser import ConfigParser\n\nfrom ..base import config\nfrom ..record import Record\nfrom .file import FileSource\nfrom .memory import MemorySource\nfrom ..util.data import parser_helper\nfrom ..util.entrypoint import entrypoint\n\n\n@config\nclass INISourceConfig:\n filename: str\n readwrite: bool = False\n allowempty: bool = False\n\n\n@entrypoint(\"ini\")\nclass INISource(FileSource, MemorySource):\n \"\"\"\n Source to read files in .ini format.\n \"\"\"\n\n CONFIG = INISourceConfig\n\n async def load_fd(self, ifile):\n # Creating an instance of configparser\n parser = ConfigParser()\n # Read from a file object\n parser.read_file(ifile)\n # Get all the sections present in the file\n sections = parser.sections()\n\n self.mem = {}\n\n # Go over each section\n for section in sections:\n # Get data under each section as a dict\n temp_dict = {}\n for k, v in parser.items(section):\n temp_dict[k] = parser_helper(v)\n # Each section used as a record\n self.mem[str(section)] = Record(\n str(section), data={\"features\": temp_dict},\n )\n\n self.logger.debug(\"%r loaded %d sections\", self, len(self.mem))\n\n async def dump_fd(self, fd):\n # Create an instance of configparser\n parser = ConfigParser()\n\n # Go over each section and record in mem\n for section, record in self.mem.items():\n # Get each section data as a dict\n section_data = record.features()\n if section not in parser.keys():\n # If section does not exist add new section\n parser.add_section(section)\n # Set section data\n parser[section] = section_data\n\n # Write to the fileobject\n parser.write(fd)\n\n self.logger.debug(\"%r saved %d sections\", self, len(self.mem))\n", "path": "dffml/source/ini.py"}]}
1,305
268
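For readers following the dffml record, the heart of `INISource` is a plain `configparser` round trip. The sketch below reproduces the load and dump steps with only the standard library; the dffml `Record`/async plumbing is deliberately left out, and the sample data is made up.

```python
# Minimal configparser round trip mirroring what INISource.load_fd and
# dump_fd do, without dffml's async machinery.
import io
from configparser import ConfigParser

text = "[section1]\nkey = value\n"   # made-up sample data

# load_fd: parse sections out of a file object, one dict per section
parser = ConfigParser()
parser.read_file(io.StringIO(text))
records = {section: dict(parser.items(section)) for section in parser.sections()}
print(records)  # {'section1': {'key': 'value'}}

# dump_fd: write one section per record back to a file object
out = ConfigParser()
for section, data in records.items():
    out[section] = data
buf = io.StringIO()
out.write(buf)
print(buf.getvalue())
```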
gh_patches_debug_35267
rasdani/github-patches
git_diff
pyinstaller__pyinstaller-6952
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> `hook-pkg_resources.py` is including hidden imports that are not available https://github.com/pyinstaller/pyinstaller/blob/3b5d233d02472ad9d589a798d19ad3d3df645223/PyInstaller/hooks/hook-pkg_resources.py#L20 and https://github.com/pyinstaller/pyinstaller/blob/3b5d233d02472ad9d589a798d19ad3d3df645223/PyInstaller/hooks/hook-pkg_resources.py#L30 Refer to https://github.com/pypa/setuptools/issues/1963 for the past need for including `py2_warn` and [the subsequent removal of py2_warn from setuptools](https://github.com/pypa/setuptools/pull/2238). The `pkg_resources.py2_warn` submodule was introduced in v45.0.0 and removed in v49.1.1. Therefore, an check for those versions would look like this: ```python if is_module_satisfies("setuptools >= 45.0.0, < 49.1.1"): hiddenimports.append('pkg_resources.py2_warn') ``` As for `pkg_resources.markers`, I haven't found any references to any `pkg_resources.markers` packages through Google searches. I'm not convinced that it ever existed at any point. Perhaps it was being confused for (or just a typo of) `packaging.markers`, which does exist. Since `hook-pkg_resources.py` already collects the submodules of `packaging`, there doesn't need to be an extra hidden import added for `packaging.markers`. Therefore, I think that the `pkg_resources.markers` hidden import line can be removed completely. Another module that it is trying to include is `pkg_resources._vendor.pyparsing.diagrams`. This module can only be used when the `railroad` module is available. Therefore, a check should be added to make sure that the `railroad` module is available, and filter out the diagrams module when it isn't. For example: ```python from PyInstaller.utils.hooks import ( collect_submodules, is_module_satisfies, can_import_module, is_module_or_submodule ) hiddenimports = [] # pkg_resources keeps vendored modules in its _vendor subpackage and does sys.meta_path based import magic to expose # them as pkg_resources.extern.* if not can_import_module('railroad'): # The `railroad` package is an optional requirement for `pyparsing`. `pyparsing.diagrams` depends on `railroad`, so # filter it out when `railroad` is not available. hiddenimports += collect_submodules( 'pkg_resources._vendor', lambda name: is_module_or_submodule(name, 'pkg_resources._vendor.pyparsing.diagrams')) else: hiddenimports += collect_submodules('pkg_resources._vendor') ``` PyInstaller: v5.1 </issue> <code> [start of PyInstaller/hooks/hook-pkg_resources.py] 1 #----------------------------------------------------------------------------- 2 # Copyright (c) 2005-2022, PyInstaller Development Team. 3 # 4 # Distributed under the terms of the GNU General Public License (version 2 5 # or later) with exception for distributing the bootloader. 6 # 7 # The full license is in the file COPYING.txt, distributed with this software. 8 # 9 # SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception) 10 #----------------------------------------------------------------------------- 11 12 from PyInstaller.utils.hooks import collect_submodules, is_module_satisfies 13 14 # pkg_resources keeps vendored modules in its _vendor subpackage, and does sys.meta_path based import magic to expose 15 # them as pkg_resources.extern.* 16 hiddenimports = collect_submodules('pkg_resources._vendor') 17 18 # pkg_resources v45.0 dropped support for Python 2 and added this module printing a warning. 
We could save some bytes if 19 # we would replace this by a fake module. 20 hiddenimports.append('pkg_resources.py2_warn') 21 22 excludedimports = ['__main__'] 23 24 # Some more hidden imports. See: 25 # https://github.com/pyinstaller/pyinstaller-hooks-contrib/issues/15#issuecomment-663699288 `packaging` can either be 26 # its own package, or embedded in `pkg_resources._vendor.packaging`, or both. Assume the worst and include both if 27 # present. 28 hiddenimports += collect_submodules('packaging') 29 30 hiddenimports += ['pkg_resources.markers'] 31 32 # As of v60.7, setuptools vendored jaraco and has pkg_resources use it. Currently, the pkg_resources._vendor.jaraco 33 # namespace package cannot be automatically scanned due to limited support for pure namespace packages in our hook 34 # utilities. 35 # 36 # In setuptools 60.7.0, the vendored jaraco.text package included "Lorem Ipsum.txt" data file, which also has to be 37 # collected. However, the presence of the data file (and the resulting directory hierarchy) confuses the importer's 38 # redirection logic; instead of trying to work-around that, tell user to upgrade or downgrade their setuptools. 39 if is_module_satisfies("setuptools == 60.7.0"): 40 raise SystemExit( 41 "ERROR: Setuptools 60.7.0 is incompatible with PyInstaller. " 42 "Downgrade to an earlier version or upgrade to a later version." 43 ) 44 # In setuptools 60.7.1, the "Lorem Ipsum.txt" data file was dropped from the vendored jaraco.text package, so we can 45 # accommodate it with couple of hidden imports. 46 elif is_module_satisfies("setuptools >= 60.7.1"): 47 hiddenimports += [ 48 'pkg_resources._vendor.jaraco.functools', 49 'pkg_resources._vendor.jaraco.context', 50 'pkg_resources._vendor.jaraco.text', 51 ] 52 [end of PyInstaller/hooks/hook-pkg_resources.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/PyInstaller/hooks/hook-pkg_resources.py b/PyInstaller/hooks/hook-pkg_resources.py --- a/PyInstaller/hooks/hook-pkg_resources.py +++ b/PyInstaller/hooks/hook-pkg_resources.py @@ -9,26 +9,32 @@ # SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception) #----------------------------------------------------------------------------- -from PyInstaller.utils.hooks import collect_submodules, is_module_satisfies +from PyInstaller.utils.hooks import collect_submodules, is_module_satisfies, can_import_module # pkg_resources keeps vendored modules in its _vendor subpackage, and does sys.meta_path based import magic to expose # them as pkg_resources.extern.* -hiddenimports = collect_submodules('pkg_resources._vendor') + +# The `railroad` package is an optional requirement for `pyparsing`. `pyparsing.diagrams` depends on `railroad`, so +# filter it out when `railroad` is not available. +if can_import_module('railroad'): + hiddenimports = collect_submodules('pkg_resources._vendor') +else: + hiddenimports = collect_submodules( + 'pkg_resources._vendor', filter=lambda name: 'pkg_resources._vendor.pyparsing.diagram' not in name + ) # pkg_resources v45.0 dropped support for Python 2 and added this module printing a warning. We could save some bytes if # we would replace this by a fake module. -hiddenimports.append('pkg_resources.py2_warn') +if is_module_satisfies('setuptools >= 45.0.0, < 49.1.1'): + hiddenimports.append('pkg_resources.py2_warn') excludedimports = ['__main__'] # Some more hidden imports. See: # https://github.com/pyinstaller/pyinstaller-hooks-contrib/issues/15#issuecomment-663699288 `packaging` can either be -# its own package, or embedded in `pkg_resources._vendor.packaging`, or both. Assume the worst and include both if -# present. +# its own package, or embedded in `pkg_resources._vendor.packaging`, or both. hiddenimports += collect_submodules('packaging') -hiddenimports += ['pkg_resources.markers'] - # As of v60.7, setuptools vendored jaraco and has pkg_resources use it. Currently, the pkg_resources._vendor.jaraco # namespace package cannot be automatically scanned due to limited support for pure namespace packages in our hook # utilities.
{"golden_diff": "diff --git a/PyInstaller/hooks/hook-pkg_resources.py b/PyInstaller/hooks/hook-pkg_resources.py\n--- a/PyInstaller/hooks/hook-pkg_resources.py\n+++ b/PyInstaller/hooks/hook-pkg_resources.py\n@@ -9,26 +9,32 @@\n # SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)\n #-----------------------------------------------------------------------------\n \n-from PyInstaller.utils.hooks import collect_submodules, is_module_satisfies\n+from PyInstaller.utils.hooks import collect_submodules, is_module_satisfies, can_import_module\n \n # pkg_resources keeps vendored modules in its _vendor subpackage, and does sys.meta_path based import magic to expose\n # them as pkg_resources.extern.*\n-hiddenimports = collect_submodules('pkg_resources._vendor')\n+\n+# The `railroad` package is an optional requirement for `pyparsing`. `pyparsing.diagrams` depends on `railroad`, so\n+# filter it out when `railroad` is not available.\n+if can_import_module('railroad'):\n+ hiddenimports = collect_submodules('pkg_resources._vendor')\n+else:\n+ hiddenimports = collect_submodules(\n+ 'pkg_resources._vendor', filter=lambda name: 'pkg_resources._vendor.pyparsing.diagram' not in name\n+ )\n \n # pkg_resources v45.0 dropped support for Python 2 and added this module printing a warning. We could save some bytes if\n # we would replace this by a fake module.\n-hiddenimports.append('pkg_resources.py2_warn')\n+if is_module_satisfies('setuptools >= 45.0.0, < 49.1.1'):\n+ hiddenimports.append('pkg_resources.py2_warn')\n \n excludedimports = ['__main__']\n \n # Some more hidden imports. See:\n # https://github.com/pyinstaller/pyinstaller-hooks-contrib/issues/15#issuecomment-663699288 `packaging` can either be\n-# its own package, or embedded in `pkg_resources._vendor.packaging`, or both. Assume the worst and include both if\n-# present.\n+# its own package, or embedded in `pkg_resources._vendor.packaging`, or both.\n hiddenimports += collect_submodules('packaging')\n \n-hiddenimports += ['pkg_resources.markers']\n-\n # As of v60.7, setuptools vendored jaraco and has pkg_resources use it. Currently, the pkg_resources._vendor.jaraco\n # namespace package cannot be automatically scanned due to limited support for pure namespace packages in our hook\n # utilities.\n", "issue": "`hook-pkg_resources.py` is including hidden imports that are not available\nhttps://github.com/pyinstaller/pyinstaller/blob/3b5d233d02472ad9d589a798d19ad3d3df645223/PyInstaller/hooks/hook-pkg_resources.py#L20\r\n\r\nand\r\n\r\nhttps://github.com/pyinstaller/pyinstaller/blob/3b5d233d02472ad9d589a798d19ad3d3df645223/PyInstaller/hooks/hook-pkg_resources.py#L30\r\n\r\nRefer to https://github.com/pypa/setuptools/issues/1963 for the past need for including `py2_warn` and [the subsequent removal of py2_warn from setuptools](https://github.com/pypa/setuptools/pull/2238).\r\n\r\nThe `pkg_resources.py2_warn` submodule was introduced in v45.0.0 and removed in v49.1.1. Therefore, an check for those versions would look like this:\r\n```python\r\nif is_module_satisfies(\"setuptools >= 45.0.0, < 49.1.1\"):\r\n hiddenimports.append('pkg_resources.py2_warn')\r\n```\r\n\r\nAs for `pkg_resources.markers`, I haven't found any references to any `pkg_resources.markers` packages through Google searches. I'm not convinced that it ever existed at any point. Perhaps it was being confused for (or just a typo of) `packaging.markers`, which does exist. 
Since `hook-pkg_resources.py` already collects the submodules of `packaging`, there doesn't need to be an extra hidden import added for `packaging.markers`. Therefore, I think that the `pkg_resources.markers` hidden import line can be removed completely.\r\n\r\nAnother module that it is trying to include is `pkg_resources._vendor.pyparsing.diagrams`. This module can only be used when the `railroad` module is available. Therefore, a check should be added to make sure that the `railroad` module is available, and filter out the diagrams module when it isn't.\r\n\r\nFor example:\r\n```python\r\nfrom PyInstaller.utils.hooks import (\r\n collect_submodules,\r\n is_module_satisfies,\r\n can_import_module,\r\n is_module_or_submodule\r\n)\r\n\r\nhiddenimports = []\r\n\r\n# pkg_resources keeps vendored modules in its _vendor subpackage and does sys.meta_path based import magic to expose\r\n# them as pkg_resources.extern.*\r\nif not can_import_module('railroad'):\r\n # The `railroad` package is an optional requirement for `pyparsing`. `pyparsing.diagrams` depends on `railroad`, so\r\n # filter it out when `railroad` is not available.\r\n hiddenimports += collect_submodules(\r\n 'pkg_resources._vendor',\r\n lambda name: is_module_or_submodule(name, 'pkg_resources._vendor.pyparsing.diagrams'))\r\nelse:\r\n hiddenimports += collect_submodules('pkg_resources._vendor')\r\n```\r\n\r\nPyInstaller: v5.1\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2005-2022, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License (version 2\n# or later) with exception for distributing the bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)\n#-----------------------------------------------------------------------------\n\nfrom PyInstaller.utils.hooks import collect_submodules, is_module_satisfies\n\n# pkg_resources keeps vendored modules in its _vendor subpackage, and does sys.meta_path based import magic to expose\n# them as pkg_resources.extern.*\nhiddenimports = collect_submodules('pkg_resources._vendor')\n\n# pkg_resources v45.0 dropped support for Python 2 and added this module printing a warning. We could save some bytes if\n# we would replace this by a fake module.\nhiddenimports.append('pkg_resources.py2_warn')\n\nexcludedimports = ['__main__']\n\n# Some more hidden imports. See:\n# https://github.com/pyinstaller/pyinstaller-hooks-contrib/issues/15#issuecomment-663699288 `packaging` can either be\n# its own package, or embedded in `pkg_resources._vendor.packaging`, or both. Assume the worst and include both if\n# present.\nhiddenimports += collect_submodules('packaging')\n\nhiddenimports += ['pkg_resources.markers']\n\n# As of v60.7, setuptools vendored jaraco and has pkg_resources use it. Currently, the pkg_resources._vendor.jaraco\n# namespace package cannot be automatically scanned due to limited support for pure namespace packages in our hook\n# utilities.\n#\n# In setuptools 60.7.0, the vendored jaraco.text package included \"Lorem Ipsum.txt\" data file, which also has to be\n# collected. 
However, the presence of the data file (and the resulting directory hierarchy) confuses the importer's\n# redirection logic; instead of trying to work-around that, tell user to upgrade or downgrade their setuptools.\nif is_module_satisfies(\"setuptools == 60.7.0\"):\n raise SystemExit(\n \"ERROR: Setuptools 60.7.0 is incompatible with PyInstaller. \"\n \"Downgrade to an earlier version or upgrade to a later version.\"\n )\n# In setuptools 60.7.1, the \"Lorem Ipsum.txt\" data file was dropped from the vendored jaraco.text package, so we can\n# accommodate it with couple of hidden imports.\nelif is_module_satisfies(\"setuptools >= 60.7.1\"):\n hiddenimports += [\n 'pkg_resources._vendor.jaraco.functools',\n 'pkg_resources._vendor.jaraco.context',\n 'pkg_resources._vendor.jaraco.text',\n ]\n", "path": "PyInstaller/hooks/hook-pkg_resources.py"}]}
1,905
552
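The pyinstaller record hinges on whether certain modules are importable in the build environment. A rough stdlib-only stand-in for that check is sketched below; `can_import` is a hypothetical name that loosely mirrors PyInstaller's `can_import_module`, not its actual implementation.

```python
# Probe the current environment for the modules the hook conditionally needs,
# without importing PyInstaller itself.
import importlib.util

def can_import(name):
    try:
        # find_spec returns None when the module is absent; probing a
        # submodule imports its parent, which may itself be missing.
        return importlib.util.find_spec(name) is not None
    except (ImportError, ValueError):
        return False

for mod in ("railroad", "pkg_resources.py2_warn", "pkg_resources.markers"):
    print(mod, "->", "available" if can_import(mod) else "missing")
```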
gh_patches_debug_2753
rasdani/github-patches
git_diff
SeldonIO__MLServer-1172
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Star imports from `mlserver.codecs` not working For example: ```python from mlserver.codecs import * ``` Throws an error: ```python Traceback (most recent call last): File "/home/janis/.conda/envs/py310/lib/python3.10/site-packages/IPython/core/interactiveshell.py", line 3460, in run_code exec(code_obj, self.user_global_ns, self.user_ns) File "<ipython-input-2-b8cc62508f29>", line 1, in <module> from mlserver.codecs import * AttributeError: module 'mlserver.codecs' has no attribute 'StringRequestCodec' ``` This is likely because `__all__` is out-of-date with the actual imports. I haven't tested other sub-packages, but it might be worth looking at these. P.S. I'm not a big fan of `__all__` and star imports in particular, the main issue is that the existence of `__all__` gives rise to two public APIs which may diverge (as it has in this case). </issue> <code> [start of mlserver/codecs/__init__.py] 1 from .numpy import NumpyCodec, NumpyRequestCodec 2 from .pandas import PandasCodec 3 from .string import StringCodec 4 from .base64 import Base64Codec 5 from .datetime import DatetimeCodec 6 from .errors import CodecError 7 from .decorator import decode_args 8 from .base import ( 9 InputCodec, 10 RequestCodec, 11 register_input_codec, 12 register_request_codec, 13 InputCodecLike, 14 RequestCodecLike, 15 ) 16 from .utils import ( 17 DecodedParameterName, 18 has_decoded, 19 get_decoded, 20 get_decoded_or_raw, 21 encode_inference_response, 22 encode_response_output, 23 decode_request_input, 24 decode_inference_request, 25 ) 26 27 __all__ = [ 28 "CodecError", 29 "NumpyCodec", 30 "NumpyRequestCodec", 31 "StringCodec", 32 "StringRequestCodec", 33 "Base64Codec", 34 "DatetimeCodec", 35 "PandasCodec", 36 "InputCodec", 37 "InputCodecLike", 38 "RequestCodec", 39 "RequestCodecLike", 40 "DecodedParameterName", 41 "register_input_codec", 42 "register_request_codec", 43 "has_decoded", 44 "get_decoded", 45 "get_decoded_or_raw", 46 "encode_inference_response", 47 "encode_response_output", 48 "decode_request_input", 49 "decode_inference_request", 50 "decode_args", 51 ] 52 [end of mlserver/codecs/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mlserver/codecs/__init__.py b/mlserver/codecs/__init__.py --- a/mlserver/codecs/__init__.py +++ b/mlserver/codecs/__init__.py @@ -1,6 +1,6 @@ from .numpy import NumpyCodec, NumpyRequestCodec from .pandas import PandasCodec -from .string import StringCodec +from .string import StringCodec, StringRequestCodec from .base64 import Base64Codec from .datetime import DatetimeCodec from .errors import CodecError
{"golden_diff": "diff --git a/mlserver/codecs/__init__.py b/mlserver/codecs/__init__.py\n--- a/mlserver/codecs/__init__.py\n+++ b/mlserver/codecs/__init__.py\n@@ -1,6 +1,6 @@\n from .numpy import NumpyCodec, NumpyRequestCodec\n from .pandas import PandasCodec\n-from .string import StringCodec\n+from .string import StringCodec, StringRequestCodec\n from .base64 import Base64Codec\n from .datetime import DatetimeCodec\n from .errors import CodecError\n", "issue": "Star imports from `mlserver.codecs` not working\nFor example:\r\n\r\n```python\r\nfrom mlserver.codecs import *\r\n```\r\nThrows an error:\r\n```python\r\nTraceback (most recent call last):\r\n File \"/home/janis/.conda/envs/py310/lib/python3.10/site-packages/IPython/core/interactiveshell.py\", line 3460, in run_code\r\n exec(code_obj, self.user_global_ns, self.user_ns)\r\n File \"<ipython-input-2-b8cc62508f29>\", line 1, in <module>\r\n from mlserver.codecs import *\r\nAttributeError: module 'mlserver.codecs' has no attribute 'StringRequestCodec'\r\n```\r\n\r\nThis is likely because `__all__` is out-of-date with the actual imports. I haven't tested other sub-packages, but it might be worth looking at these.\r\n\r\nP.S. I'm not a big fan of `__all__` and star imports in particular, the main issue is that the existence of `__all__` gives rise to two public APIs which may diverge (as it has in this case).\n", "before_files": [{"content": "from .numpy import NumpyCodec, NumpyRequestCodec\nfrom .pandas import PandasCodec\nfrom .string import StringCodec\nfrom .base64 import Base64Codec\nfrom .datetime import DatetimeCodec\nfrom .errors import CodecError\nfrom .decorator import decode_args\nfrom .base import (\n InputCodec,\n RequestCodec,\n register_input_codec,\n register_request_codec,\n InputCodecLike,\n RequestCodecLike,\n)\nfrom .utils import (\n DecodedParameterName,\n has_decoded,\n get_decoded,\n get_decoded_or_raw,\n encode_inference_response,\n encode_response_output,\n decode_request_input,\n decode_inference_request,\n)\n\n__all__ = [\n \"CodecError\",\n \"NumpyCodec\",\n \"NumpyRequestCodec\",\n \"StringCodec\",\n \"StringRequestCodec\",\n \"Base64Codec\",\n \"DatetimeCodec\",\n \"PandasCodec\",\n \"InputCodec\",\n \"InputCodecLike\",\n \"RequestCodec\",\n \"RequestCodecLike\",\n \"DecodedParameterName\",\n \"register_input_codec\",\n \"register_request_codec\",\n \"has_decoded\",\n \"get_decoded\",\n \"get_decoded_or_raw\",\n \"encode_inference_response\",\n \"encode_response_output\",\n \"decode_request_input\",\n \"decode_inference_request\",\n \"decode_args\",\n]\n", "path": "mlserver/codecs/__init__.py"}]}
1,175
122
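The mlserver record is an instance of a general failure mode: `__all__` drifting away from what the module actually exports, which only surfaces on star imports. A module-agnostic check for that drift might look like the sketch below; `stale_all` is an illustrative helper, not part of mlserver.

```python
# Report names a module advertises in __all__ but does not actually define;
# any such name makes `from module import *` raise AttributeError.
import importlib

def stale_all(module_name):
    mod = importlib.import_module(module_name)
    return [name for name in getattr(mod, "__all__", []) if not hasattr(mod, name)]

# With the pre-fix mlserver installed this would report ['StringRequestCodec']
# for "mlserver.codecs"; a healthy module reports an empty list.
print(stale_all("collections"))  # expected: []
```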
gh_patches_debug_19124
rasdani/github-patches
git_diff
TheAlgorithms__Python-664
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> errors in balanced_parentheses.py https://github.com/TheAlgorithms/Python/blob/6a95bf9ad8090a1280abf76752a52ab9ee2420b7/data_structures/stacks/balanced_parentheses.py#L13-L17 `Line 16` returns wrong result due to `not` `Line 15` can cause error for inputs like **))**. before calling _pop_ it should be checked if stack is empty. https://github.com/TheAlgorithms/Python/blob/6a95bf9ad8090a1280abf76752a52ab9ee2420b7/data_structures/stacks/balanced_parentheses.py#L3 there is no module named **.Stack**. it should be _from **s**tack import Stack_ </issue> <code> [start of data_structures/stacks/balanced_parentheses.py] 1 from __future__ import print_function 2 from __future__ import absolute_import 3 from .Stack import Stack 4 5 __author__ = 'Omkar Pathak' 6 7 8 def balanced_parentheses(parentheses): 9 """ Use a stack to check if a string of parentheses is balanced.""" 10 stack = Stack(len(parentheses)) 11 for parenthesis in parentheses: 12 if parenthesis == '(': 13 stack.push(parenthesis) 14 elif parenthesis == ')': 15 stack.pop() 16 return not stack.is_empty() 17 18 19 if __name__ == '__main__': 20 examples = ['((()))', '((())'] 21 print('Balanced parentheses demonstration:\n') 22 for example in examples: 23 print(example + ': ' + str(balanced_parentheses(example))) 24 [end of data_structures/stacks/balanced_parentheses.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/data_structures/stacks/balanced_parentheses.py b/data_structures/stacks/balanced_parentheses.py --- a/data_structures/stacks/balanced_parentheses.py +++ b/data_structures/stacks/balanced_parentheses.py @@ -1,6 +1,6 @@ from __future__ import print_function from __future__ import absolute_import -from .Stack import Stack +from stack import Stack __author__ = 'Omkar Pathak' @@ -12,12 +12,14 @@ if parenthesis == '(': stack.push(parenthesis) elif parenthesis == ')': + if stack.is_empty(): + return False stack.pop() - return not stack.is_empty() + return stack.is_empty() if __name__ == '__main__': - examples = ['((()))', '((())'] + examples = ['((()))', '((())', '(()))'] print('Balanced parentheses demonstration:\n') for example in examples: print(example + ': ' + str(balanced_parentheses(example)))
{"golden_diff": "diff --git a/data_structures/stacks/balanced_parentheses.py b/data_structures/stacks/balanced_parentheses.py\n--- a/data_structures/stacks/balanced_parentheses.py\n+++ b/data_structures/stacks/balanced_parentheses.py\n@@ -1,6 +1,6 @@\n from __future__ import print_function\n from __future__ import absolute_import\n-from .Stack import Stack\n+from stack import Stack\n \n __author__ = 'Omkar Pathak'\n \n@@ -12,12 +12,14 @@\n if parenthesis == '(':\n stack.push(parenthesis)\n elif parenthesis == ')':\n+ if stack.is_empty():\n+ return False\n stack.pop()\n- return not stack.is_empty()\n+ return stack.is_empty()\n \n \n if __name__ == '__main__':\n- examples = ['((()))', '((())']\n+ examples = ['((()))', '((())', '(()))']\n print('Balanced parentheses demonstration:\\n')\n for example in examples:\n print(example + ': ' + str(balanced_parentheses(example)))\n", "issue": "errors in balanced_parentheses.py\nhttps://github.com/TheAlgorithms/Python/blob/6a95bf9ad8090a1280abf76752a52ab9ee2420b7/data_structures/stacks/balanced_parentheses.py#L13-L17\r\n\r\n`Line 16` returns wrong result due to `not`\r\n\r\n`Line 15` can cause error for inputs like **))**. before calling _pop_ it should be checked if stack is empty.\r\n\r\nhttps://github.com/TheAlgorithms/Python/blob/6a95bf9ad8090a1280abf76752a52ab9ee2420b7/data_structures/stacks/balanced_parentheses.py#L3\r\n\r\nthere is no module named **.Stack**. it should be\r\n _from **s**tack import Stack_\n", "before_files": [{"content": "from __future__ import print_function\nfrom __future__ import absolute_import\nfrom .Stack import Stack\n\n__author__ = 'Omkar Pathak'\n\n\ndef balanced_parentheses(parentheses):\n \"\"\" Use a stack to check if a string of parentheses is balanced.\"\"\"\n stack = Stack(len(parentheses))\n for parenthesis in parentheses:\n if parenthesis == '(':\n stack.push(parenthesis)\n elif parenthesis == ')':\n stack.pop()\n return not stack.is_empty()\n\n\nif __name__ == '__main__':\n examples = ['((()))', '((())']\n print('Balanced parentheses demonstration:\\n')\n for example in examples:\n print(example + ': ' + str(balanced_parentheses(example)))\n", "path": "data_structures/stacks/balanced_parentheses.py"}]}
952
245
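For the balanced-parentheses record, the corrected logic is easiest to see without the repo's `Stack` class. The following is a minimal self-contained version using a plain Python list, exercised with the example strings from the golden diff.

```python
# Corrected checker: reject a close with nothing open, and report balanced
# only when nothing is left open at the end.
def balanced_parentheses(parentheses):
    stack = []
    for ch in parentheses:
        if ch == "(":
            stack.append(ch)
        elif ch == ")":
            if not stack:   # closing with nothing open, e.g. "))"
                return False
            stack.pop()
    return not stack        # balanced only if the stack ends empty

for example in ["((()))", "((())", "(()))"]:
    print(example, "->", balanced_parentheses(example))
```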
gh_patches_debug_26805
rasdani/github-patches
git_diff
streamlink__streamlink-1268
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Kanal7 Plugin defective! Hi there, can you have a look on the kanal7.py please? "error: No playable streams found on this URL" Greetings </issue> <code> [start of src/streamlink/plugins/kanal7.py] 1 from __future__ import print_function 2 import re 3 4 from streamlink.plugin import Plugin 5 from streamlink.plugin.api import http 6 from streamlink.plugin.api import validate 7 from streamlink.stream import HLSStream 8 9 10 class Kanal7(Plugin): 11 url_re = re.compile(r"https?://(?:www.)?kanal7.com/canli-izle") 12 iframe_re = re.compile(r'iframe .*?src="(http://[^"]*?)"') 13 stream_re = re.compile(r'src: "(http[^"]*?)"') 14 15 @classmethod 16 def can_handle_url(cls, url): 17 return cls.url_re.match(url) is not None 18 19 def find_iframe(self, url): 20 res = http.get(url) 21 # find iframe url 22 iframe = self.iframe_re.search(res.text) 23 iframe_url = iframe and iframe.group(1) 24 if iframe_url: 25 self.logger.debug("Found iframe: {}", iframe_url) 26 return iframe_url 27 28 def _get_streams(self): 29 iframe1 = self.find_iframe(self.url) 30 if iframe1: 31 iframe2 = self.find_iframe(iframe1) 32 if iframe2: 33 ires = http.get(iframe2) 34 stream_m = self.stream_re.search(ires.text) 35 stream_url = stream_m and stream_m.group(1) 36 if stream_url: 37 yield "live", HLSStream(self.session, stream_url) 38 else: 39 self.logger.error("Could not find second iframe, has the page layout changed?") 40 else: 41 self.logger.error("Could not find iframe, has the page layout changed?") 42 43 44 __plugin__ = Kanal7 45 [end of src/streamlink/plugins/kanal7.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/streamlink/plugins/kanal7.py b/src/streamlink/plugins/kanal7.py --- a/src/streamlink/plugins/kanal7.py +++ b/src/streamlink/plugins/kanal7.py @@ -3,6 +3,7 @@ from streamlink.plugin import Plugin from streamlink.plugin.api import http +from streamlink.plugin.api import useragents from streamlink.plugin.api import validate from streamlink.stream import HLSStream @@ -10,7 +11,7 @@ class Kanal7(Plugin): url_re = re.compile(r"https?://(?:www.)?kanal7.com/canli-izle") iframe_re = re.compile(r'iframe .*?src="(http://[^"]*?)"') - stream_re = re.compile(r'src: "(http[^"]*?)"') + stream_re = re.compile(r'src="(http[^"]*?)"') @classmethod def can_handle_url(cls, url): @@ -34,7 +35,7 @@ stream_m = self.stream_re.search(ires.text) stream_url = stream_m and stream_m.group(1) if stream_url: - yield "live", HLSStream(self.session, stream_url) + yield "live", HLSStream(self.session, stream_url, headers={"Referer": iframe2}) else: self.logger.error("Could not find second iframe, has the page layout changed?") else:
{"golden_diff": "diff --git a/src/streamlink/plugins/kanal7.py b/src/streamlink/plugins/kanal7.py\n--- a/src/streamlink/plugins/kanal7.py\n+++ b/src/streamlink/plugins/kanal7.py\n@@ -3,6 +3,7 @@\n \n from streamlink.plugin import Plugin\n from streamlink.plugin.api import http\n+from streamlink.plugin.api import useragents\n from streamlink.plugin.api import validate\n from streamlink.stream import HLSStream\n \n@@ -10,7 +11,7 @@\n class Kanal7(Plugin):\n url_re = re.compile(r\"https?://(?:www.)?kanal7.com/canli-izle\")\n iframe_re = re.compile(r'iframe .*?src=\"(http://[^\"]*?)\"')\n- stream_re = re.compile(r'src: \"(http[^\"]*?)\"')\n+ stream_re = re.compile(r'src=\"(http[^\"]*?)\"')\n \n @classmethod\n def can_handle_url(cls, url):\n@@ -34,7 +35,7 @@\n stream_m = self.stream_re.search(ires.text)\n stream_url = stream_m and stream_m.group(1)\n if stream_url:\n- yield \"live\", HLSStream(self.session, stream_url)\n+ yield \"live\", HLSStream(self.session, stream_url, headers={\"Referer\": iframe2})\n else:\n self.logger.error(\"Could not find second iframe, has the page layout changed?\")\n else:\n", "issue": "Kanal7 Plugin defective!\nHi there,\r\n\r\ncan you have a look on the kanal7.py please?\r\n\r\n\"error: No playable streams found on this URL\"\r\n\r\nGreetings\n", "before_files": [{"content": "from __future__ import print_function\nimport re\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import http\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream import HLSStream\n\n\nclass Kanal7(Plugin):\n url_re = re.compile(r\"https?://(?:www.)?kanal7.com/canli-izle\")\n iframe_re = re.compile(r'iframe .*?src=\"(http://[^\"]*?)\"')\n stream_re = re.compile(r'src: \"(http[^\"]*?)\"')\n\n @classmethod\n def can_handle_url(cls, url):\n return cls.url_re.match(url) is not None\n\n def find_iframe(self, url):\n res = http.get(url)\n # find iframe url\n iframe = self.iframe_re.search(res.text)\n iframe_url = iframe and iframe.group(1)\n if iframe_url:\n self.logger.debug(\"Found iframe: {}\", iframe_url)\n return iframe_url\n\n def _get_streams(self):\n iframe1 = self.find_iframe(self.url)\n if iframe1:\n iframe2 = self.find_iframe(iframe1)\n if iframe2:\n ires = http.get(iframe2)\n stream_m = self.stream_re.search(ires.text)\n stream_url = stream_m and stream_m.group(1)\n if stream_url:\n yield \"live\", HLSStream(self.session, stream_url)\n else:\n self.logger.error(\"Could not find second iframe, has the page layout changed?\")\n else:\n self.logger.error(\"Could not find iframe, has the page layout changed?\")\n\n\n__plugin__ = Kanal7\n", "path": "src/streamlink/plugins/kanal7.py"}]}
1,013
318
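Editor's note on the row above (streamlink, Kanal7): per the golden_diff, the site changed its player markup from `src: "…"` to `src="…"`, and playback now needs a `Referer` header; the patch also imports `useragents`, which is unused in the shown hunks. A sketch of the substance of the fix against the plugin API visible in this row; the wrapper function name is mine, not the plugin's.

```python
import re

from streamlink.stream import HLSStream

# old pattern r'src: "(http[^"]*?)"' no longer matches the page markup
stream_re = re.compile(r'src="(http[^"]*?)"')

def extract_stream(session, iframe_url, page_text):
    match = stream_re.search(page_text)
    if match:
        # without a Referer header the CDN rejects the playlist request
        yield "live", HLSStream(session, match.group(1),
                                headers={"Referer": iframe_url})
```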
gh_patches_debug_23275
rasdani/github-patches
git_diff
wagtail__wagtail-3304
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> draft and live action labels are unclear ![screen shot 2017-01-05 at 12 42 52](https://cloud.githubusercontent.com/assets/7290588/21680947/944efe00-d344-11e6-9cbd-870f91fe9d5f.png) I don't think it's clear what the 'draft' and 'live' buttons will do. Could we include the full labels 'Preview draft' and 'View live'? </issue> <code> [start of wagtail/wagtailadmin/wagtail_hooks.py] 1 from __future__ import absolute_import, unicode_literals 2 3 from django import forms 4 from django.contrib.auth.models import Permission 5 from django.contrib.staticfiles.templatetags.staticfiles import static 6 from django.core.urlresolvers import reverse 7 from django.utils.translation import ugettext_lazy as _ 8 from wagtail.wagtailadmin.menu import MenuItem, SubmenuMenuItem, settings_menu 9 from wagtail.wagtailadmin.search import SearchArea 10 from wagtail.wagtailadmin.widgets import Button, ButtonWithDropdownFromHook, PageListingButton 11 from wagtail.wagtailcore import hooks 12 from wagtail.wagtailcore.permissions import collection_permission_policy 13 14 15 class ExplorerMenuItem(MenuItem): 16 @property 17 def media(self): 18 return forms.Media(js=[static('wagtailadmin/js/explorer-menu.js')]) 19 20 21 @hooks.register('register_admin_menu_item') 22 def register_explorer_menu_item(): 23 return ExplorerMenuItem( 24 _('Explorer'), reverse('wagtailadmin_explore_root'), 25 name='explorer', 26 classnames='icon icon-folder-open-inverse dl-trigger', 27 attrs={'data-explorer-menu-url': reverse('wagtailadmin_explorer_nav')}, 28 order=100) 29 30 31 class SettingsMenuItem(SubmenuMenuItem): 32 template = 'wagtailadmin/shared/menu_settings_menu_item.html' 33 34 35 @hooks.register('register_admin_menu_item') 36 def register_settings_menu(): 37 return SettingsMenuItem( 38 _('Settings'), settings_menu, classnames='icon icon-cogs', order=10000) 39 40 41 @hooks.register('register_permissions') 42 def register_permissions(): 43 return Permission.objects.filter(content_type__app_label='wagtailadmin', codename='access_admin') 44 45 46 @hooks.register('register_admin_search_area') 47 def register_pages_search_area(): 48 return SearchArea( 49 _('Pages'), reverse('wagtailadmin_pages:search'), 50 name='pages', 51 classnames='icon icon-folder-open-inverse', 52 order=100) 53 54 55 class CollectionsMenuItem(MenuItem): 56 def is_shown(self, request): 57 return collection_permission_policy.user_has_any_permission( 58 request.user, ['add', 'change', 'delete'] 59 ) 60 61 62 @hooks.register('register_settings_menu_item') 63 def register_collections_menu_item(): 64 return CollectionsMenuItem(_('Collections'), reverse('wagtailadmin_collections:index'), classnames='icon icon-folder-open-1', order=700) 65 66 67 @hooks.register('register_page_listing_buttons') 68 def page_listing_buttons(page, page_perms, is_parent=False): 69 if page_perms.can_edit(): 70 yield PageListingButton(_('Edit'), reverse('wagtailadmin_pages:edit', args=[page.id]), 71 attrs={'title': _('Edit this page')}, priority=10) 72 if page.has_unpublished_changes: 73 yield PageListingButton(_('Draft'), reverse('wagtailadmin_pages:view_draft', args=[page.id]), 74 attrs={'title': _('Preview draft'), 'target': '_blank'}, priority=20) 75 if page.live and page.url: 76 yield PageListingButton(_('Live'), page.url, attrs={'target': "_blank", 'title': _('View live')}, priority=30) 77 if page_perms.can_add_subpage(): 78 if is_parent: 79 yield Button(_('Add child page'), 
reverse('wagtailadmin_pages:add_subpage', args=[page.id]), 80 attrs={'title': _("Add a child page to '{0}' ").format(page.get_admin_display_title())}, classes={'button', 'button-small', 'bicolor', 'icon', 'white', 'icon-plus'}, priority=40) 81 else: 82 yield PageListingButton(_('Add child page'), reverse('wagtailadmin_pages:add_subpage', args=[page.id]), 83 attrs={'title': _("Add a child page to '{0}' ").format(page.get_admin_display_title())}, priority=40) 84 85 yield ButtonWithDropdownFromHook( 86 _('More'), 87 hook_name='register_page_listing_more_buttons', 88 page=page, 89 page_perms=page_perms, 90 is_parent=is_parent, 91 attrs={'target': '_blank', 'title': _('View more options')}, priority=50) 92 93 94 @hooks.register('register_page_listing_more_buttons') 95 def page_listing_more_buttons(page, page_perms, is_parent=False): 96 if page_perms.can_move(): 97 yield Button(_('Move'), reverse('wagtailadmin_pages:move', args=[page.id]), 98 attrs={"title": _('Move this page')}, priority=10) 99 if not page.is_root(): 100 yield Button(_('Copy'), reverse('wagtailadmin_pages:copy', args=[page.id]), 101 attrs={'title': _('Copy this page')}, priority=20) 102 if page_perms.can_delete(): 103 yield Button(_('Delete'), reverse('wagtailadmin_pages:delete', args=[page.id]), 104 attrs={'title': _('Delete this page')}, priority=30) 105 if page_perms.can_unpublish(): 106 yield Button(_('Unpublish'), reverse('wagtailadmin_pages:unpublish', args=[page.id]), 107 attrs={'title': _('Unpublish this page')}, priority=40) 108 if not page.is_root(): 109 yield Button(_('Revisions'), reverse('wagtailadmin_pages:revisions_index', args=[page.id]), 110 attrs={'title': _("View this page's revision history")}, priority=50) 111 [end of wagtail/wagtailadmin/wagtail_hooks.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/wagtail/wagtailadmin/wagtail_hooks.py b/wagtail/wagtailadmin/wagtail_hooks.py --- a/wagtail/wagtailadmin/wagtail_hooks.py +++ b/wagtail/wagtailadmin/wagtail_hooks.py @@ -70,10 +70,10 @@ yield PageListingButton(_('Edit'), reverse('wagtailadmin_pages:edit', args=[page.id]), attrs={'title': _('Edit this page')}, priority=10) if page.has_unpublished_changes: - yield PageListingButton(_('Draft'), reverse('wagtailadmin_pages:view_draft', args=[page.id]), + yield PageListingButton(_('View draft'), reverse('wagtailadmin_pages:view_draft', args=[page.id]), attrs={'title': _('Preview draft'), 'target': '_blank'}, priority=20) if page.live and page.url: - yield PageListingButton(_('Live'), page.url, attrs={'target': "_blank", 'title': _('View live')}, priority=30) + yield PageListingButton(_('View live'), page.url, attrs={'target': "_blank", 'title': _('View live')}, priority=30) if page_perms.can_add_subpage(): if is_parent: yield Button(_('Add child page'), reverse('wagtailadmin_pages:add_subpage', args=[page.id]),
{"golden_diff": "diff --git a/wagtail/wagtailadmin/wagtail_hooks.py b/wagtail/wagtailadmin/wagtail_hooks.py\n--- a/wagtail/wagtailadmin/wagtail_hooks.py\n+++ b/wagtail/wagtailadmin/wagtail_hooks.py\n@@ -70,10 +70,10 @@\n yield PageListingButton(_('Edit'), reverse('wagtailadmin_pages:edit', args=[page.id]),\n attrs={'title': _('Edit this page')}, priority=10)\n if page.has_unpublished_changes:\n- yield PageListingButton(_('Draft'), reverse('wagtailadmin_pages:view_draft', args=[page.id]),\n+ yield PageListingButton(_('View draft'), reverse('wagtailadmin_pages:view_draft', args=[page.id]),\n attrs={'title': _('Preview draft'), 'target': '_blank'}, priority=20)\n if page.live and page.url:\n- yield PageListingButton(_('Live'), page.url, attrs={'target': \"_blank\", 'title': _('View live')}, priority=30)\n+ yield PageListingButton(_('View live'), page.url, attrs={'target': \"_blank\", 'title': _('View live')}, priority=30)\n if page_perms.can_add_subpage():\n if is_parent:\n yield Button(_('Add child page'), reverse('wagtailadmin_pages:add_subpage', args=[page.id]),\n", "issue": "draft and live action labels are unclear \n![screen shot 2017-01-05 at 12 42 52](https://cloud.githubusercontent.com/assets/7290588/21680947/944efe00-d344-11e6-9cbd-870f91fe9d5f.png)\r\n\r\nI don't think it's clear what the 'draft' and 'live' buttons will do. Could we include the full labels 'Preview draft' and 'View live'? \n", "before_files": [{"content": "from __future__ import absolute_import, unicode_literals\n\nfrom django import forms\nfrom django.contrib.auth.models import Permission\nfrom django.contrib.staticfiles.templatetags.staticfiles import static\nfrom django.core.urlresolvers import reverse\nfrom django.utils.translation import ugettext_lazy as _\nfrom wagtail.wagtailadmin.menu import MenuItem, SubmenuMenuItem, settings_menu\nfrom wagtail.wagtailadmin.search import SearchArea\nfrom wagtail.wagtailadmin.widgets import Button, ButtonWithDropdownFromHook, PageListingButton\nfrom wagtail.wagtailcore import hooks\nfrom wagtail.wagtailcore.permissions import collection_permission_policy\n\n\nclass ExplorerMenuItem(MenuItem):\n @property\n def media(self):\n return forms.Media(js=[static('wagtailadmin/js/explorer-menu.js')])\n\n\[email protected]('register_admin_menu_item')\ndef register_explorer_menu_item():\n return ExplorerMenuItem(\n _('Explorer'), reverse('wagtailadmin_explore_root'),\n name='explorer',\n classnames='icon icon-folder-open-inverse dl-trigger',\n attrs={'data-explorer-menu-url': reverse('wagtailadmin_explorer_nav')},\n order=100)\n\n\nclass SettingsMenuItem(SubmenuMenuItem):\n template = 'wagtailadmin/shared/menu_settings_menu_item.html'\n\n\[email protected]('register_admin_menu_item')\ndef register_settings_menu():\n return SettingsMenuItem(\n _('Settings'), settings_menu, classnames='icon icon-cogs', order=10000)\n\n\[email protected]('register_permissions')\ndef register_permissions():\n return Permission.objects.filter(content_type__app_label='wagtailadmin', codename='access_admin')\n\n\[email protected]('register_admin_search_area')\ndef register_pages_search_area():\n return SearchArea(\n _('Pages'), reverse('wagtailadmin_pages:search'),\n name='pages',\n classnames='icon icon-folder-open-inverse',\n order=100)\n\n\nclass CollectionsMenuItem(MenuItem):\n def is_shown(self, request):\n return collection_permission_policy.user_has_any_permission(\n request.user, ['add', 'change', 'delete']\n )\n\n\[email protected]('register_settings_menu_item')\ndef 
register_collections_menu_item():\n return CollectionsMenuItem(_('Collections'), reverse('wagtailadmin_collections:index'), classnames='icon icon-folder-open-1', order=700)\n\n\[email protected]('register_page_listing_buttons')\ndef page_listing_buttons(page, page_perms, is_parent=False):\n if page_perms.can_edit():\n yield PageListingButton(_('Edit'), reverse('wagtailadmin_pages:edit', args=[page.id]),\n attrs={'title': _('Edit this page')}, priority=10)\n if page.has_unpublished_changes:\n yield PageListingButton(_('Draft'), reverse('wagtailadmin_pages:view_draft', args=[page.id]),\n attrs={'title': _('Preview draft'), 'target': '_blank'}, priority=20)\n if page.live and page.url:\n yield PageListingButton(_('Live'), page.url, attrs={'target': \"_blank\", 'title': _('View live')}, priority=30)\n if page_perms.can_add_subpage():\n if is_parent:\n yield Button(_('Add child page'), reverse('wagtailadmin_pages:add_subpage', args=[page.id]),\n attrs={'title': _(\"Add a child page to '{0}' \").format(page.get_admin_display_title())}, classes={'button', 'button-small', 'bicolor', 'icon', 'white', 'icon-plus'}, priority=40)\n else:\n yield PageListingButton(_('Add child page'), reverse('wagtailadmin_pages:add_subpage', args=[page.id]),\n attrs={'title': _(\"Add a child page to '{0}' \").format(page.get_admin_display_title())}, priority=40)\n\n yield ButtonWithDropdownFromHook(\n _('More'),\n hook_name='register_page_listing_more_buttons',\n page=page,\n page_perms=page_perms,\n is_parent=is_parent,\n attrs={'target': '_blank', 'title': _('View more options')}, priority=50)\n\n\[email protected]('register_page_listing_more_buttons')\ndef page_listing_more_buttons(page, page_perms, is_parent=False):\n if page_perms.can_move():\n yield Button(_('Move'), reverse('wagtailadmin_pages:move', args=[page.id]),\n attrs={\"title\": _('Move this page')}, priority=10)\n if not page.is_root():\n yield Button(_('Copy'), reverse('wagtailadmin_pages:copy', args=[page.id]),\n attrs={'title': _('Copy this page')}, priority=20)\n if page_perms.can_delete():\n yield Button(_('Delete'), reverse('wagtailadmin_pages:delete', args=[page.id]),\n attrs={'title': _('Delete this page')}, priority=30)\n if page_perms.can_unpublish():\n yield Button(_('Unpublish'), reverse('wagtailadmin_pages:unpublish', args=[page.id]),\n attrs={'title': _('Unpublish this page')}, priority=40)\n if not page.is_root():\n yield Button(_('Revisions'), reverse('wagtailadmin_pages:revisions_index', args=[page.id]),\n attrs={'title': _(\"View this page's revision history\")}, priority=50)\n", "path": "wagtail/wagtailadmin/wagtail_hooks.py"}]}
2,022
302
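Editor's note on the row above (Wagtail button labels): this golden_diff is a pure wording change, relabelling 'Draft'/'Live' to 'View draft'/'View live' so the buttons say what they do, as the issue asks. The patched hook lines, wrapped in a helper of my own naming for a runnable excerpt; the imports match those in the row's `wagtail_hooks.py`.

```python
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from wagtail.wagtailadmin.widgets import PageListingButton

def draft_and_live_buttons(page):
    # labels now describe the action, not just the page state
    if page.has_unpublished_changes:
        yield PageListingButton(_('View draft'),
                                reverse('wagtailadmin_pages:view_draft', args=[page.id]),
                                attrs={'title': _('Preview draft'), 'target': '_blank'}, priority=20)
    if page.live and page.url:
        yield PageListingButton(_('View live'), page.url,
                                attrs={'target': '_blank', 'title': _('View live')}, priority=30)
```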
gh_patches_debug_14487
rasdani/github-patches
git_diff
svthalia__concrexit-2853
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Make API redirects respect query params <!-- Please add the appropriate label for what change should be made: docs: changes to the documentation) refactor: refactoring production code, eg. renaming a variable or rewriting a function test: adding missing tests, refactoring tests; no production code change chore: updating poetry etc; no production code change --> ### Describe the change As seen in #2828 and #2821, some old API endpoints are redirected to their new replacements. This redirection does not propagate any query parameters. It would be nice to pass query parameters along when redirecting to a replacement API endpoint to prevent breaking consumers. ### Motivation #2828 #2821 </issue> <code> [start of website/partners/api/v2/urls.py] 1 """Partners app API v2 urls.""" 2 from django.urls import path 3 from django.views.generic import RedirectView 4 5 from partners.api.v2.views import ( 6 PartnerDetailView, 7 PartnerListView, 8 VacancyCategoryListView, 9 VacancyDetailView, 10 VacancyListView, 11 ) 12 13 app_name = "partners" 14 15 urlpatterns = [ 16 path( 17 "partners/events/", 18 RedirectView.as_view( 19 pattern_name="api:v2:events:external-events-list", permanent=False 20 ), 21 name="partner-events-list", 22 ), 23 path( 24 "partners/events/<int:pk>/", 25 RedirectView.as_view( 26 pattern_name="api:v2:events:external-event-detail", permanent=False 27 ), 28 name="partner-events-detail", 29 ), 30 path("partners/vacancies/", VacancyListView.as_view(), name="vacancies-list"), 31 path( 32 "partners/vacancies/categories/", 33 VacancyCategoryListView.as_view(), 34 name="vacancy-categories-list", 35 ), 36 path( 37 "partners/vacancies/<int:pk>/", 38 VacancyDetailView.as_view(), 39 name="vacancies-detail", 40 ), 41 path("partners/", PartnerListView.as_view(), name="partners-list"), 42 path("partners/<int:pk>/", PartnerDetailView.as_view(), name="partners-detail"), 43 ] 44 [end of website/partners/api/v2/urls.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/website/partners/api/v2/urls.py b/website/partners/api/v2/urls.py --- a/website/partners/api/v2/urls.py +++ b/website/partners/api/v2/urls.py @@ -16,14 +16,18 @@ path( "partners/events/", RedirectView.as_view( - pattern_name="api:v2:events:external-events-list", permanent=False + pattern_name="api:v2:events:external-events-list", + permanent=False, + query_string=True, ), name="partner-events-list", ), path( "partners/events/<int:pk>/", RedirectView.as_view( - pattern_name="api:v2:events:external-event-detail", permanent=False + pattern_name="api:v2:events:external-event-detail", + permanent=False, + query_string=True, ), name="partner-events-detail", ),
{"golden_diff": "diff --git a/website/partners/api/v2/urls.py b/website/partners/api/v2/urls.py\n--- a/website/partners/api/v2/urls.py\n+++ b/website/partners/api/v2/urls.py\n@@ -16,14 +16,18 @@\n path(\n \"partners/events/\",\n RedirectView.as_view(\n- pattern_name=\"api:v2:events:external-events-list\", permanent=False\n+ pattern_name=\"api:v2:events:external-events-list\",\n+ permanent=False,\n+ query_string=True,\n ),\n name=\"partner-events-list\",\n ),\n path(\n \"partners/events/<int:pk>/\",\n RedirectView.as_view(\n- pattern_name=\"api:v2:events:external-event-detail\", permanent=False\n+ pattern_name=\"api:v2:events:external-event-detail\",\n+ permanent=False,\n+ query_string=True,\n ),\n name=\"partner-events-detail\",\n ),\n", "issue": "Make API redirects respect query params\n<!--\n\nPlease add the appropriate label for what change should be made:\ndocs: changes to the documentation)\nrefactor: refactoring production code, eg. renaming a variable or rewriting a function\ntest: adding missing tests, refactoring tests; no production code change\nchore: updating poetry etc; no production code change\n\n-->\n\n### Describe the change\nAs seen in #2828 and #2821, some old API endpoints are redirected to their new replacements. This redirection does not propagate any query parameters. It would be nice to pass query parameters along when redirecting to a replacement API endpoint to prevent breaking consumers.\n\n### Motivation\n#2828 #2821\n", "before_files": [{"content": "\"\"\"Partners app API v2 urls.\"\"\"\nfrom django.urls import path\nfrom django.views.generic import RedirectView\n\nfrom partners.api.v2.views import (\n PartnerDetailView,\n PartnerListView,\n VacancyCategoryListView,\n VacancyDetailView,\n VacancyListView,\n)\n\napp_name = \"partners\"\n\nurlpatterns = [\n path(\n \"partners/events/\",\n RedirectView.as_view(\n pattern_name=\"api:v2:events:external-events-list\", permanent=False\n ),\n name=\"partner-events-list\",\n ),\n path(\n \"partners/events/<int:pk>/\",\n RedirectView.as_view(\n pattern_name=\"api:v2:events:external-event-detail\", permanent=False\n ),\n name=\"partner-events-detail\",\n ),\n path(\"partners/vacancies/\", VacancyListView.as_view(), name=\"vacancies-list\"),\n path(\n \"partners/vacancies/categories/\",\n VacancyCategoryListView.as_view(),\n name=\"vacancy-categories-list\",\n ),\n path(\n \"partners/vacancies/<int:pk>/\",\n VacancyDetailView.as_view(),\n name=\"vacancies-detail\",\n ),\n path(\"partners/\", PartnerListView.as_view(), name=\"partners-list\"),\n path(\"partners/<int:pk>/\", PartnerDetailView.as_view(), name=\"partners-detail\"),\n]\n", "path": "website/partners/api/v2/urls.py"}]}
1,044
209
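Editor's note on the row above (concrexit API redirects): Django's `RedirectView` drops the incoming query string unless `query_string=True` is set, and adding that flag is the entire fix. One patched route in full:

```python
from django.urls import path
from django.views.generic import RedirectView

urlpatterns = [
    path(
        "partners/events/",
        RedirectView.as_view(
            pattern_name="api:v2:events:external-events-list",
            permanent=False,
            query_string=True,  # forward the incoming query string to the new endpoint
        ),
        name="partner-events-list",
    ),
]
```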
gh_patches_debug_8139
rasdani/github-patches
git_diff
readthedocs__readthedocs.org-5957
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Improve msg error for submodules with invalid url schemas We only support `http` schemas, but the error message isn't clear about that. Here is a good suggestion: https://github.com/readthedocs/readthedocs.org/issues/5928#issuecomment-511708197 </issue> <code> [start of readthedocs/projects/exceptions.py] 1 # -*- coding: utf-8 -*- 2 3 """Project exceptions.""" 4 5 from django.conf import settings 6 from django.utils.translation import ugettext_noop as _ 7 8 from readthedocs.doc_builder.exceptions import BuildEnvironmentError 9 10 11 class ProjectConfigurationError(BuildEnvironmentError): 12 13 """Error raised trying to configure a project for build.""" 14 15 NOT_FOUND = _( 16 'A configuration file was not found. ' 17 'Make sure you have a conf.py file in your repository.', 18 ) 19 20 MULTIPLE_CONF_FILES = _( 21 'We found more than one conf.py and are not sure which one to use. ' 22 'Please, specify the correct file under the Advanced settings tab ' 23 "in the project's Admin.", 24 ) 25 26 27 class RepositoryError(BuildEnvironmentError): 28 29 """Failure during repository operation.""" 30 31 PRIVATE_ALLOWED = _( 32 'There was a problem connecting to your repository, ' 33 'ensure that your repository URL is correct.', 34 ) 35 PRIVATE_NOT_ALLOWED = _( 36 'There was a problem connecting to your repository, ' 37 'ensure that your repository URL is correct and your repository is public. ' 38 'Private repositories are not supported.', 39 ) 40 41 INVALID_SUBMODULES = _('One or more submodule URLs are not valid: {}.') 42 INVALID_SUBMODULES_PATH = _( 43 'One or more submodule paths are not valid. ' 44 'Check that all your submodules in .gitmodules are used.' 45 ) 46 47 DUPLICATED_RESERVED_VERSIONS = _( 48 'You can not have two versions with the name latest or stable.', 49 ) 50 51 FAILED_TO_CHECKOUT = _('Failed to checkout revision: {}') 52 53 def get_default_message(self): 54 if settings.ALLOW_PRIVATE_REPOS: 55 return self.PRIVATE_ALLOWED 56 return self.PRIVATE_NOT_ALLOWED 57 58 59 class ProjectSpamError(Exception): 60 61 """ 62 Error raised when a project field has detected spam. 63 64 This error is not raised to users, we use this for banning users in the 65 background. 66 """ 67 [end of readthedocs/projects/exceptions.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/readthedocs/projects/exceptions.py b/readthedocs/projects/exceptions.py --- a/readthedocs/projects/exceptions.py +++ b/readthedocs/projects/exceptions.py @@ -38,7 +38,10 @@ 'Private repositories are not supported.', ) - INVALID_SUBMODULES = _('One or more submodule URLs are not valid: {}.') + INVALID_SUBMODULES = _( + 'One or more submodule URLs are not valid: {}, ' + 'git/ssh URL schemas for submodules are not supported.' + ) INVALID_SUBMODULES_PATH = _( 'One or more submodule paths are not valid. ' 'Check that all your submodules in .gitmodules are used.'
{"golden_diff": "diff --git a/readthedocs/projects/exceptions.py b/readthedocs/projects/exceptions.py\n--- a/readthedocs/projects/exceptions.py\n+++ b/readthedocs/projects/exceptions.py\n@@ -38,7 +38,10 @@\n 'Private repositories are not supported.',\n )\n \n- INVALID_SUBMODULES = _('One or more submodule URLs are not valid: {}.')\n+ INVALID_SUBMODULES = _(\n+ 'One or more submodule URLs are not valid: {}, '\n+ 'git/ssh URL schemas for submodules are not supported.'\n+ )\n INVALID_SUBMODULES_PATH = _(\n 'One or more submodule paths are not valid. '\n 'Check that all your submodules in .gitmodules are used.'\n", "issue": "Improve msg error for submodules with invalid url schemas\nWe only support `http` schemas, but the error message isn't clear about that. Here is a good suggestion: https://github.com/readthedocs/readthedocs.org/issues/5928#issuecomment-511708197\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Project exceptions.\"\"\"\n\nfrom django.conf import settings\nfrom django.utils.translation import ugettext_noop as _\n\nfrom readthedocs.doc_builder.exceptions import BuildEnvironmentError\n\n\nclass ProjectConfigurationError(BuildEnvironmentError):\n\n \"\"\"Error raised trying to configure a project for build.\"\"\"\n\n NOT_FOUND = _(\n 'A configuration file was not found. '\n 'Make sure you have a conf.py file in your repository.',\n )\n\n MULTIPLE_CONF_FILES = _(\n 'We found more than one conf.py and are not sure which one to use. '\n 'Please, specify the correct file under the Advanced settings tab '\n \"in the project's Admin.\",\n )\n\n\nclass RepositoryError(BuildEnvironmentError):\n\n \"\"\"Failure during repository operation.\"\"\"\n\n PRIVATE_ALLOWED = _(\n 'There was a problem connecting to your repository, '\n 'ensure that your repository URL is correct.',\n )\n PRIVATE_NOT_ALLOWED = _(\n 'There was a problem connecting to your repository, '\n 'ensure that your repository URL is correct and your repository is public. '\n 'Private repositories are not supported.',\n )\n\n INVALID_SUBMODULES = _('One or more submodule URLs are not valid: {}.')\n INVALID_SUBMODULES_PATH = _(\n 'One or more submodule paths are not valid. '\n 'Check that all your submodules in .gitmodules are used.'\n )\n\n DUPLICATED_RESERVED_VERSIONS = _(\n 'You can not have two versions with the name latest or stable.',\n )\n\n FAILED_TO_CHECKOUT = _('Failed to checkout revision: {}')\n\n def get_default_message(self):\n if settings.ALLOW_PRIVATE_REPOS:\n return self.PRIVATE_ALLOWED\n return self.PRIVATE_NOT_ALLOWED\n\n\nclass ProjectSpamError(Exception):\n\n \"\"\"\n Error raised when a project field has detected spam.\n\n This error is not raised to users, we use this for banning users in the\n background.\n \"\"\"\n", "path": "readthedocs/projects/exceptions.py"}]}
1,156
161
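Editor's note on the row above (Read the Docs submodule error): the golden_diff only rewords one error string so users learn that git/ssh submodule URL schemas are unsupported; no behaviour changes. The constant as patched, with the module's own `ugettext_noop` import included for a self-contained excerpt:

```python
from django.utils.translation import ugettext_noop as _

INVALID_SUBMODULES = _(
    'One or more submodule URLs are not valid: {}, '
    'git/ssh URL schemas for submodules are not supported.'
)
```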
gh_patches_debug_16298
rasdani/github-patches
git_diff
getsentry__sentry-python-612
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> DeprecationWarning in sqlalchemy integration At integrations/sqlalchemy.py line 28 - integration is listening for dbapi_error, this was deprecated in sqlalchemy a long time ago and should be replaced with handle_error event </issue> <code> [start of sentry_sdk/integrations/sqlalchemy.py] 1 from __future__ import absolute_import 2 3 from sentry_sdk._types import MYPY 4 from sentry_sdk.hub import Hub 5 from sentry_sdk.integrations import Integration 6 from sentry_sdk.tracing import record_sql_queries 7 8 from sqlalchemy.engine import Engine # type: ignore 9 from sqlalchemy.event import listen # type: ignore 10 11 if MYPY: 12 from typing import Any 13 from typing import ContextManager 14 from typing import Optional 15 16 from sentry_sdk.tracing import Span 17 18 19 class SqlalchemyIntegration(Integration): 20 identifier = "sqlalchemy" 21 22 @staticmethod 23 def setup_once(): 24 # type: () -> None 25 26 listen(Engine, "before_cursor_execute", _before_cursor_execute) 27 listen(Engine, "after_cursor_execute", _after_cursor_execute) 28 listen(Engine, "dbapi_error", _dbapi_error) 29 30 31 def _before_cursor_execute( 32 conn, cursor, statement, parameters, context, executemany, *args 33 ): 34 # type: (Any, Any, Any, Any, Any, bool, *Any) -> None 35 hub = Hub.current 36 if hub.get_integration(SqlalchemyIntegration) is None: 37 return 38 39 ctx_mgr = record_sql_queries( 40 hub, 41 cursor, 42 statement, 43 parameters, 44 paramstyle=context and context.dialect and context.dialect.paramstyle or None, 45 executemany=executemany, 46 ) 47 conn._sentry_sql_span_manager = ctx_mgr 48 49 span = ctx_mgr.__enter__() 50 51 if span is not None: 52 conn._sentry_sql_span = span 53 54 55 def _after_cursor_execute(conn, cursor, statement, *args): 56 # type: (Any, Any, Any, *Any) -> None 57 ctx_mgr = getattr( 58 conn, "_sentry_sql_span_manager", None 59 ) # type: ContextManager[Any] 60 61 if ctx_mgr is not None: 62 conn._sentry_sql_span_manager = None 63 ctx_mgr.__exit__(None, None, None) 64 65 66 def _dbapi_error(conn, *args): 67 # type: (Any, *Any) -> None 68 span = getattr(conn, "_sentry_sql_span", None) # type: Optional[Span] 69 70 if span is not None: 71 span.set_status("internal_error") 72 [end of sentry_sdk/integrations/sqlalchemy.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/sentry_sdk/integrations/sqlalchemy.py b/sentry_sdk/integrations/sqlalchemy.py --- a/sentry_sdk/integrations/sqlalchemy.py +++ b/sentry_sdk/integrations/sqlalchemy.py @@ -25,7 +25,7 @@ listen(Engine, "before_cursor_execute", _before_cursor_execute) listen(Engine, "after_cursor_execute", _after_cursor_execute) - listen(Engine, "dbapi_error", _dbapi_error) + listen(Engine, "handle_error", _handle_error) def _before_cursor_execute( @@ -63,8 +63,9 @@ ctx_mgr.__exit__(None, None, None) -def _dbapi_error(conn, *args): +def _handle_error(context, *args): # type: (Any, *Any) -> None + conn = context.connection span = getattr(conn, "_sentry_sql_span", None) # type: Optional[Span] if span is not None:
{"golden_diff": "diff --git a/sentry_sdk/integrations/sqlalchemy.py b/sentry_sdk/integrations/sqlalchemy.py\n--- a/sentry_sdk/integrations/sqlalchemy.py\n+++ b/sentry_sdk/integrations/sqlalchemy.py\n@@ -25,7 +25,7 @@\n \n listen(Engine, \"before_cursor_execute\", _before_cursor_execute)\n listen(Engine, \"after_cursor_execute\", _after_cursor_execute)\n- listen(Engine, \"dbapi_error\", _dbapi_error)\n+ listen(Engine, \"handle_error\", _handle_error)\n \n \n def _before_cursor_execute(\n@@ -63,8 +63,9 @@\n ctx_mgr.__exit__(None, None, None)\n \n \n-def _dbapi_error(conn, *args):\n+def _handle_error(context, *args):\n # type: (Any, *Any) -> None\n+ conn = context.connection\n span = getattr(conn, \"_sentry_sql_span\", None) # type: Optional[Span]\n \n if span is not None:\n", "issue": "DeprecationWarning in sqlalchemy integration\nAt integrations/sqlalchemy.py line 28 - integration is listening for dbapi_error, this was deprecated in sqlalchemy a long time ago and should be replaced with handle_error event\n", "before_files": [{"content": "from __future__ import absolute_import\n\nfrom sentry_sdk._types import MYPY\nfrom sentry_sdk.hub import Hub\nfrom sentry_sdk.integrations import Integration\nfrom sentry_sdk.tracing import record_sql_queries\n\nfrom sqlalchemy.engine import Engine # type: ignore\nfrom sqlalchemy.event import listen # type: ignore\n\nif MYPY:\n from typing import Any\n from typing import ContextManager\n from typing import Optional\n\n from sentry_sdk.tracing import Span\n\n\nclass SqlalchemyIntegration(Integration):\n identifier = \"sqlalchemy\"\n\n @staticmethod\n def setup_once():\n # type: () -> None\n\n listen(Engine, \"before_cursor_execute\", _before_cursor_execute)\n listen(Engine, \"after_cursor_execute\", _after_cursor_execute)\n listen(Engine, \"dbapi_error\", _dbapi_error)\n\n\ndef _before_cursor_execute(\n conn, cursor, statement, parameters, context, executemany, *args\n):\n # type: (Any, Any, Any, Any, Any, bool, *Any) -> None\n hub = Hub.current\n if hub.get_integration(SqlalchemyIntegration) is None:\n return\n\n ctx_mgr = record_sql_queries(\n hub,\n cursor,\n statement,\n parameters,\n paramstyle=context and context.dialect and context.dialect.paramstyle or None,\n executemany=executemany,\n )\n conn._sentry_sql_span_manager = ctx_mgr\n\n span = ctx_mgr.__enter__()\n\n if span is not None:\n conn._sentry_sql_span = span\n\n\ndef _after_cursor_execute(conn, cursor, statement, *args):\n # type: (Any, Any, Any, *Any) -> None\n ctx_mgr = getattr(\n conn, \"_sentry_sql_span_manager\", None\n ) # type: ContextManager[Any]\n\n if ctx_mgr is not None:\n conn._sentry_sql_span_manager = None\n ctx_mgr.__exit__(None, None, None)\n\n\ndef _dbapi_error(conn, *args):\n # type: (Any, *Any) -> None\n span = getattr(conn, \"_sentry_sql_span\", None) # type: Optional[Span]\n\n if span is not None:\n span.set_status(\"internal_error\")\n", "path": "sentry_sdk/integrations/sqlalchemy.py"}]}
1,229
224
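Editor's note on the row above (sentry-python / SQLAlchemy): the golden_diff swaps the long-deprecated `dbapi_error` engine event for `handle_error`. The new event passes an `ExceptionContext` rather than the connection itself, hence the `context.connection` lookup. The patched wiring in essence; in the real integration the `listen` call sits inside `setup_once`:

```python
from sqlalchemy.engine import Engine
from sqlalchemy.event import listen

def _handle_error(context, *args):
    # handle_error delivers an ExceptionContext; the connection hangs off it
    conn = context.connection
    span = getattr(conn, "_sentry_sql_span", None)
    if span is not None:
        span.set_status("internal_error")

listen(Engine, "handle_error", _handle_error)  # was: "dbapi_error"
```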
gh_patches_debug_1197
rasdani/github-patches
git_diff
keras-team__keras-nlp-1166
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add compute_output_shape method to WordPieceTokenizer When we run Pretraining Transformer from Scratch guide with PyTorch and JAX backend, it raises ``` RuntimeError: Exception encountered when calling WordPieceTokenizer.call(). Could not automatically infer the output shape / dtype of 'word_piece_tokenizer_1' (of type WordPieceTokenizer). Either the `WordPieceTokenizer.call()` method is incorrect, or you need to implement the `WordPieceTokenizer.compute_output_spec() / compute_output_shape()` method. Error encountered: 'string' Arguments received by WordPieceTokenizer.call(): • args=('<KerasTensor shape=(None,), dtype=string, name=keras_tensor_59>',) • kwargs=<class 'inspect._empty'> ``` cc: @mattdangerw </issue> <code> [start of keras_nlp/tokenizers/tokenizer.py] 1 # Copyright 2023 The KerasNLP Authors 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # https://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 from typing import List 16 17 from keras_nlp.api_export import keras_nlp_export 18 from keras_nlp.layers.preprocessing.preprocessing_layer import ( 19 PreprocessingLayer, 20 ) 21 22 23 @keras_nlp_export("keras_nlp.tokenizers.Tokenizer") 24 class Tokenizer(PreprocessingLayer): 25 """A base class for tokenizer layers. 26 27 Tokenizers in the KerasNLP library should all subclass this layer. 28 The class provides two core methods `tokenize()` and `detokenize()` for 29 going from plain text to sequences and back. A tokenizer is a subclass of 30 `keras.layers.Layer` and can be combined into a `keras.Model`. 31 32 Subclassers should always implement the `tokenize()` method, which will also 33 be the default when calling the layer directly on inputs. 34 35 Subclassers can optionally implement the `detokenize()` method if the 36 tokenization is reversible. Otherwise, this can be skipped. 37 38 Subclassers should implement `get_vocabulary()`, `vocabulary_size()`, 39 `token_to_id()` and `id_to_token()` if applicable. For some simple 40 "vocab free" tokenizers, such as a whitespace splitter show below, these 41 methods do not apply and can be skipped. 42 43 Examples: 44 45 ```python 46 class WhitespaceSplitterTokenizer(keras_nlp.tokenizers.Tokenizer): 47 def tokenize(self, inputs): 48 return tf.strings.split(inputs) 49 50 def detokenize(self, inputs): 51 return tf.strings.reduce_join(inputs, separator=" ", axis=-1) 52 53 tokenizer = WhitespaceSplitterTokenizer() 54 55 # Tokenize some inputs. 56 tokenizer.tokenize("This is a test") 57 58 # Shorthard for `tokenize()`. 59 tokenizer("This is a test") 60 61 # Detokenize some outputs. 62 tokenizer.detokenize(["This", "is", "a", "test"]) 63 ``` 64 """ 65 66 def __init__(self, *args, **kwargs): 67 super().__init__(*args, **kwargs) 68 69 def tokenize(self, inputs, *args, **kwargs): 70 """Transform input tensors of strings into output tokens. 71 72 Args: 73 inputs: Input tensor, or dict/list/tuple of input tensors. 74 *args: Additional positional arguments. 
75 **kwargs: Additional keyword arguments. 76 """ 77 raise NotImplementedError( 78 "No implementation of `tokenize()` was found for " 79 f"{self.__class__.__name__}. All tokenizers should implement " 80 "`tokenize()`." 81 ) 82 83 def detokenize(self, inputs, *args, **kwargs): 84 """Transform tokens back into strings. 85 86 Args: 87 inputs: Input tensor, or dict/list/tuple of input tensors. 88 *args: Additional positional arguments. 89 **kwargs: Additional keyword arguments. 90 """ 91 raise NotImplementedError( 92 "No implementation of `detokenize()` was found for " 93 f"{self.__class__.__name__}." 94 ) 95 96 def get_vocabulary(self) -> List[str]: 97 """Get the tokenizer vocabulary as a list of strings terms.""" 98 raise NotImplementedError( 99 "No implementation of `get_vocabulary()` was found for " 100 f"{self.__class__.__name__}." 101 ) 102 103 def vocabulary_size(self) -> int: 104 """Returns the total size of the token id space.""" 105 raise NotImplementedError( 106 "No implementation of `vocabulary_size()` was found for " 107 f"{self.__class__.__name__}." 108 ) 109 110 def id_to_token(self, id: int) -> str: 111 """Convert an integer id to a string token.""" 112 raise NotImplementedError( 113 "No implementation of `id_to_token()` was found for " 114 f"{self.__class__.__name__}." 115 ) 116 117 def token_to_id(self, token: str) -> int: 118 """Convert an integer id to a string token.""" 119 raise NotImplementedError( 120 "No implementation of `id_to_token()` was found for " 121 f"{self.__class__.__name__}." 122 ) 123 124 def call(self, inputs, *args, training=None, **kwargs): 125 return self.tokenize(inputs, *args, **kwargs) 126 [end of keras_nlp/tokenizers/tokenizer.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/keras_nlp/tokenizers/tokenizer.py b/keras_nlp/tokenizers/tokenizer.py --- a/keras_nlp/tokenizers/tokenizer.py +++ b/keras_nlp/tokenizers/tokenizer.py @@ -123,3 +123,6 @@ def call(self, inputs, *args, training=None, **kwargs): return self.tokenize(inputs, *args, **kwargs) + + def compute_output_shape(self, inputs_shape): + return tuple(inputs_shape) + (self.sequence_length,)
{"golden_diff": "diff --git a/keras_nlp/tokenizers/tokenizer.py b/keras_nlp/tokenizers/tokenizer.py\n--- a/keras_nlp/tokenizers/tokenizer.py\n+++ b/keras_nlp/tokenizers/tokenizer.py\n@@ -123,3 +123,6 @@\n \n def call(self, inputs, *args, training=None, **kwargs):\n return self.tokenize(inputs, *args, **kwargs)\n+\n+ def compute_output_shape(self, inputs_shape):\n+ return tuple(inputs_shape) + (self.sequence_length,)\n", "issue": "Add compute_output_shape method to WordPieceTokenizer\nWhen we run Pretraining Transformer from Scratch guide with PyTorch and JAX backend, it raises\r\n\r\n```\r\n\r\nRuntimeError: Exception encountered when calling WordPieceTokenizer.call().\r\n\r\nCould not automatically infer the output shape / dtype of 'word_piece_tokenizer_1' (of type WordPieceTokenizer). Either the `WordPieceTokenizer.call()` method is incorrect, or you need to implement the `WordPieceTokenizer.compute_output_spec() / compute_output_shape()` method. Error encountered:\r\n\r\n'string'\r\n\r\nArguments received by WordPieceTokenizer.call():\r\n \u2022 args=('<KerasTensor shape=(None,), dtype=string, name=keras_tensor_59>',)\r\n \u2022 kwargs=<class 'inspect._empty'>\r\n```\r\n\r\ncc: @mattdangerw \n", "before_files": [{"content": "# Copyright 2023 The KerasNLP Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import List\n\nfrom keras_nlp.api_export import keras_nlp_export\nfrom keras_nlp.layers.preprocessing.preprocessing_layer import (\n PreprocessingLayer,\n)\n\n\n@keras_nlp_export(\"keras_nlp.tokenizers.Tokenizer\")\nclass Tokenizer(PreprocessingLayer):\n \"\"\"A base class for tokenizer layers.\n\n Tokenizers in the KerasNLP library should all subclass this layer.\n The class provides two core methods `tokenize()` and `detokenize()` for\n going from plain text to sequences and back. A tokenizer is a subclass of\n `keras.layers.Layer` and can be combined into a `keras.Model`.\n\n Subclassers should always implement the `tokenize()` method, which will also\n be the default when calling the layer directly on inputs.\n\n Subclassers can optionally implement the `detokenize()` method if the\n tokenization is reversible. Otherwise, this can be skipped.\n\n Subclassers should implement `get_vocabulary()`, `vocabulary_size()`,\n `token_to_id()` and `id_to_token()` if applicable. 
For some simple\n \"vocab free\" tokenizers, such as a whitespace splitter show below, these\n methods do not apply and can be skipped.\n\n Examples:\n\n ```python\n class WhitespaceSplitterTokenizer(keras_nlp.tokenizers.Tokenizer):\n def tokenize(self, inputs):\n return tf.strings.split(inputs)\n\n def detokenize(self, inputs):\n return tf.strings.reduce_join(inputs, separator=\" \", axis=-1)\n\n tokenizer = WhitespaceSplitterTokenizer()\n\n # Tokenize some inputs.\n tokenizer.tokenize(\"This is a test\")\n\n # Shorthard for `tokenize()`.\n tokenizer(\"This is a test\")\n\n # Detokenize some outputs.\n tokenizer.detokenize([\"This\", \"is\", \"a\", \"test\"])\n ```\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def tokenize(self, inputs, *args, **kwargs):\n \"\"\"Transform input tensors of strings into output tokens.\n\n Args:\n inputs: Input tensor, or dict/list/tuple of input tensors.\n *args: Additional positional arguments.\n **kwargs: Additional keyword arguments.\n \"\"\"\n raise NotImplementedError(\n \"No implementation of `tokenize()` was found for \"\n f\"{self.__class__.__name__}. All tokenizers should implement \"\n \"`tokenize()`.\"\n )\n\n def detokenize(self, inputs, *args, **kwargs):\n \"\"\"Transform tokens back into strings.\n\n Args:\n inputs: Input tensor, or dict/list/tuple of input tensors.\n *args: Additional positional arguments.\n **kwargs: Additional keyword arguments.\n \"\"\"\n raise NotImplementedError(\n \"No implementation of `detokenize()` was found for \"\n f\"{self.__class__.__name__}.\"\n )\n\n def get_vocabulary(self) -> List[str]:\n \"\"\"Get the tokenizer vocabulary as a list of strings terms.\"\"\"\n raise NotImplementedError(\n \"No implementation of `get_vocabulary()` was found for \"\n f\"{self.__class__.__name__}.\"\n )\n\n def vocabulary_size(self) -> int:\n \"\"\"Returns the total size of the token id space.\"\"\"\n raise NotImplementedError(\n \"No implementation of `vocabulary_size()` was found for \"\n f\"{self.__class__.__name__}.\"\n )\n\n def id_to_token(self, id: int) -> str:\n \"\"\"Convert an integer id to a string token.\"\"\"\n raise NotImplementedError(\n \"No implementation of `id_to_token()` was found for \"\n f\"{self.__class__.__name__}.\"\n )\n\n def token_to_id(self, token: str) -> int:\n \"\"\"Convert an integer id to a string token.\"\"\"\n raise NotImplementedError(\n \"No implementation of `id_to_token()` was found for \"\n f\"{self.__class__.__name__}.\"\n )\n\n def call(self, inputs, *args, training=None, **kwargs):\n return self.tokenize(inputs, *args, **kwargs)\n", "path": "keras_nlp/tokenizers/tokenizer.py"}]}
1,991
121
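Editor's note on the row above (KerasNLP tokenizer): the JAX and PyTorch backends cannot infer output specs for string tensors, so the golden_diff adds `compute_output_shape` to the base `Tokenizer`. Note the method reads `self.sequence_length`, which the base class never defines itself; it relies on subclasses such as `WordPieceTokenizer` setting it. The added method, exactly as patched plus a comment:

```python
def compute_output_shape(self, inputs_shape):
    # tokenization appends a token axis of length `sequence_length`
    return tuple(inputs_shape) + (self.sequence_length,)
```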
gh_patches_debug_9283
rasdani/github-patches
git_diff
locustio__locust-1918
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> the report cant show the right time <!-- If you have a general question about how to use Locust, please check Stack Overflow first https://stackoverflow.com/questions/tagged/locust You can also ask new questions on SO, https://stackoverflow.com/questions/ask just remember to tag your question with "locust". Do not immediately post your issue here after posting to SO, wait for an answer there instead. Use this form only for reporting actual bugs in Locust. Remember, the developers of Locust are unpaid volunteers, so make sure you have tried everything you can think of before filing a bug :) Always make sure you are running an up to date Locust version (pip3 install -U locust) --> ### Describe the bug <!-- A clear and concise description of what the bug is --> ![image](https://user-images.githubusercontent.com/47803360/138236838-6d62d47c-aa4d-4de7-b714-d94b7dff639a.png) ![image](https://user-images.githubusercontent.com/47803360/138236861-d318f852-da06-41e9-abcb-fa18d47166c7.png) this picture is right time i am in china ### Expected behavior <!-- Tell us what you think should happen --> Wrong time zone reported ### Actual behavior <!-- Tell us what happens instead. Include screenshots if this an issue with the GUI. --> ### Steps to reproduce <!-- Please provide a minimal reproducible code example (https://stackoverflow.com/help/minimal-reproducible-example) --> ### Environment - OS: - Python version: - Locust version: (please dont file issues for anything but the most recent release or prerelease builds) - Locust command line that you ran: - Locust file contents (anonymized if necessary): </issue> <code> [start of locust/html.py] 1 from jinja2 import Environment, FileSystemLoader 2 import os 3 import pathlib 4 import datetime 5 from itertools import chain 6 from .stats import sort_stats 7 from .user.inspectuser import get_task_ratio_dict 8 from html import escape 9 from json import dumps 10 11 12 def render_template(file, **kwargs): 13 templates_path = os.path.join(pathlib.Path(__file__).parent.absolute(), "templates") 14 env = Environment(loader=FileSystemLoader(templates_path), extensions=["jinja2.ext.do"]) 15 template = env.get_template(file) 16 return template.render(**kwargs) 17 18 19 def get_html_report(environment, show_download_link=True): 20 stats = environment.runner.stats 21 22 start_ts = stats.start_time 23 start_time = datetime.datetime.fromtimestamp(start_ts).strftime("%Y-%m-%d %H:%M:%S") 24 25 end_ts = stats.last_request_timestamp 26 if end_ts: 27 end_time = datetime.datetime.fromtimestamp(end_ts).strftime("%Y-%m-%d %H:%M:%S") 28 else: 29 end_time = start_time 30 31 host = None 32 if environment.host: 33 host = environment.host 34 elif environment.runner.user_classes: 35 all_hosts = set([l.host for l in environment.runner.user_classes]) 36 if len(all_hosts) == 1: 37 host = list(all_hosts)[0] 38 39 requests_statistics = list(chain(sort_stats(stats.entries), [stats.total])) 40 failures_statistics = sort_stats(stats.errors) 41 exceptions_statistics = [ 42 {**exc, "nodes": ", ".join(exc["nodes"])} for exc in environment.runner.exceptions.values() 43 ] 44 45 history = stats.history 46 47 static_js = [] 48 js_files = ["jquery-1.11.3.min.js", "echarts.common.min.js", "vintage.js", "chart.js", "tasks.js"] 49 for js_file in js_files: 50 path = os.path.join(os.path.dirname(__file__), "static", js_file) 51 static_js.append("// " + js_file) 52 with open(path, encoding="utf8") as 
f: 53 static_js.append(f.read()) 54 static_js.extend(["", ""]) 55 56 static_css = [] 57 css_files = ["tables.css"] 58 for css_file in css_files: 59 path = os.path.join(os.path.dirname(__file__), "static", "css", css_file) 60 static_css.append("/* " + css_file + " */") 61 with open(path, encoding="utf8") as f: 62 static_css.append(f.read()) 63 static_css.extend(["", ""]) 64 65 task_data = { 66 "per_class": get_task_ratio_dict(environment.user_classes), 67 "total": get_task_ratio_dict(environment.user_classes, total=True), 68 } 69 70 res = render_template( 71 "report.html", 72 int=int, 73 round=round, 74 requests_statistics=requests_statistics, 75 failures_statistics=failures_statistics, 76 exceptions_statistics=exceptions_statistics, 77 start_time=start_time, 78 end_time=end_time, 79 host=host, 80 history=history, 81 static_js="\n".join(static_js), 82 static_css="\n".join(static_css), 83 show_download_link=show_download_link, 84 locustfile=environment.locustfile, 85 tasks=escape(dumps(task_data)), 86 ) 87 88 return res 89 [end of locust/html.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/locust/html.py b/locust/html.py --- a/locust/html.py +++ b/locust/html.py @@ -20,11 +20,11 @@ stats = environment.runner.stats start_ts = stats.start_time - start_time = datetime.datetime.fromtimestamp(start_ts).strftime("%Y-%m-%d %H:%M:%S") + start_time = datetime.datetime.utcfromtimestamp(start_ts).strftime("%Y-%m-%d %H:%M:%S") end_ts = stats.last_request_timestamp if end_ts: - end_time = datetime.datetime.fromtimestamp(end_ts).strftime("%Y-%m-%d %H:%M:%S") + end_time = datetime.datetime.utcfromtimestamp(end_ts).strftime("%Y-%m-%d %H:%M:%S") else: end_time = start_time
{"golden_diff": "diff --git a/locust/html.py b/locust/html.py\n--- a/locust/html.py\n+++ b/locust/html.py\n@@ -20,11 +20,11 @@\n stats = environment.runner.stats\n \n start_ts = stats.start_time\n- start_time = datetime.datetime.fromtimestamp(start_ts).strftime(\"%Y-%m-%d %H:%M:%S\")\n+ start_time = datetime.datetime.utcfromtimestamp(start_ts).strftime(\"%Y-%m-%d %H:%M:%S\")\n \n end_ts = stats.last_request_timestamp\n if end_ts:\n- end_time = datetime.datetime.fromtimestamp(end_ts).strftime(\"%Y-%m-%d %H:%M:%S\")\n+ end_time = datetime.datetime.utcfromtimestamp(end_ts).strftime(\"%Y-%m-%d %H:%M:%S\")\n else:\n end_time = start_time\n", "issue": "the report cant show the right time\n<!-- \r\nIf you have a general question about how to use Locust, please check Stack Overflow first https://stackoverflow.com/questions/tagged/locust\r\n\r\nYou can also ask new questions on SO, https://stackoverflow.com/questions/ask just remember to tag your question with \"locust\". Do not immediately post your issue here after posting to SO, wait for an answer there instead.\r\n\r\nUse this form only for reporting actual bugs in Locust. Remember, the developers of Locust are unpaid volunteers, so make sure you have tried everything you can think of before filing a bug :) \r\n\r\nAlways make sure you are running an up to date Locust version (pip3 install -U locust)\r\n-->\r\n\r\n### Describe the bug\r\n<!-- A clear and concise description of what the bug is -->\r\n![image](https://user-images.githubusercontent.com/47803360/138236838-6d62d47c-aa4d-4de7-b714-d94b7dff639a.png)\r\n![image](https://user-images.githubusercontent.com/47803360/138236861-d318f852-da06-41e9-abcb-fa18d47166c7.png)\r\nthis picture is right time i am in china\r\n\r\n\r\n### Expected behavior\r\n<!-- Tell us what you think should happen -->\r\nWrong time zone reported\r\n### Actual behavior\r\n<!-- Tell us what happens instead. Include screenshots if this an issue with the GUI. 
-->\r\n\r\n### Steps to reproduce\r\n<!-- Please provide a minimal reproducible code example (https://stackoverflow.com/help/minimal-reproducible-example) --> \r\n\r\n### Environment\r\n\r\n- OS:\r\n- Python version:\r\n- Locust version: (please dont file issues for anything but the most recent release or prerelease builds)\r\n- Locust command line that you ran:\r\n- Locust file contents (anonymized if necessary):\n", "before_files": [{"content": "from jinja2 import Environment, FileSystemLoader\nimport os\nimport pathlib\nimport datetime\nfrom itertools import chain\nfrom .stats import sort_stats\nfrom .user.inspectuser import get_task_ratio_dict\nfrom html import escape\nfrom json import dumps\n\n\ndef render_template(file, **kwargs):\n templates_path = os.path.join(pathlib.Path(__file__).parent.absolute(), \"templates\")\n env = Environment(loader=FileSystemLoader(templates_path), extensions=[\"jinja2.ext.do\"])\n template = env.get_template(file)\n return template.render(**kwargs)\n\n\ndef get_html_report(environment, show_download_link=True):\n stats = environment.runner.stats\n\n start_ts = stats.start_time\n start_time = datetime.datetime.fromtimestamp(start_ts).strftime(\"%Y-%m-%d %H:%M:%S\")\n\n end_ts = stats.last_request_timestamp\n if end_ts:\n end_time = datetime.datetime.fromtimestamp(end_ts).strftime(\"%Y-%m-%d %H:%M:%S\")\n else:\n end_time = start_time\n\n host = None\n if environment.host:\n host = environment.host\n elif environment.runner.user_classes:\n all_hosts = set([l.host for l in environment.runner.user_classes])\n if len(all_hosts) == 1:\n host = list(all_hosts)[0]\n\n requests_statistics = list(chain(sort_stats(stats.entries), [stats.total]))\n failures_statistics = sort_stats(stats.errors)\n exceptions_statistics = [\n {**exc, \"nodes\": \", \".join(exc[\"nodes\"])} for exc in environment.runner.exceptions.values()\n ]\n\n history = stats.history\n\n static_js = []\n js_files = [\"jquery-1.11.3.min.js\", \"echarts.common.min.js\", \"vintage.js\", \"chart.js\", \"tasks.js\"]\n for js_file in js_files:\n path = os.path.join(os.path.dirname(__file__), \"static\", js_file)\n static_js.append(\"// \" + js_file)\n with open(path, encoding=\"utf8\") as f:\n static_js.append(f.read())\n static_js.extend([\"\", \"\"])\n\n static_css = []\n css_files = [\"tables.css\"]\n for css_file in css_files:\n path = os.path.join(os.path.dirname(__file__), \"static\", \"css\", css_file)\n static_css.append(\"/* \" + css_file + \" */\")\n with open(path, encoding=\"utf8\") as f:\n static_css.append(f.read())\n static_css.extend([\"\", \"\"])\n\n task_data = {\n \"per_class\": get_task_ratio_dict(environment.user_classes),\n \"total\": get_task_ratio_dict(environment.user_classes, total=True),\n }\n\n res = render_template(\n \"report.html\",\n int=int,\n round=round,\n requests_statistics=requests_statistics,\n failures_statistics=failures_statistics,\n exceptions_statistics=exceptions_statistics,\n start_time=start_time,\n end_time=end_time,\n host=host,\n history=history,\n static_js=\"\\n\".join(static_js),\n static_css=\"\\n\".join(static_css),\n show_download_link=show_download_link,\n locustfile=environment.locustfile,\n tasks=escape(dumps(task_data)),\n )\n\n return res\n", "path": "locust/html.py"}]}
1,812
193
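The golden diff in the record above swaps `datetime.datetime.fromtimestamp` for `datetime.datetime.utcfromtimestamp` in the report renderer. A minimal standalone sketch of the difference; the epoch value here is an arbitrary example, not taken from the record:

```python
import datetime

ts = 1_600_000_000  # arbitrary example epoch timestamp

local = datetime.datetime.fromtimestamp(ts)   # interpreted in the host's local zone
utc = datetime.datetime.utcfromtimestamp(ts)  # always interpreted as UTC

print(local.strftime("%Y-%m-%d %H:%M:%S"))  # varies with the host time zone
print(utc.strftime("%Y-%m-%d %H:%M:%S"))    # 2020-09-13 12:26:40 everywhere
```

This is why the reporter in a non-UTC zone saw shifted timestamps: the report rendered local wall-clock time as if it were canonical.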
gh_patches_debug_5105
rasdani/github-patches
git_diff
encode__httpx-2442
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Support httpcore>=0.16.0 Hi, I have a project that uses `fastapi` and `uvicorn` which demands `h11>=0.8`. Latest version of `fastapi` changed their test suite from `requests` to `httpx`. So I had to add `httpx` to my tests requirements. Now the problem is, that `httpx` requires `httpcore`, which since version `0.16.0` allows for `h11==0.14.0`, but the current version of `httpcore` required by `httpx` doesn't, so I end up having to downgrade the `h11==0.14.0` which I have to `0.12.0`. Can the constraint for `httpcore` be updated to `>=0.16.0` here? </issue> <code> [start of setup.py] 1 import sys 2 3 from setuptools import setup 4 5 sys.stderr.write( 6 """ 7 =============================== 8 Unsupported installation method 9 =============================== 10 httpx no longer supports installation with `python setup.py install`. 11 Please use `python -m pip install .` instead. 12 """ 13 ) 14 sys.exit(1) 15 16 17 # The below code will never execute, however GitHub is particularly 18 # picky about where it finds Python packaging metadata. 19 # See: https://github.com/github/feedback/discussions/6456 20 # 21 # To be removed once GitHub catches up. 22 23 setup( 24 name="httpx", 25 install_requires=[ 26 "certifi", 27 "sniffio", 28 "rfc3986[idna2008]>=1.3,<2", 29 "httpcore>=0.15.0,<0.16.0", 30 ], 31 ) 32 [end of setup.py] [start of httpx/__version__.py] 1 __title__ = "httpx" 2 __description__ = "A next generation HTTP client, for Python 3." 3 __version__ = "0.23.0" 4 [end of httpx/__version__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/httpx/__version__.py b/httpx/__version__.py --- a/httpx/__version__.py +++ b/httpx/__version__.py @@ -1,3 +1,3 @@ __title__ = "httpx" __description__ = "A next generation HTTP client, for Python 3." -__version__ = "0.23.0" +__version__ = "0.23.1" diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -26,6 +26,6 @@ "certifi", "sniffio", "rfc3986[idna2008]>=1.3,<2", - "httpcore>=0.15.0,<0.16.0", + "httpcore>=0.15.0,<0.17.0", ], )
{"golden_diff": "diff --git a/httpx/__version__.py b/httpx/__version__.py\n--- a/httpx/__version__.py\n+++ b/httpx/__version__.py\n@@ -1,3 +1,3 @@\n __title__ = \"httpx\"\n __description__ = \"A next generation HTTP client, for Python 3.\"\n-__version__ = \"0.23.0\"\n+__version__ = \"0.23.1\"\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -26,6 +26,6 @@\n \"certifi\",\n \"sniffio\",\n \"rfc3986[idna2008]>=1.3,<2\",\n- \"httpcore>=0.15.0,<0.16.0\",\n+ \"httpcore>=0.15.0,<0.17.0\",\n ],\n )\n", "issue": "Support httpcore>=0.16.0\nHi,\r\n\r\nI have a project that uses `fastapi` and `uvicorn` which demands `h11>=0.8`. Latest version of `fastapi` changed their test suite from `requests` to `httpx`. So I had to add `httpx` to my tests requirements.\r\n\r\nNow the problem is, that `httpx` requires `httpcore`, which since version `0.16.0` allows for `h11==0.14.0`, but the current version of `httpcore` required by `httpx` doesn't, so I end up having to downgrade the `h11==0.14.0` which I have to `0.12.0`.\r\n\r\nCan the constraint for `httpcore` be updated to `>=0.16.0` here?\n", "before_files": [{"content": "import sys\n\nfrom setuptools import setup\n\nsys.stderr.write(\n \"\"\"\n===============================\nUnsupported installation method\n===============================\nhttpx no longer supports installation with `python setup.py install`.\nPlease use `python -m pip install .` instead.\n\"\"\"\n)\nsys.exit(1)\n\n\n# The below code will never execute, however GitHub is particularly\n# picky about where it finds Python packaging metadata.\n# See: https://github.com/github/feedback/discussions/6456\n#\n# To be removed once GitHub catches up.\n\nsetup(\n name=\"httpx\",\n install_requires=[\n \"certifi\",\n \"sniffio\",\n \"rfc3986[idna2008]>=1.3,<2\",\n \"httpcore>=0.15.0,<0.16.0\",\n ],\n)\n", "path": "setup.py"}, {"content": "__title__ = \"httpx\"\n__description__ = \"A next generation HTTP client, for Python 3.\"\n__version__ = \"0.23.0\"\n", "path": "httpx/__version__.py"}]}
1,007
204
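The fix above is a pure dependency-range relaxation plus a patch-version bump. A small sketch of the constraint arithmetic using the third-party `packaging` library (assumed available; it is not part of the record):

```python
from packaging.specifiers import SpecifierSet
from packaging.version import Version

old_pin = SpecifierSet(">=0.15.0,<0.16.0")  # httpx 0.23.0
new_pin = SpecifierSet(">=0.15.0,<0.17.0")  # httpx 0.23.1

release = Version("0.16.0")  # first httpcore release permitting h11==0.14.0

print(release in old_pin)  # False: resolvers must downgrade h11 to satisfy both
print(release in new_pin)  # True: the conflict described in the issue disappears
```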
gh_patches_debug_210
rasdani/github-patches
git_diff
xonsh__xonsh-3049
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Exception on startup (pygments_cache) <!--- Provide a general summary of the issue in the Title above --> <!--- If you have a question along the lines of "How do I do this Bash command in xonsh" please first look over the Bash to Xonsh translation guide: http://xon.sh/bash_to_xsh.html If you don't find an answer there, please do open an issue! --> ## xonfig <!--- Please post the output of the `xonfig` command (run from inside xonsh) so we know more about your current setup --> ## Expected Behavior <!--- Tell us what should happen --> ## Current Behavior <!--- Tell us what happens instead of the expected behavior --> <!--- If part of your bug report is a traceback, please first enter debug mode before triggering the error To enter debug mode, set the environment variable `XONSH_DEBUG=1` _before_ starting `xonsh`. On Linux and OSX, an easy way to to do this is to run `env XONSH_DEBUG=1 xonsh` --> ## Steps to Reproduce <!--- Please try to write out a minimal reproducible snippet to trigger the bug, it will help us fix it! --> </issue> <code> [start of xonsh/ptk2/__init__.py] 1 # must come before ptk / pygments imports 2 from xonsh.lazyasd import load_module_in_background 3 4 load_module_in_background( 5 "pkg_resources", 6 debug="XONSH_DEBUG", 7 replacements={"pygments.plugin": "pkg_resources"}, 8 ) 9 [end of xonsh/ptk2/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/xonsh/ptk2/__init__.py b/xonsh/ptk2/__init__.py --- a/xonsh/ptk2/__init__.py +++ b/xonsh/ptk2/__init__.py @@ -1,8 +0,0 @@ -# must come before ptk / pygments imports -from xonsh.lazyasd import load_module_in_background - -load_module_in_background( - "pkg_resources", - debug="XONSH_DEBUG", - replacements={"pygments.plugin": "pkg_resources"}, -)
{"golden_diff": "diff --git a/xonsh/ptk2/__init__.py b/xonsh/ptk2/__init__.py\n--- a/xonsh/ptk2/__init__.py\n+++ b/xonsh/ptk2/__init__.py\n@@ -1,8 +0,0 @@\n-# must come before ptk / pygments imports\n-from xonsh.lazyasd import load_module_in_background\n-\n-load_module_in_background(\n- \"pkg_resources\",\n- debug=\"XONSH_DEBUG\",\n- replacements={\"pygments.plugin\": \"pkg_resources\"},\n-)\n", "issue": "Exception on startup (pygments_cache)\n<!--- Provide a general summary of the issue in the Title above -->\r\n<!--- If you have a question along the lines of \"How do I do this Bash command in xonsh\"\r\nplease first look over the Bash to Xonsh translation guide: http://xon.sh/bash_to_xsh.html\r\nIf you don't find an answer there, please do open an issue! -->\r\n\r\n## xonfig\r\n<!--- Please post the output of the `xonfig` command (run from inside xonsh) so we know more about your current setup -->\r\n\r\n## Expected Behavior\r\n<!--- Tell us what should happen -->\r\n\r\n## Current Behavior\r\n<!--- Tell us what happens instead of the expected behavior -->\r\n<!--- If part of your bug report is a traceback, please first enter debug mode before triggering the error\r\nTo enter debug mode, set the environment variable `XONSH_DEBUG=1` _before_ starting `xonsh`. \r\nOn Linux and OSX, an easy way to to do this is to run `env XONSH_DEBUG=1 xonsh` -->\r\n\r\n## Steps to Reproduce\r\n<!--- Please try to write out a minimal reproducible snippet to trigger the bug, it will help us fix it! -->\r\n\n", "before_files": [{"content": "# must come before ptk / pygments imports\nfrom xonsh.lazyasd import load_module_in_background\n\nload_module_in_background(\n \"pkg_resources\",\n debug=\"XONSH_DEBUG\",\n replacements={\"pygments.plugin\": \"pkg_resources\"},\n)\n", "path": "xonsh/ptk2/__init__.py"}]}
868
126
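The golden diff above is a pure deletion: it removes an eager background import of `pkg_resources` that could raise during shell startup. A generic, stdlib-only sketch of the pattern being removed; the helper name mirrors xonsh's, but this body is illustrative, not the project's:

```python
import importlib
import threading

def load_module_in_background(name: str) -> threading.Thread:
    """Start importing `name` on a daemon thread and return the thread."""
    thread = threading.Thread(
        target=importlib.import_module, args=(name,), daemon=True
    )
    thread.start()
    return thread

# The fix is the deletion itself: without the eager import, pkg_resources
# is only loaded if some consumer actually needs it, so a broken
# pygments_cache/pkg_resources setup no longer breaks startup.
load_module_in_background("json").join()  # stdlib module as a safe stand-in
```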
gh_patches_debug_33044
rasdani/github-patches
git_diff
vispy__vispy-2135
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Remove warning on import about missing networkx dependency Currently, importing vispy (or some submodule) without networkx installed results in a warning about installing networkx, even if the user/downstream library has no intention of using the graph layout. The warning should be delayed and turned into an error, as noted by @djhoese [here](https://github.com/napari/napari/issues/2979#issuecomment-874159877). xref napari/napari#2979 </issue> <code> [start of vispy/visuals/graphs/layouts/networkx_layout.py] 1 # -*- coding: utf-8 -*- 2 # Copyright (c) Vispy Development Team. All Rights Reserved. 3 # Distributed under the (new) BSD License. See LICENSE.txt for more info.#!/usr/bin/env python3 4 from ..util import _straight_line_vertices, issparse 5 import numpy as np 6 try: 7 import networkx as nx 8 except ModuleNotFoundError: 9 import warnings 10 warnings.warn( 11 "Networkx not found, please install network to use its layouts") 12 nx = None 13 14 15 class NetworkxCoordinates: 16 def __init__(self, graph=None, layout=None, **kwargs): 17 """ 18 Converts :graph: into a layout. Can be used in conjunction with networkx layouts or using raw 2D-numpy arrays. 19 20 Parameters 21 ---------- 22 graph : a networkx graph. 23 layout : str or dict or iterable-object of float32, optional 24 - When :layout: is s string, a lookup will be performed in the networkx avaiable layouts. 25 - When :layout: is a dict, it will be assumed that it takes the shape (key, value) = (node_id, 2D-coordinate). 26 - When :layout: is numpy array it is assumed it takes the shape (number_of_nodes, 2). 27 kwargs: dict, optional 28 when layout is :str: :kwargs: will act as a setting dictionary for the layout function of networkx 29 """ 30 if isinstance(graph, type(None)): 31 raise ValueError("Requires networkx input") 32 self.graph = graph 33 self.positions = np.zeros((len(graph), 2), dtype=np.float32) 34 # default random positions 35 if isinstance(layout, type(None)): 36 self.positions = np.random.rand(*self.positions.shape) 37 38 # check for networkx 39 elif isinstance(layout, str): 40 if nx: 41 if not layout.endswith("_layout"): 42 layout += "_layout" # append for nx 43 layout_function = getattr(nx, layout) 44 if layout_function: 45 self.positions = np.asarray( 46 [i for i in dict(layout_function(graph, **kwargs)).values()]) 47 else: 48 raise ValueError("Check networkx for layouts") 49 else: 50 raise ValueError("networkx not found") 51 # assume dict from networkx; values are 2-array 52 elif isinstance(layout, dict): 53 self.positions = np.asarray([i for i in layout.values()]) 54 55 # assume given values 56 elif isinstance(layout, np.ndarray): 57 assert layout.ndim == 2 58 assert layout.shape[0] == len(graph) 59 self.positions = layout 60 else: 61 raise ValueError("Input not understood") 62 63 # normalize coordinates 64 self.positions = (self.positions - self.positions.min()) / \ 65 (self.positions.max() - self.positions.min()) 66 self.positions = self.positions.astype(np.float32) 67 68 def __call__(self, adjacency_mat, directed=False): 69 """ 70 Parameters 71 ---------- 72 adjacency_mat : sparse adjacency matrix. 73 directed : bool, default False 74 75 Returns 76 --------- 77 (node_vertices, line_vertices, arrow_vertices) : tuple 78 Yields the node and line vertices in a tuple. 
This layout only yields a 79 single time, and has no builtin animation 80 """ 81 if issparse(adjacency_mat): 82 adjacency_mat = adjacency_mat.tocoo() 83 line_vertices, arrows = _straight_line_vertices( 84 adjacency_mat, self.positions, directed) 85 86 yield self.positions, line_vertices, arrows 87 88 @property 89 def adj(self): 90 """Convenient storage and holder of the adjacency matrix for the :scene.visuals.Graph: function.""" 91 return nx.adjacency_matrix(self.graph) 92 [end of vispy/visuals/graphs/layouts/networkx_layout.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/vispy/visuals/graphs/layouts/networkx_layout.py b/vispy/visuals/graphs/layouts/networkx_layout.py --- a/vispy/visuals/graphs/layouts/networkx_layout.py +++ b/vispy/visuals/graphs/layouts/networkx_layout.py @@ -6,9 +6,6 @@ try: import networkx as nx except ModuleNotFoundError: - import warnings - warnings.warn( - "Networkx not found, please install network to use its layouts") nx = None @@ -27,6 +24,8 @@ kwargs: dict, optional when layout is :str: :kwargs: will act as a setting dictionary for the layout function of networkx """ + if nx is None: + raise ValueError("networkx not found, please install networkx to use its layouts") if isinstance(graph, type(None)): raise ValueError("Requires networkx input") self.graph = graph @@ -37,17 +36,14 @@ # check for networkx elif isinstance(layout, str): - if nx: - if not layout.endswith("_layout"): - layout += "_layout" # append for nx - layout_function = getattr(nx, layout) - if layout_function: - self.positions = np.asarray( - [i for i in dict(layout_function(graph, **kwargs)).values()]) - else: - raise ValueError("Check networkx for layouts") + if not layout.endswith("_layout"): + layout += "_layout" # append for nx + layout_function = getattr(nx, layout) + if layout_function: + self.positions = np.asarray( + [i for i in dict(layout_function(graph, **kwargs)).values()]) else: - raise ValueError("networkx not found") + raise ValueError("Check networkx for layouts") # assume dict from networkx; values are 2-array elif isinstance(layout, dict): self.positions = np.asarray([i for i in layout.values()])
{"golden_diff": "diff --git a/vispy/visuals/graphs/layouts/networkx_layout.py b/vispy/visuals/graphs/layouts/networkx_layout.py\n--- a/vispy/visuals/graphs/layouts/networkx_layout.py\n+++ b/vispy/visuals/graphs/layouts/networkx_layout.py\n@@ -6,9 +6,6 @@\n try:\n import networkx as nx\n except ModuleNotFoundError:\n- import warnings\n- warnings.warn(\n- \"Networkx not found, please install network to use its layouts\")\n nx = None\n \n \n@@ -27,6 +24,8 @@\n kwargs: dict, optional\n when layout is :str: :kwargs: will act as a setting dictionary for the layout function of networkx\n \"\"\"\n+ if nx is None:\n+ raise ValueError(\"networkx not found, please install networkx to use its layouts\")\n if isinstance(graph, type(None)):\n raise ValueError(\"Requires networkx input\")\n self.graph = graph\n@@ -37,17 +36,14 @@\n \n # check for networkx\n elif isinstance(layout, str):\n- if nx:\n- if not layout.endswith(\"_layout\"):\n- layout += \"_layout\" # append for nx\n- layout_function = getattr(nx, layout)\n- if layout_function:\n- self.positions = np.asarray(\n- [i for i in dict(layout_function(graph, **kwargs)).values()])\n- else:\n- raise ValueError(\"Check networkx for layouts\")\n+ if not layout.endswith(\"_layout\"):\n+ layout += \"_layout\" # append for nx\n+ layout_function = getattr(nx, layout)\n+ if layout_function:\n+ self.positions = np.asarray(\n+ [i for i in dict(layout_function(graph, **kwargs)).values()])\n else:\n- raise ValueError(\"networkx not found\")\n+ raise ValueError(\"Check networkx for layouts\")\n # assume dict from networkx; values are 2-array\n elif isinstance(layout, dict):\n self.positions = np.asarray([i for i in layout.values()])\n", "issue": "Remove warning on import about missing networkx dependency\nCurrently, importing vispy (or some submodule) without networkx installed results in a warning about installing networkx, even if the user/downstream library has no intention of using the graph layout. The warning should be delayed and turned into an error, as noted by @djhoese [here](https://github.com/napari/napari/issues/2979#issuecomment-874159877).\r\n\r\nxref napari/napari#2979\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (c) Vispy Development Team. All Rights Reserved.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.#!/usr/bin/env python3\nfrom ..util import _straight_line_vertices, issparse\nimport numpy as np\ntry:\n import networkx as nx\nexcept ModuleNotFoundError:\n import warnings\n warnings.warn(\n \"Networkx not found, please install network to use its layouts\")\n nx = None\n\n\nclass NetworkxCoordinates:\n def __init__(self, graph=None, layout=None, **kwargs):\n \"\"\"\n Converts :graph: into a layout. 
Can be used in conjunction with networkx layouts or using raw 2D-numpy arrays.\n\n Parameters\n ----------\n graph : a networkx graph.\n layout : str or dict or iterable-object of float32, optional\n - When :layout: is s string, a lookup will be performed in the networkx avaiable layouts.\n - When :layout: is a dict, it will be assumed that it takes the shape (key, value) = (node_id, 2D-coordinate).\n - When :layout: is numpy array it is assumed it takes the shape (number_of_nodes, 2).\n kwargs: dict, optional\n when layout is :str: :kwargs: will act as a setting dictionary for the layout function of networkx\n \"\"\"\n if isinstance(graph, type(None)):\n raise ValueError(\"Requires networkx input\")\n self.graph = graph\n self.positions = np.zeros((len(graph), 2), dtype=np.float32)\n # default random positions\n if isinstance(layout, type(None)):\n self.positions = np.random.rand(*self.positions.shape)\n\n # check for networkx\n elif isinstance(layout, str):\n if nx:\n if not layout.endswith(\"_layout\"):\n layout += \"_layout\" # append for nx\n layout_function = getattr(nx, layout)\n if layout_function:\n self.positions = np.asarray(\n [i for i in dict(layout_function(graph, **kwargs)).values()])\n else:\n raise ValueError(\"Check networkx for layouts\")\n else:\n raise ValueError(\"networkx not found\")\n # assume dict from networkx; values are 2-array\n elif isinstance(layout, dict):\n self.positions = np.asarray([i for i in layout.values()])\n\n # assume given values\n elif isinstance(layout, np.ndarray):\n assert layout.ndim == 2\n assert layout.shape[0] == len(graph)\n self.positions = layout\n else:\n raise ValueError(\"Input not understood\")\n\n # normalize coordinates\n self.positions = (self.positions - self.positions.min()) / \\\n (self.positions.max() - self.positions.min())\n self.positions = self.positions.astype(np.float32)\n\n def __call__(self, adjacency_mat, directed=False):\n \"\"\"\n Parameters\n ----------\n adjacency_mat : sparse adjacency matrix.\n directed : bool, default False\n\n Returns\n ---------\n (node_vertices, line_vertices, arrow_vertices) : tuple\n Yields the node and line vertices in a tuple. This layout only yields a\n single time, and has no builtin animation\n \"\"\"\n if issparse(adjacency_mat):\n adjacency_mat = adjacency_mat.tocoo()\n line_vertices, arrows = _straight_line_vertices(\n adjacency_mat, self.positions, directed)\n\n yield self.positions, line_vertices, arrows\n\n @property\n def adj(self):\n \"\"\"Convenient storage and holder of the adjacency matrix for the :scene.visuals.Graph: function.\"\"\"\n return nx.adjacency_matrix(self.graph)\n", "path": "vispy/visuals/graphs/layouts/networkx_layout.py"}]}
1,617
448
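The change above turns an import-time warning into a use-time `ValueError`. A compact sketch of that optional-dependency pattern, runnable whether or not networkx is installed; the function name is illustrative:

```python
try:
    import networkx as nx  # optional dependency: stay silent on import
except ModuleNotFoundError:
    nx = None

def spring_positions(graph):
    """Fail loudly only when a networkx layout is actually requested."""
    if nx is None:
        raise ValueError(
            "networkx not found, please install networkx to use its layouts"
        )
    return nx.spring_layout(graph)
```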
gh_patches_debug_59198
rasdani/github-patches
git_diff
Nitrate__Nitrate-319
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Fix filter calls Similar with the calls to `map`, these calls must be fixed as well. </issue> <code> [start of tcms/testplans/helpers/email.py] 1 # -*- coding: utf-8 -*- 2 from django.conf import settings 3 4 from tcms.core.utils.mailto import send_email_using_threading 5 6 7 def email_plan_update(plan): 8 recipients = get_plan_notification_recipients(plan) 9 if len(recipients) == 0: 10 return 11 subject = u'TestPlan %s has been updated.' % plan.pk 12 send_email_using_threading(settings.PLAN_EMAIL_TEMPLATE, subject, 13 recipients, {'plan': plan}) 14 15 16 def email_plan_deletion(plan): 17 recipients = get_plan_notification_recipients(plan) 18 if len(recipients) == 0: 19 return 20 subject = u'TestPlan %s has been deleted.' % plan.pk 21 send_email_using_threading(settings.PLAN_DELELE_EMAIL_TEMPLATE, subject, 22 recipients, {'plan': plan}) 23 24 25 def get_plan_notification_recipients(plan): 26 recipients = set() 27 if plan.owner: 28 if plan.email_settings.auto_to_plan_owner: 29 recipients.add(plan.owner.email) 30 if plan.email_settings.auto_to_plan_author: 31 recipients.add(plan.author.email) 32 if plan.email_settings.auto_to_case_owner: 33 case_authors = plan.case.values_list('author__email', flat=True) 34 recipients.update(case_authors) 35 if plan.email_settings.auto_to_case_default_tester: 36 case_testers = plan.case.values_list('default_tester__email', 37 flat=True) 38 recipients.update(case_testers) 39 return filter(lambda e: bool(e), recipients) 40 [end of tcms/testplans/helpers/email.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/tcms/testplans/helpers/email.py b/tcms/testplans/helpers/email.py --- a/tcms/testplans/helpers/email.py +++ b/tcms/testplans/helpers/email.py @@ -36,4 +36,4 @@ case_testers = plan.case.values_list('default_tester__email', flat=True) recipients.update(case_testers) - return filter(lambda e: bool(e), recipients) + return [r for r in recipients if r]
{"golden_diff": "diff --git a/tcms/testplans/helpers/email.py b/tcms/testplans/helpers/email.py\n--- a/tcms/testplans/helpers/email.py\n+++ b/tcms/testplans/helpers/email.py\n@@ -36,4 +36,4 @@\n case_testers = plan.case.values_list('default_tester__email',\n flat=True)\n recipients.update(case_testers)\n- return filter(lambda e: bool(e), recipients)\n+ return [r for r in recipients if r]\n", "issue": "Fix filter calls\nSimilar with the calls to `map`, these calls must be fixed as well.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom django.conf import settings\n\nfrom tcms.core.utils.mailto import send_email_using_threading\n\n\ndef email_plan_update(plan):\n recipients = get_plan_notification_recipients(plan)\n if len(recipients) == 0:\n return\n subject = u'TestPlan %s has been updated.' % plan.pk\n send_email_using_threading(settings.PLAN_EMAIL_TEMPLATE, subject,\n recipients, {'plan': plan})\n\n\ndef email_plan_deletion(plan):\n recipients = get_plan_notification_recipients(plan)\n if len(recipients) == 0:\n return\n subject = u'TestPlan %s has been deleted.' % plan.pk\n send_email_using_threading(settings.PLAN_DELELE_EMAIL_TEMPLATE, subject,\n recipients, {'plan': plan})\n\n\ndef get_plan_notification_recipients(plan):\n recipients = set()\n if plan.owner:\n if plan.email_settings.auto_to_plan_owner:\n recipients.add(plan.owner.email)\n if plan.email_settings.auto_to_plan_author:\n recipients.add(plan.author.email)\n if plan.email_settings.auto_to_case_owner:\n case_authors = plan.case.values_list('author__email', flat=True)\n recipients.update(case_authors)\n if plan.email_settings.auto_to_case_default_tester:\n case_testers = plan.case.values_list('default_tester__email',\n flat=True)\n recipients.update(case_testers)\n return filter(lambda e: bool(e), recipients)\n", "path": "tcms/testplans/helpers/email.py"}]}
942
104
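The one-line fix above matters because on Python 3 `filter()` returns a lazy one-shot iterator, while the helper's callers immediately run `len(recipients) == 0` checks. A minimal reproduction:

```python
recipients = {"a@example.com", "", "b@example.com"}

lazy = filter(lambda e: bool(e), recipients)  # Python 3: one-shot iterator
eager = [r for r in recipients if r]          # a real list, as in the fix

print(len(eager))  # 2
# len(lazy) raises TypeError on Python 3, which is exactly what the
# email_plan_update()/email_plan_deletion() length checks would hit.
```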
gh_patches_debug_2599
rasdani/github-patches
git_diff
hpcaitech__ColossalAI-3323
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [tensor] fix some unittests [tensor] fix some unittests [tensor] fix some unittests </issue> <code> [start of colossalai/fx/tracer/_tracer_utils.py] 1 from typing import List, Union, Any 2 from ..proxy import ColoProxy, ColoAttribute 3 import torch 4 from .meta_patch import meta_patched_function, meta_patched_module 5 6 __all__ = ['is_element_in_list', 'extract_meta'] 7 8 9 def is_element_in_list(elements: Union[List[Any], Any], list_: List[Any]): 10 if isinstance(elements, (tuple, list, set)): 11 for ele in elements: 12 if ele not in list_: 13 return False, ele 14 else: 15 if elements not in list_: 16 return False, elements 17 18 return True, None 19 20 21 def extract_meta(*args, **kwargs): 22 23 def _convert(val): 24 if isinstance(val, ColoProxy): 25 return val.meta_data 26 elif isinstance(val, (list, tuple)): 27 return type(val)([_convert(ele) for ele in val]) 28 29 return val 30 31 new_args = [_convert(val) for val in args] 32 new_kwargs = {k: _convert(v) for k, v in kwargs.items()} 33 return new_args, new_kwargs 34 35 36 def compute_meta_data_for_functions_proxy(target, args, kwargs): 37 args_metas, kwargs_metas = extract_meta(*args, **kwargs) 38 39 # fetch patched function 40 if meta_patched_function.has(target): 41 meta_target = meta_patched_function.get(target) 42 elif meta_patched_function.has(target.__name__): 43 meta_target = meta_patched_function.get(target.__name__) 44 else: 45 meta_target = target 46 meta_out = meta_target(*args_metas, **kwargs_metas) 47 if isinstance(meta_out, torch.Tensor): 48 meta_out = meta_out.to(device="meta") 49 50 return meta_out 51 [end of colossalai/fx/tracer/_tracer_utils.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/colossalai/fx/tracer/_tracer_utils.py b/colossalai/fx/tracer/_tracer_utils.py --- a/colossalai/fx/tracer/_tracer_utils.py +++ b/colossalai/fx/tracer/_tracer_utils.py @@ -1,6 +1,8 @@ -from typing import List, Union, Any -from ..proxy import ColoProxy, ColoAttribute +from typing import Any, List, Union + import torch + +from ..proxy import ColoAttribute, ColoProxy from .meta_patch import meta_patched_function, meta_patched_module __all__ = ['is_element_in_list', 'extract_meta']
{"golden_diff": "diff --git a/colossalai/fx/tracer/_tracer_utils.py b/colossalai/fx/tracer/_tracer_utils.py\n--- a/colossalai/fx/tracer/_tracer_utils.py\n+++ b/colossalai/fx/tracer/_tracer_utils.py\n@@ -1,6 +1,8 @@\n-from typing import List, Union, Any\n-from ..proxy import ColoProxy, ColoAttribute\n+from typing import Any, List, Union\n+\n import torch\n+\n+from ..proxy import ColoAttribute, ColoProxy\n from .meta_patch import meta_patched_function, meta_patched_module\n \n __all__ = ['is_element_in_list', 'extract_meta']\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "from typing import List, Union, Any\nfrom ..proxy import ColoProxy, ColoAttribute\nimport torch\nfrom .meta_patch import meta_patched_function, meta_patched_module\n\n__all__ = ['is_element_in_list', 'extract_meta']\n\n\ndef is_element_in_list(elements: Union[List[Any], Any], list_: List[Any]):\n if isinstance(elements, (tuple, list, set)):\n for ele in elements:\n if ele not in list_:\n return False, ele\n else:\n if elements not in list_:\n return False, elements\n\n return True, None\n\n\ndef extract_meta(*args, **kwargs):\n\n def _convert(val):\n if isinstance(val, ColoProxy):\n return val.meta_data\n elif isinstance(val, (list, tuple)):\n return type(val)([_convert(ele) for ele in val])\n\n return val\n\n new_args = [_convert(val) for val in args]\n new_kwargs = {k: _convert(v) for k, v in kwargs.items()}\n return new_args, new_kwargs\n\n\ndef compute_meta_data_for_functions_proxy(target, args, kwargs):\n args_metas, kwargs_metas = extract_meta(*args, **kwargs)\n\n # fetch patched function\n if meta_patched_function.has(target):\n meta_target = meta_patched_function.get(target)\n elif meta_patched_function.has(target.__name__):\n meta_target = meta_patched_function.get(target.__name__)\n else:\n meta_target = target\n meta_out = meta_target(*args_metas, **kwargs_metas)\n if isinstance(meta_out, torch.Tensor):\n meta_out = meta_out.to(device=\"meta\")\n\n return meta_out\n", "path": "colossalai/fx/tracer/_tracer_utils.py"}]}
1,028
145
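The diff above changes no behavior; it only regroups imports (standard library, then third-party, then local, with names alphabetized). The same ordering can be reproduced mechanically with isort, assumed installed here; the record itself does not invoke it:

```python
import isort

code = (
    "from typing import List, Union, Any\n"
    "from ..proxy import ColoProxy, ColoAttribute\n"
    "import torch\n"
)
print(isort.code(code))
# Expected shape: the typing import first with names alphabetized, torch
# in a third-party group, and the relative ..proxy import last, matching
# the golden diff above.
```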
gh_patches_debug_5791
rasdani/github-patches
git_diff
paperless-ngx__paperless-ngx-195
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [Other] Update name to paperless-ngx in docs https://github.com/paperless-ngx/paperless-ngx/issues/4#issuecomment-1039890021 > I would go through the places where the project name is mentioned and add the x to it. Only actually code (variable names) could remain unchanged for backwards compatibility. </issue> <code> [start of src/paperless/urls.py] 1 from django.conf.urls import include 2 from django.contrib import admin 3 from django.contrib.auth.decorators import login_required 4 from django.urls import path, re_path 5 from django.views.decorators.csrf import csrf_exempt 6 from django.views.generic import RedirectView 7 from rest_framework.authtoken import views 8 from rest_framework.routers import DefaultRouter 9 10 from django.utils.translation import gettext_lazy as _ 11 12 from django.conf import settings 13 14 from paperless.consumers import StatusConsumer 15 from documents.views import ( 16 CorrespondentViewSet, 17 UnifiedSearchViewSet, 18 LogViewSet, 19 TagViewSet, 20 DocumentTypeViewSet, 21 IndexView, 22 SearchAutoCompleteView, 23 StatisticsView, 24 PostDocumentView, 25 SavedViewViewSet, 26 BulkEditView, 27 SelectionDataView, 28 BulkDownloadView, 29 ) 30 from paperless.views import FaviconView 31 32 api_router = DefaultRouter() 33 api_router.register(r"correspondents", CorrespondentViewSet) 34 api_router.register(r"document_types", DocumentTypeViewSet) 35 api_router.register(r"documents", UnifiedSearchViewSet) 36 api_router.register(r"logs", LogViewSet, basename="logs") 37 api_router.register(r"tags", TagViewSet) 38 api_router.register(r"saved_views", SavedViewViewSet) 39 40 41 urlpatterns = [ 42 re_path( 43 r"^api/", 44 include( 45 [ 46 re_path( 47 r"^auth/", 48 include( 49 ("rest_framework.urls", "rest_framework"), 50 namespace="rest_framework", 51 ), 52 ), 53 re_path( 54 r"^search/autocomplete/", 55 SearchAutoCompleteView.as_view(), 56 name="autocomplete", 57 ), 58 re_path(r"^statistics/", StatisticsView.as_view(), name="statistics"), 59 re_path( 60 r"^documents/post_document/", 61 PostDocumentView.as_view(), 62 name="post_document", 63 ), 64 re_path( 65 r"^documents/bulk_edit/", BulkEditView.as_view(), name="bulk_edit" 66 ), 67 re_path( 68 r"^documents/selection_data/", 69 SelectionDataView.as_view(), 70 name="selection_data", 71 ), 72 re_path( 73 r"^documents/bulk_download/", 74 BulkDownloadView.as_view(), 75 name="bulk_download", 76 ), 77 path("token/", views.obtain_auth_token), 78 ] 79 + api_router.urls 80 ), 81 ), 82 re_path(r"^favicon.ico$", FaviconView.as_view(), name="favicon"), 83 re_path(r"admin/", admin.site.urls), 84 re_path( 85 r"^fetch/", 86 include( 87 [ 88 re_path( 89 r"^doc/(?P<pk>\d+)$", 90 RedirectView.as_view( 91 url=settings.BASE_URL + "api/documents/%(pk)s/download/" 92 ), 93 ), 94 re_path( 95 r"^thumb/(?P<pk>\d+)$", 96 RedirectView.as_view( 97 url=settings.BASE_URL + "api/documents/%(pk)s/thumb/" 98 ), 99 ), 100 re_path( 101 r"^preview/(?P<pk>\d+)$", 102 RedirectView.as_view( 103 url=settings.BASE_URL + "api/documents/%(pk)s/preview/" 104 ), 105 ), 106 ] 107 ), 108 ), 109 re_path( 110 r"^push$", 111 csrf_exempt( 112 RedirectView.as_view(url=settings.BASE_URL + "api/documents/post_document/") 113 ), 114 ), 115 # Frontend assets TODO: this is pretty bad, but it works. 
116 path( 117 "assets/<path:path>", 118 RedirectView.as_view( 119 url=settings.STATIC_URL + "frontend/en-US/assets/%(path)s" 120 ), 121 ), 122 # TODO: with localization, this is even worse! :/ 123 # login, logout 124 path("accounts/", include("django.contrib.auth.urls")), 125 # Root of the Frontent 126 re_path(r".*", login_required(IndexView.as_view()), name="base"), 127 ] 128 129 130 websocket_urlpatterns = [ 131 re_path(r"ws/status/$", StatusConsumer.as_asgi()), 132 ] 133 134 # Text in each page's <h1> (and above login form). 135 admin.site.site_header = "Paperless-ng" 136 # Text at the end of each page's <title>. 137 admin.site.site_title = "Paperless-ng" 138 # Text at the top of the admin index page. 139 admin.site.index_title = _("Paperless-ng administration") 140 [end of src/paperless/urls.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/paperless/urls.py b/src/paperless/urls.py --- a/src/paperless/urls.py +++ b/src/paperless/urls.py @@ -132,8 +132,8 @@ ] # Text in each page's <h1> (and above login form). -admin.site.site_header = "Paperless-ng" +admin.site.site_header = "Paperless-ngx" # Text at the end of each page's <title>. -admin.site.site_title = "Paperless-ng" +admin.site.site_title = "Paperless-ngx" # Text at the top of the admin index page. -admin.site.index_title = _("Paperless-ng administration") +admin.site.index_title = _("Paperless-ngx administration")
{"golden_diff": "diff --git a/src/paperless/urls.py b/src/paperless/urls.py\n--- a/src/paperless/urls.py\n+++ b/src/paperless/urls.py\n@@ -132,8 +132,8 @@\n ]\n \n # Text in each page's <h1> (and above login form).\n-admin.site.site_header = \"Paperless-ng\"\n+admin.site.site_header = \"Paperless-ngx\"\n # Text at the end of each page's <title>.\n-admin.site.site_title = \"Paperless-ng\"\n+admin.site.site_title = \"Paperless-ngx\"\n # Text at the top of the admin index page.\n-admin.site.index_title = _(\"Paperless-ng administration\")\n+admin.site.index_title = _(\"Paperless-ngx administration\")\n", "issue": "[Other] Update name to paperless-ngx in docs\nhttps://github.com/paperless-ngx/paperless-ngx/issues/4#issuecomment-1039890021\r\n\r\n> I would go through the places where the project name is mentioned and add the x to it. Only actually code (variable names) could remain unchanged for backwards compatibility.\n", "before_files": [{"content": "from django.conf.urls import include\nfrom django.contrib import admin\nfrom django.contrib.auth.decorators import login_required\nfrom django.urls import path, re_path\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.views.generic import RedirectView\nfrom rest_framework.authtoken import views\nfrom rest_framework.routers import DefaultRouter\n\nfrom django.utils.translation import gettext_lazy as _\n\nfrom django.conf import settings\n\nfrom paperless.consumers import StatusConsumer\nfrom documents.views import (\n CorrespondentViewSet,\n UnifiedSearchViewSet,\n LogViewSet,\n TagViewSet,\n DocumentTypeViewSet,\n IndexView,\n SearchAutoCompleteView,\n StatisticsView,\n PostDocumentView,\n SavedViewViewSet,\n BulkEditView,\n SelectionDataView,\n BulkDownloadView,\n)\nfrom paperless.views import FaviconView\n\napi_router = DefaultRouter()\napi_router.register(r\"correspondents\", CorrespondentViewSet)\napi_router.register(r\"document_types\", DocumentTypeViewSet)\napi_router.register(r\"documents\", UnifiedSearchViewSet)\napi_router.register(r\"logs\", LogViewSet, basename=\"logs\")\napi_router.register(r\"tags\", TagViewSet)\napi_router.register(r\"saved_views\", SavedViewViewSet)\n\n\nurlpatterns = [\n re_path(\n r\"^api/\",\n include(\n [\n re_path(\n r\"^auth/\",\n include(\n (\"rest_framework.urls\", \"rest_framework\"),\n namespace=\"rest_framework\",\n ),\n ),\n re_path(\n r\"^search/autocomplete/\",\n SearchAutoCompleteView.as_view(),\n name=\"autocomplete\",\n ),\n re_path(r\"^statistics/\", StatisticsView.as_view(), name=\"statistics\"),\n re_path(\n r\"^documents/post_document/\",\n PostDocumentView.as_view(),\n name=\"post_document\",\n ),\n re_path(\n r\"^documents/bulk_edit/\", BulkEditView.as_view(), name=\"bulk_edit\"\n ),\n re_path(\n r\"^documents/selection_data/\",\n SelectionDataView.as_view(),\n name=\"selection_data\",\n ),\n re_path(\n r\"^documents/bulk_download/\",\n BulkDownloadView.as_view(),\n name=\"bulk_download\",\n ),\n path(\"token/\", views.obtain_auth_token),\n ]\n + api_router.urls\n ),\n ),\n re_path(r\"^favicon.ico$\", FaviconView.as_view(), name=\"favicon\"),\n re_path(r\"admin/\", admin.site.urls),\n re_path(\n r\"^fetch/\",\n include(\n [\n re_path(\n r\"^doc/(?P<pk>\\d+)$\",\n RedirectView.as_view(\n url=settings.BASE_URL + \"api/documents/%(pk)s/download/\"\n ),\n ),\n re_path(\n r\"^thumb/(?P<pk>\\d+)$\",\n RedirectView.as_view(\n url=settings.BASE_URL + \"api/documents/%(pk)s/thumb/\"\n ),\n ),\n re_path(\n r\"^preview/(?P<pk>\\d+)$\",\n RedirectView.as_view(\n url=settings.BASE_URL + 
\"api/documents/%(pk)s/preview/\"\n ),\n ),\n ]\n ),\n ),\n re_path(\n r\"^push$\",\n csrf_exempt(\n RedirectView.as_view(url=settings.BASE_URL + \"api/documents/post_document/\")\n ),\n ),\n # Frontend assets TODO: this is pretty bad, but it works.\n path(\n \"assets/<path:path>\",\n RedirectView.as_view(\n url=settings.STATIC_URL + \"frontend/en-US/assets/%(path)s\"\n ),\n ),\n # TODO: with localization, this is even worse! :/\n # login, logout\n path(\"accounts/\", include(\"django.contrib.auth.urls\")),\n # Root of the Frontent\n re_path(r\".*\", login_required(IndexView.as_view()), name=\"base\"),\n]\n\n\nwebsocket_urlpatterns = [\n re_path(r\"ws/status/$\", StatusConsumer.as_asgi()),\n]\n\n# Text in each page's <h1> (and above login form).\nadmin.site.site_header = \"Paperless-ng\"\n# Text at the end of each page's <title>.\nadmin.site.site_title = \"Paperless-ng\"\n# Text at the top of the admin index page.\nadmin.site.index_title = _(\"Paperless-ng administration\")\n", "path": "src/paperless/urls.py"}]}
1,816
164
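The three assignments changed above are Django's stock admin-branding hooks. The fragment below shows them in isolation and assumes it runs inside a configured Django project with settings loaded, so it is not standalone:

```python
from django.contrib import admin
from django.utils.translation import gettext_lazy as _

admin.site.site_header = "Paperless-ngx"  # <h1> on each admin page and login form
admin.site.site_title = "Paperless-ngx"   # suffix of every admin page <title>
admin.site.index_title = _("Paperless-ngx administration")  # index heading
```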
gh_patches_debug_34903
rasdani/github-patches
git_diff
data-for-change__anyway-731
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Move load_discussions into main I can't figure out what's the purpose of this file </issue> <code> [start of anyway/load_discussions.py] 1 # -*- coding: utf-8 -*- 2 import argparse 3 from .models import DiscussionMarker 4 import re 5 from .database import db_session 6 import sys 7 import logging 8 9 def main(): 10 parser = argparse.ArgumentParser() 11 parser.add_argument('identifiers', type=str, nargs='*', 12 help='Disqus identifiers to create markers for') 13 args = parser.parse_args() 14 15 identifiers = args.identifiers if args.identifiers else sys.stdin 16 17 for identifier in identifiers: 18 m = re.match(r'\((\d+\.\d+),\s*(\d+\.\d+)\)', identifier) 19 if not m: 20 logging.error("Failed processing: " + identifier) 21 continue 22 (latitude, longitude) = m.group(1, 2) 23 marker = DiscussionMarker.parse({ 24 'latitude': latitude, 25 'longitude': longitude, 26 'title': identifier, 27 'identifier': identifier 28 }) 29 try: 30 db_session.add(marker) 31 db_session.commit() 32 logging.info("Added: " + identifier, end="") 33 except: 34 db_session.rollback() 35 logging.warn("Failed: " + identifier, end="") 36 37 38 if __name__ == "__main__": 39 main() 40 [end of anyway/load_discussions.py] [start of main.py] 1 #!/usr/bin/env python 2 3 import os 4 import click 5 import logging 6 7 @click.group() 8 def cli(): 9 pass 10 11 12 @cli.command() 13 @click.option('--open', 'open_server', is_flag=True, 14 help='Open the server for communication from outside', default=False) 15 def testserver(open_server): 16 from anyway import app, united 17 from apscheduler.scheduler import Scheduler 18 19 sched = Scheduler() 20 21 @sched.interval_schedule(hours=12) 22 def scheduled_import(): 23 united.main() 24 sched.start() 25 26 logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(message)s') 27 28 default_host = '0.0.0.0' if open_server else '127.0.0.1' 29 app.run(debug=True, host=os.getenv('IP', default_host), 30 port=int(os.getenv('PORT', 5000))) 31 32 33 @cli.command() 34 def init_db(): 35 from anyway.models import init_db 36 init_db() 37 38 39 @cli.command() 40 @click.option('--specific_folder', is_flag=True, default=False) 41 @click.option('--delete_all', is_flag=True) 42 @click.option('--path', type=str, default="static/data/lms") 43 @click.option('--batch_size', type=int, default=100) 44 @click.option('--provider_code', type=int) 45 def process_data(specific_folder, delete_all, path, batch_size, provider_code): 46 from anyway.process import main 47 48 return main(specific_folder=specific_folder, delete_all=delete_all, path=path, 49 batch_size=batch_size, provider_code=provider_code) 50 51 52 @cli.command() 53 @click.option('--light', is_flag=True, help='Import without downloading any new files') 54 @click.option('--username', default='') 55 @click.option('--password', default='') 56 @click.option('--lastmail', is_flag=True) 57 def import_united_data(light, username, password, lastmail): 58 from anyway.united import main 59 60 return main(light=light, username=username, password=password, lastmail=lastmail) 61 62 if __name__ == '__main__': 63 cli() 64 [end of main.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/anyway/load_discussions.py b/anyway/load_discussions.py deleted file mode 100755 --- a/anyway/load_discussions.py +++ /dev/null @@ -1,39 +0,0 @@ -# -*- coding: utf-8 -*- -import argparse -from .models import DiscussionMarker -import re -from .database import db_session -import sys -import logging - -def main(): - parser = argparse.ArgumentParser() - parser.add_argument('identifiers', type=str, nargs='*', - help='Disqus identifiers to create markers for') - args = parser.parse_args() - - identifiers = args.identifiers if args.identifiers else sys.stdin - - for identifier in identifiers: - m = re.match(r'\((\d+\.\d+),\s*(\d+\.\d+)\)', identifier) - if not m: - logging.error("Failed processing: " + identifier) - continue - (latitude, longitude) = m.group(1, 2) - marker = DiscussionMarker.parse({ - 'latitude': latitude, - 'longitude': longitude, - 'title': identifier, - 'identifier': identifier - }) - try: - db_session.add(marker) - db_session.commit() - logging.info("Added: " + identifier, end="") - except: - db_session.rollback() - logging.warn("Failed: " + identifier, end="") - - -if __name__ == "__main__": - main() diff --git a/main.py b/main.py --- a/main.py +++ b/main.py @@ -1,8 +1,10 @@ #!/usr/bin/env python - -import os import click import logging +import os +import re +import sys + @click.group() def cli(): @@ -59,5 +61,36 @@ return main(light=light, username=username, password=password, lastmail=lastmail) + [email protected]() [email protected]('identifiers', nargs=-1) +def load_discussions(identifiers): + from anyway.database import db_session + from anyway.models import DiscussionMarker + + identifiers = identifiers or sys.stdin + + for identifier in identifiers: + identifier = identifier.strip() + m = re.match(r'\((\d+\.\d+),\s*(\d+\.\d+)\)', identifier) + if not m: + logging.error("Failed processing: " + identifier) + continue + (latitude, longitude) = m.group(1, 2) + marker = DiscussionMarker.parse({ + 'latitude': latitude, + 'longitude': longitude, + 'title': identifier, + 'identifier': identifier + }) + try: + db_session.add(marker) + db_session.commit() + logging.info("Added: " + identifier) + except Exception as e: + db_session.rollback() + logging.warn("Failed: " + identifier + ": " + e.message) + + if __name__ == '__main__': cli()
{"golden_diff": "diff --git a/anyway/load_discussions.py b/anyway/load_discussions.py\ndeleted file mode 100755\n--- a/anyway/load_discussions.py\n+++ /dev/null\n@@ -1,39 +0,0 @@\n-# -*- coding: utf-8 -*-\n-import argparse\n-from .models import DiscussionMarker\n-import re\n-from .database import db_session\n-import sys\n-import logging\n-\n-def main():\n- parser = argparse.ArgumentParser()\n- parser.add_argument('identifiers', type=str, nargs='*',\n- help='Disqus identifiers to create markers for')\n- args = parser.parse_args()\n-\n- identifiers = args.identifiers if args.identifiers else sys.stdin\n-\n- for identifier in identifiers:\n- m = re.match(r'\\((\\d+\\.\\d+),\\s*(\\d+\\.\\d+)\\)', identifier)\n- if not m:\n- logging.error(\"Failed processing: \" + identifier)\n- continue\n- (latitude, longitude) = m.group(1, 2)\n- marker = DiscussionMarker.parse({\n- 'latitude': latitude,\n- 'longitude': longitude,\n- 'title': identifier,\n- 'identifier': identifier\n- })\n- try:\n- db_session.add(marker)\n- db_session.commit()\n- logging.info(\"Added: \" + identifier, end=\"\")\n- except:\n- db_session.rollback()\n- logging.warn(\"Failed: \" + identifier, end=\"\")\n-\n-\n-if __name__ == \"__main__\":\n- main()\ndiff --git a/main.py b/main.py\n--- a/main.py\n+++ b/main.py\n@@ -1,8 +1,10 @@\n #!/usr/bin/env python\n-\n-import os\n import click\n import logging\n+import os\n+import re\n+import sys\n+\n \n @click.group()\n def cli():\n@@ -59,5 +61,36 @@\n \n return main(light=light, username=username, password=password, lastmail=lastmail)\n \n+\[email protected]()\[email protected]('identifiers', nargs=-1)\n+def load_discussions(identifiers):\n+ from anyway.database import db_session\n+ from anyway.models import DiscussionMarker\n+\n+ identifiers = identifiers or sys.stdin\n+\n+ for identifier in identifiers:\n+ identifier = identifier.strip()\n+ m = re.match(r'\\((\\d+\\.\\d+),\\s*(\\d+\\.\\d+)\\)', identifier)\n+ if not m:\n+ logging.error(\"Failed processing: \" + identifier)\n+ continue\n+ (latitude, longitude) = m.group(1, 2)\n+ marker = DiscussionMarker.parse({\n+ 'latitude': latitude,\n+ 'longitude': longitude,\n+ 'title': identifier,\n+ 'identifier': identifier\n+ })\n+ try:\n+ db_session.add(marker)\n+ db_session.commit()\n+ logging.info(\"Added: \" + identifier)\n+ except Exception as e:\n+ db_session.rollback()\n+ logging.warn(\"Failed: \" + identifier + \": \" + e.message)\n+\n+\n if __name__ == '__main__':\n cli()\n", "issue": "Move load_discussions into main\nI can't figure out what's the purpose of this file\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport argparse\nfrom .models import DiscussionMarker\nimport re\nfrom .database import db_session\nimport sys\nimport logging\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('identifiers', type=str, nargs='*',\n help='Disqus identifiers to create markers for')\n args = parser.parse_args()\n\n identifiers = args.identifiers if args.identifiers else sys.stdin\n\n for identifier in identifiers:\n m = re.match(r'\\((\\d+\\.\\d+),\\s*(\\d+\\.\\d+)\\)', identifier)\n if not m:\n logging.error(\"Failed processing: \" + identifier)\n continue\n (latitude, longitude) = m.group(1, 2)\n marker = DiscussionMarker.parse({\n 'latitude': latitude,\n 'longitude': longitude,\n 'title': identifier,\n 'identifier': identifier\n })\n try:\n db_session.add(marker)\n db_session.commit()\n logging.info(\"Added: \" + identifier, end=\"\")\n except:\n db_session.rollback()\n logging.warn(\"Failed: \" + identifier, 
end=\"\")\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "anyway/load_discussions.py"}, {"content": "#!/usr/bin/env python\n\nimport os\nimport click\nimport logging\n\[email protected]()\ndef cli():\n pass\n\n\[email protected]()\[email protected]('--open', 'open_server', is_flag=True,\n help='Open the server for communication from outside', default=False)\ndef testserver(open_server):\n from anyway import app, united\n from apscheduler.scheduler import Scheduler\n\n sched = Scheduler()\n\n @sched.interval_schedule(hours=12)\n def scheduled_import():\n united.main()\n sched.start()\n\n logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(message)s')\n\n default_host = '0.0.0.0' if open_server else '127.0.0.1'\n app.run(debug=True, host=os.getenv('IP', default_host),\n port=int(os.getenv('PORT', 5000)))\n\n\[email protected]()\ndef init_db():\n from anyway.models import init_db\n init_db()\n\n\[email protected]()\[email protected]('--specific_folder', is_flag=True, default=False)\[email protected]('--delete_all', is_flag=True)\[email protected]('--path', type=str, default=\"static/data/lms\")\[email protected]('--batch_size', type=int, default=100)\[email protected]('--provider_code', type=int)\ndef process_data(specific_folder, delete_all, path, batch_size, provider_code):\n from anyway.process import main\n\n return main(specific_folder=specific_folder, delete_all=delete_all, path=path,\n batch_size=batch_size, provider_code=provider_code)\n\n\[email protected]()\[email protected]('--light', is_flag=True, help='Import without downloading any new files')\[email protected]('--username', default='')\[email protected]('--password', default='')\[email protected]('--lastmail', is_flag=True)\ndef import_united_data(light, username, password, lastmail):\n from anyway.united import main\n\n return main(light=light, username=username, password=password, lastmail=lastmail)\n\nif __name__ == '__main__':\n cli()\n", "path": "main.py"}]}
1,457
686
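The record above folds a standalone argparse script into the project's existing click group. A minimal sketch of that migration pattern follows; the command body is trimmed and the echo calls stand in for the record's logging, so treat the names as illustrative rather than the project's actual code:

```python
import re
import sys

import click


@click.group()
def cli():
    """Root command group; each former standalone script becomes one subcommand."""


@cli.command()
@click.argument("identifiers", nargs=-1)
def load_discussions(identifiers):
    # Preserve the argparse script's behaviour: fall back to stdin when no
    # arguments are given, and validate each "(lat, lon)" identifier.
    for identifier in identifiers or sys.stdin:
        identifier = identifier.strip()
        if not re.match(r"\((\d+\.\d+),\s*(\d+\.\d+)\)", identifier):
            click.echo(f"Failed processing: {identifier}", err=True)
            continue
        click.echo(f"Added: {identifier}")


if __name__ == "__main__":
    cli()
```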
gh_patches_debug_367
rasdani/github-patches
git_diff
rasterio__rasterio-1305
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add `GetMetadataItem` like method Ref: https://github.com/mapbox/rasterio/issues/1077, I'm proposing to add a new method in https://github.com/mapbox/rasterio/blob/master/rasterio/_base.pyx to replicate GDAL GetMetadataItem **Method Name:** `get_metadata_item` or `get_metadata` **Why:** I need to be able to get TIFF metadata like `band.GetMetadataItem('IFD_OFFSET', 'TIFF')` **Code:** ```cython def get_metadata(self, bidx, ns, dm=None, ovr=None): """Returns metadata item Parameters ---------- bidx: int Band index, starting with 1. name: str The key for the metadata item to fetch. domain: str The domain to fetch for. ovr: int Overview level Returns ------- str """ cdef GDALMajorObjectH b = NULL cdef GDALMajorObjectH obj = NULL cdef char *value = NULL cdef const char *name = NULL cdef const char *domain = NULL ns = ns.encode('utf-8') name = ns if dm: dm = dm.encode('utf-8') domain = dm b = self.band(bidx) if ovr: b = GDALGetOverview(b, ovr) obj = b value = GDALGetMetadataItem(obj, name, domain) if value == NULL: return None else: return value ``` @sgillies I'm happy to submit a PR for that :-) </issue> <code> [start of rasterio/errors.py] 1 """Errors and Warnings.""" 2 3 from click import FileError 4 5 6 class RasterioError(Exception): 7 """Root exception class""" 8 9 10 class WindowError(RasterioError): 11 """Raised when errors occur during window operations""" 12 13 14 class CRSError(ValueError): 15 """Raised when a CRS string or mapping is invalid or cannot serve 16 to define a coordinate transformation.""" 17 18 19 class EnvError(RasterioError): 20 """Raised when the state of GDAL/AWS environment cannot be created 21 or modified.""" 22 23 24 class DriverRegistrationError(ValueError): 25 """Raised when a format driver is requested but is not registered.""" 26 27 28 class FileOverwriteError(FileError): 29 """Raised when Rasterio's CLI refuses to clobber output files.""" 30 31 def __init__(self, message): 32 """Raise FileOverwriteError with message as hint.""" 33 super(FileOverwriteError, self).__init__('', hint=message) 34 35 36 class RasterioIOError(IOError): 37 """Raised when a dataset cannot be opened using one of the 38 registered format drivers.""" 39 40 41 class NodataShadowWarning(UserWarning): 42 """Warn that a dataset's nodata attribute is shadowing its alpha band.""" 43 44 def __str__(self): 45 return ("The dataset's nodata attribute is shadowing " 46 "the alpha band. All masks will be determined " 47 "by the nodata attribute") 48 49 50 class NotGeoreferencedWarning(UserWarning): 51 """Warn that a dataset isn't georeferenced.""" 52 53 54 class GDALBehaviorChangeException(RuntimeError): 55 """Raised when GDAL's behavior differs from the given arguments. For 56 example, antimeridian cutting is always on as of GDAL 2.2.0. Users 57 expecting it to be off will be presented with a MultiPolygon when the 58 rest of their code expects a Polygon. 59 60 # Raises an exception on GDAL >= 2.2.0 61 rasterio.warp.transform_geometry( 62 src_crs, dst_crs, antimeridian_cutting=False) 63 """ 64 65 66 class GDALOptionNotImplementedError(RasterioError): 67 """A dataset opening or dataset creation option can't be supported 68 69 This will be raised from Rasterio's shim modules. For example, when 70 a user passes arguments to open_dataset() that can't be evaluated 71 by GDAL 1.x. 
72 """ 73 74 class GDALVersionError(RasterioError): 75 """Raised if the runtime version of GDAL does not meet the required 76 version of GDAL.""" 77 78 79 class WindowEvaluationError(ValueError): 80 """Raised when window evaluation fails""" 81 82 83 class RasterioDeprecationWarning(UserWarning): 84 """Rasterio module deprecations""" 85 86 87 class RasterBlockError(RasterioError): 88 """Raised when raster block access fails""" 89 [end of rasterio/errors.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/rasterio/errors.py b/rasterio/errors.py --- a/rasterio/errors.py +++ b/rasterio/errors.py @@ -86,3 +86,7 @@ class RasterBlockError(RasterioError): """Raised when raster block access fails""" + + +class BandOverviewError(UserWarning): + """Raised when a band overview access fails."""
{"golden_diff": "diff --git a/rasterio/errors.py b/rasterio/errors.py\n--- a/rasterio/errors.py\n+++ b/rasterio/errors.py\n@@ -86,3 +86,7 @@\n \n class RasterBlockError(RasterioError):\n \"\"\"Raised when raster block access fails\"\"\"\n+\n+\n+class BandOverviewError(UserWarning):\n+ \"\"\"Raised when a band overview access fails.\"\"\"\n", "issue": "Add `GetMetadataItem` like method \nRef: https://github.com/mapbox/rasterio/issues/1077, \r\n\r\nI'm proposing to add a new method in https://github.com/mapbox/rasterio/blob/master/rasterio/_base.pyx to replicate GDAL GetMetadataItem\r\n\r\n\r\n**Method Name:** `get_metadata_item` or `get_metadata`\r\n**Why:** I need to be able to get TIFF metadata like `band.GetMetadataItem('IFD_OFFSET', 'TIFF')`\r\n\r\n**Code:**\r\n\r\n```cython\r\n def get_metadata(self, bidx, ns, dm=None, ovr=None):\r\n \"\"\"Returns metadata item\r\n\r\n Parameters\r\n ----------\r\n bidx: int\r\n Band index, starting with 1.\r\n name: str\r\n The key for the metadata item to fetch.\r\n domain: str\r\n The domain to fetch for.\r\n ovr: int\r\n Overview level\r\n\r\n Returns\r\n -------\r\n str\r\n \"\"\"\r\n cdef GDALMajorObjectH b = NULL\r\n cdef GDALMajorObjectH obj = NULL\r\n cdef char *value = NULL\r\n cdef const char *name = NULL\r\n cdef const char *domain = NULL\r\n\r\n ns = ns.encode('utf-8')\r\n name = ns\r\n\r\n if dm:\r\n dm = dm.encode('utf-8')\r\n domain = dm\r\n\r\n b = self.band(bidx)\r\n if ovr:\r\n b = GDALGetOverview(b, ovr)\r\n\r\n obj = b\r\n\r\n value = GDALGetMetadataItem(obj, name, domain)\r\n if value == NULL:\r\n return None\r\n else:\r\n return value\r\n```\r\n\r\n@sgillies I'm happy to submit a PR for that :-) \n", "before_files": [{"content": "\"\"\"Errors and Warnings.\"\"\"\n\nfrom click import FileError\n\n\nclass RasterioError(Exception):\n \"\"\"Root exception class\"\"\"\n\n\nclass WindowError(RasterioError):\n \"\"\"Raised when errors occur during window operations\"\"\"\n\n\nclass CRSError(ValueError):\n \"\"\"Raised when a CRS string or mapping is invalid or cannot serve\n to define a coordinate transformation.\"\"\"\n\n\nclass EnvError(RasterioError):\n \"\"\"Raised when the state of GDAL/AWS environment cannot be created\n or modified.\"\"\"\n\n\nclass DriverRegistrationError(ValueError):\n \"\"\"Raised when a format driver is requested but is not registered.\"\"\"\n\n\nclass FileOverwriteError(FileError):\n \"\"\"Raised when Rasterio's CLI refuses to clobber output files.\"\"\"\n\n def __init__(self, message):\n \"\"\"Raise FileOverwriteError with message as hint.\"\"\"\n super(FileOverwriteError, self).__init__('', hint=message)\n\n\nclass RasterioIOError(IOError):\n \"\"\"Raised when a dataset cannot be opened using one of the\n registered format drivers.\"\"\"\n\n\nclass NodataShadowWarning(UserWarning):\n \"\"\"Warn that a dataset's nodata attribute is shadowing its alpha band.\"\"\"\n\n def __str__(self):\n return (\"The dataset's nodata attribute is shadowing \"\n \"the alpha band. All masks will be determined \"\n \"by the nodata attribute\")\n\n\nclass NotGeoreferencedWarning(UserWarning):\n \"\"\"Warn that a dataset isn't georeferenced.\"\"\"\n\n\nclass GDALBehaviorChangeException(RuntimeError):\n \"\"\"Raised when GDAL's behavior differs from the given arguments. For\n example, antimeridian cutting is always on as of GDAL 2.2.0. 
Users\n expecting it to be off will be presented with a MultiPolygon when the\n rest of their code expects a Polygon.\n\n # Raises an exception on GDAL >= 2.2.0\n rasterio.warp.transform_geometry(\n src_crs, dst_crs, antimeridian_cutting=False)\n \"\"\"\n\n\nclass GDALOptionNotImplementedError(RasterioError):\n \"\"\"A dataset opening or dataset creation option can't be supported\n\n This will be raised from Rasterio's shim modules. For example, when\n a user passes arguments to open_dataset() that can't be evaluated\n by GDAL 1.x.\n \"\"\"\n\nclass GDALVersionError(RasterioError):\n \"\"\"Raised if the runtime version of GDAL does not meet the required\n version of GDAL.\"\"\"\n\n\nclass WindowEvaluationError(ValueError):\n \"\"\"Raised when window evaluation fails\"\"\"\n\n\nclass RasterioDeprecationWarning(UserWarning):\n \"\"\"Rasterio module deprecations\"\"\"\n\n\nclass RasterBlockError(RasterioError):\n \"\"\"Raised when raster block access fails\"\"\"\n", "path": "rasterio/errors.py"}]}
1,674
86
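Note that the accepted diff above is far narrower than the `get_metadata_item` method proposed in the issue: it only adds a `BandOverviewError` class to the exception module. A sketch of how such a guard might be used at a call site; the `overview_handle` helper and the list-based overview model are assumptions for illustration, not rasterio's real API:

```python
class RasterioError(Exception):
    """Root exception class."""


class BandOverviewError(UserWarning):
    """Raised when a band overview access fails."""


def overview_handle(overviews, level):
    # Fail loudly instead of passing a null overview handle downstream,
    # which is what raw GDAL would do for an out-of-range level.
    if not 0 <= level < len(overviews):
        raise BandOverviewError(f"Band has no overview at level {level}")
    return overviews[level]


print(overview_handle(["ovr0", "ovr1"], 1))  # -> ovr1
```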
gh_patches_debug_33879
rasdani/github-patches
git_diff
TheAlgorithms__Python-9068
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Delete base85 algorithm ### Describe your change: Re #6216 Normally, I'm not in favour of just deleting algorithms, but I would make the argument that this is not an algorithm, rather just a snippet of code that utilises another library. Per `CONTRIBTUING.md` > Algorithms in this repo should not be how-to examples for existing Python packages. Instead, they should perform internal calculations or manipulations to convert input values into different output values This `base85` algorithm has essentially got two lines of code that purely utilise a singular library. The doctests only test an external library This repository should not contains examples on how to use a certain library, that would be the library documentation here https://docs.python.org/3/library/base64.html * [ ] Add an algorithm? * [ ] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? * [x] Delete an algorithm ### Checklist: * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [x] All new Python files are placed inside an existing directory. * [x] All filenames are in all lowercase characters with no spaces or dashes. * [x] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [x] All new algorithms include at least one URL that points to Wikipedia or another similar explanation. * [x] If this pull request resolves one or more open issues then the description above includes the issue number(s) with a [closing keyword](https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue): "Fixes #ISSUE-NUMBER". </issue> <code> [start of ciphers/base32.py] 1 import base64 2 3 4 def base32_encode(string: str) -> bytes: 5 """ 6 Encodes a given string to base32, returning a bytes-like object 7 >>> base32_encode("Hello World!") 8 b'JBSWY3DPEBLW64TMMQQQ====' 9 >>> base32_encode("123456") 10 b'GEZDGNBVGY======' 11 >>> base32_encode("some long complex string") 12 b'ONXW2ZJANRXW4ZZAMNXW24DMMV4CA43UOJUW4ZY=' 13 """ 14 15 # encoded the input (we need a bytes like object) 16 # then, b32encoded the bytes-like object 17 return base64.b32encode(string.encode("utf-8")) 18 19 20 def base32_decode(encoded_bytes: bytes) -> str: 21 """ 22 Decodes a given bytes-like object to a string, returning a string 23 >>> base32_decode(b'JBSWY3DPEBLW64TMMQQQ====') 24 'Hello World!' 25 >>> base32_decode(b'GEZDGNBVGY======') 26 '123456' 27 >>> base32_decode(b'ONXW2ZJANRXW4ZZAMNXW24DMMV4CA43UOJUW4ZY=') 28 'some long complex string' 29 """ 30 31 # decode the bytes from base32 32 # then, decode the bytes-like object to return as a string 33 return base64.b32decode(encoded_bytes).decode("utf-8") 34 35 36 if __name__ == "__main__": 37 test = "Hello World!" 
38 encoded = base32_encode(test) 39 print(encoded) 40 41 decoded = base32_decode(encoded) 42 print(decoded) 43 [end of ciphers/base32.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ciphers/base32.py b/ciphers/base32.py --- a/ciphers/base32.py +++ b/ciphers/base32.py @@ -1,42 +1,45 @@ -import base64 +""" +Base32 encoding and decoding +https://en.wikipedia.org/wiki/Base32 +""" +B32_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZ234567" -def base32_encode(string: str) -> bytes: + +def base32_encode(data: bytes) -> bytes: """ - Encodes a given string to base32, returning a bytes-like object - >>> base32_encode("Hello World!") + >>> base32_encode(b"Hello World!") b'JBSWY3DPEBLW64TMMQQQ====' - >>> base32_encode("123456") + >>> base32_encode(b"123456") b'GEZDGNBVGY======' - >>> base32_encode("some long complex string") + >>> base32_encode(b"some long complex string") b'ONXW2ZJANRXW4ZZAMNXW24DMMV4CA43UOJUW4ZY=' """ - - # encoded the input (we need a bytes like object) - # then, b32encoded the bytes-like object - return base64.b32encode(string.encode("utf-8")) + binary_data = "".join(bin(ord(d))[2:].zfill(8) for d in data.decode("utf-8")) + binary_data = binary_data.ljust(5 * ((len(binary_data) // 5) + 1), "0") + b32_chunks = map("".join, zip(*[iter(binary_data)] * 5)) + b32_result = "".join(B32_CHARSET[int(chunk, 2)] for chunk in b32_chunks) + return bytes(b32_result.ljust(8 * ((len(b32_result) // 8) + 1), "="), "utf-8") -def base32_decode(encoded_bytes: bytes) -> str: +def base32_decode(data: bytes) -> bytes: """ - Decodes a given bytes-like object to a string, returning a string >>> base32_decode(b'JBSWY3DPEBLW64TMMQQQ====') - 'Hello World!' + b'Hello World!' >>> base32_decode(b'GEZDGNBVGY======') - '123456' + b'123456' >>> base32_decode(b'ONXW2ZJANRXW4ZZAMNXW24DMMV4CA43UOJUW4ZY=') - 'some long complex string' + b'some long complex string' """ - - # decode the bytes from base32 - # then, decode the bytes-like object to return as a string - return base64.b32decode(encoded_bytes).decode("utf-8") + binary_chunks = "".join( + bin(B32_CHARSET.index(_d))[2:].zfill(5) + for _d in data.decode("utf-8").strip("=") + ) + binary_data = list(map("".join, zip(*[iter(binary_chunks)] * 8))) + return bytes("".join([chr(int(_d, 2)) for _d in binary_data]), "utf-8") if __name__ == "__main__": - test = "Hello World!" - encoded = base32_encode(test) - print(encoded) + import doctest - decoded = base32_decode(encoded) - print(decoded) + doctest.testmod()
{"golden_diff": "diff --git a/ciphers/base32.py b/ciphers/base32.py\n--- a/ciphers/base32.py\n+++ b/ciphers/base32.py\n@@ -1,42 +1,45 @@\n-import base64\n+\"\"\"\n+Base32 encoding and decoding\n \n+https://en.wikipedia.org/wiki/Base32\n+\"\"\"\n+B32_CHARSET = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ234567\"\n \n-def base32_encode(string: str) -> bytes:\n+\n+def base32_encode(data: bytes) -> bytes:\n \"\"\"\n- Encodes a given string to base32, returning a bytes-like object\n- >>> base32_encode(\"Hello World!\")\n+ >>> base32_encode(b\"Hello World!\")\n b'JBSWY3DPEBLW64TMMQQQ===='\n- >>> base32_encode(\"123456\")\n+ >>> base32_encode(b\"123456\")\n b'GEZDGNBVGY======'\n- >>> base32_encode(\"some long complex string\")\n+ >>> base32_encode(b\"some long complex string\")\n b'ONXW2ZJANRXW4ZZAMNXW24DMMV4CA43UOJUW4ZY='\n \"\"\"\n-\n- # encoded the input (we need a bytes like object)\n- # then, b32encoded the bytes-like object\n- return base64.b32encode(string.encode(\"utf-8\"))\n+ binary_data = \"\".join(bin(ord(d))[2:].zfill(8) for d in data.decode(\"utf-8\"))\n+ binary_data = binary_data.ljust(5 * ((len(binary_data) // 5) + 1), \"0\")\n+ b32_chunks = map(\"\".join, zip(*[iter(binary_data)] * 5))\n+ b32_result = \"\".join(B32_CHARSET[int(chunk, 2)] for chunk in b32_chunks)\n+ return bytes(b32_result.ljust(8 * ((len(b32_result) // 8) + 1), \"=\"), \"utf-8\")\n \n \n-def base32_decode(encoded_bytes: bytes) -> str:\n+def base32_decode(data: bytes) -> bytes:\n \"\"\"\n- Decodes a given bytes-like object to a string, returning a string\n >>> base32_decode(b'JBSWY3DPEBLW64TMMQQQ====')\n- 'Hello World!'\n+ b'Hello World!'\n >>> base32_decode(b'GEZDGNBVGY======')\n- '123456'\n+ b'123456'\n >>> base32_decode(b'ONXW2ZJANRXW4ZZAMNXW24DMMV4CA43UOJUW4ZY=')\n- 'some long complex string'\n+ b'some long complex string'\n \"\"\"\n-\n- # decode the bytes from base32\n- # then, decode the bytes-like object to return as a string\n- return base64.b32decode(encoded_bytes).decode(\"utf-8\")\n+ binary_chunks = \"\".join(\n+ bin(B32_CHARSET.index(_d))[2:].zfill(5)\n+ for _d in data.decode(\"utf-8\").strip(\"=\")\n+ )\n+ binary_data = list(map(\"\".join, zip(*[iter(binary_chunks)] * 8)))\n+ return bytes(\"\".join([chr(int(_d, 2)) for _d in binary_data]), \"utf-8\")\n \n \n if __name__ == \"__main__\":\n- test = \"Hello World!\"\n- encoded = base32_encode(test)\n- print(encoded)\n+ import doctest\n \n- decoded = base32_decode(encoded)\n- print(decoded)\n+ doctest.testmod()\n", "issue": "Delete base85 algorithm\n### Describe your change:\r\nRe #6216\r\n\r\nNormally, I'm not in favour of just deleting algorithms, but I would make the argument that this is not an algorithm, rather just a snippet of code that utilises another library.\r\n\r\nPer `CONTRIBTUING.md`\r\n> Algorithms in this repo should not be how-to examples for existing Python packages. Instead, they should perform internal calculations or manipulations to convert input values into different output values\r\nThis `base85` algorithm has essentially got two lines of code that purely utilise a singular library. 
The doctests only test an external library\r\n\r\nThis repository should not contains examples on how to use a certain library, that would be the library documentation here\r\nhttps://docs.python.org/3/library/base64.html\r\n\r\n\r\n* [ ] Add an algorithm?\r\n* [ ] Fix a bug or typo in an existing algorithm?\r\n* [ ] Documentation change?\r\n* [x] Delete an algorithm\r\n\r\n### Checklist:\r\n* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).\r\n* [x] This pull request is all my own work -- I have not plagiarized.\r\n* [x] I know that pull requests will not be merged if they fail the automated tests.\r\n* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.\r\n* [x] All new Python files are placed inside an existing directory.\r\n* [x] All filenames are in all lowercase characters with no spaces or dashes.\r\n* [x] All functions and variable names follow Python naming conventions.\r\n* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).\r\n* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.\r\n* [x] All new algorithms include at least one URL that points to Wikipedia or another similar explanation.\r\n* [x] If this pull request resolves one or more open issues then the description above includes the issue number(s) with a [closing keyword](https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue): \"Fixes #ISSUE-NUMBER\".\r\n\n", "before_files": [{"content": "import base64\n\n\ndef base32_encode(string: str) -> bytes:\n \"\"\"\n Encodes a given string to base32, returning a bytes-like object\n >>> base32_encode(\"Hello World!\")\n b'JBSWY3DPEBLW64TMMQQQ===='\n >>> base32_encode(\"123456\")\n b'GEZDGNBVGY======'\n >>> base32_encode(\"some long complex string\")\n b'ONXW2ZJANRXW4ZZAMNXW24DMMV4CA43UOJUW4ZY='\n \"\"\"\n\n # encoded the input (we need a bytes like object)\n # then, b32encoded the bytes-like object\n return base64.b32encode(string.encode(\"utf-8\"))\n\n\ndef base32_decode(encoded_bytes: bytes) -> str:\n \"\"\"\n Decodes a given bytes-like object to a string, returning a string\n >>> base32_decode(b'JBSWY3DPEBLW64TMMQQQ====')\n 'Hello World!'\n >>> base32_decode(b'GEZDGNBVGY======')\n '123456'\n >>> base32_decode(b'ONXW2ZJANRXW4ZZAMNXW24DMMV4CA43UOJUW4ZY=')\n 'some long complex string'\n \"\"\"\n\n # decode the bytes from base32\n # then, decode the bytes-like object to return as a string\n return base64.b32decode(encoded_bytes).decode(\"utf-8\")\n\n\nif __name__ == \"__main__\":\n test = \"Hello World!\"\n encoded = base32_encode(test)\n print(encoded)\n\n decoded = base32_decode(encoded)\n print(decoded)\n", "path": "ciphers/base32.py"}]}
1,517
856
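The replacement above re-implements base32 with bit manipulation instead of wrapping the `base64` module, which is what made the file acceptable under the repository's no-thin-wrappers rule. The core scheme fits in a few lines; this is a condensed sketch of the same RFC 4648 encoding, independent of the repository's exact signatures:

```python
B32 = "ABCDEFGHIJKLMNOPQRSTUVWXYZ234567"


def b32_encode(data: bytes) -> str:
    bits = "".join(f"{byte:08b}" for byte in data)
    bits += "0" * (-len(bits) % 5)  # pad the bitstream to a multiple of 5
    out = "".join(B32[int(bits[i:i + 5], 2)] for i in range(0, len(bits), 5))
    return out + "=" * (-len(out) % 8)  # pad the text to a multiple of 8 chars


def b32_decode(text: str) -> bytes:
    bits = "".join(f"{B32.index(c):05b}" for c in text.rstrip("="))
    usable = len(bits) - len(bits) % 8  # drop the sub-byte padding bits
    return bytes(int(bits[i:i + 8], 2) for i in range(0, usable, 8))


assert b32_encode(b"Hello World!") == "JBSWY3DPEBLW64TMMQQQ===="
assert b32_decode("JBSWY3DPEBLW64TMMQQQ====") == b"Hello World!"
```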
gh_patches_debug_1834
rasdani/github-patches
git_diff
mozilla__pontoon-3003
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> GetText check fails incorrectly on newline https://pontoon.mozilla.org/en-GB/all-projects/all-resources/?string=286055 If you copy the source string, an extra line is added at the back, and that fails the checks for GetText. </issue> <code> [start of pontoon/translations/forms.py] 1 from django import forms 2 from django.contrib.postgres.forms import SimpleArrayField 3 4 from pontoon.base.models import ( 5 Entity, 6 Locale, 7 ) 8 9 10 class CreateTranslationForm(forms.Form): 11 """ 12 Form for parameters to the `entities` view. 13 """ 14 15 entity = forms.IntegerField() 16 locale = forms.CharField() 17 plural_form = forms.CharField() 18 19 # Some file formats allow empty original strings and translations. 20 # We must allow both here. Validation is handled in pontoon.checks module. 21 original = forms.CharField(required=False) 22 translation = forms.CharField(required=False) 23 24 ignore_warnings = forms.BooleanField(required=False) 25 approve = forms.BooleanField(required=False) 26 force_suggestions = forms.BooleanField(required=False) 27 paths = forms.MultipleChoiceField(required=False) 28 machinery_sources = SimpleArrayField(forms.CharField(max_length=30), required=False) 29 30 def clean_paths(self): 31 try: 32 return self.data.getlist("paths[]") 33 except AttributeError: 34 # If the data source is not a QueryDict, it won't have a `getlist` method. 35 return self.data.get("paths[]") or [] 36 37 def clean_entity(self): 38 try: 39 return Entity.objects.get(pk=self.cleaned_data["entity"]) 40 except Entity.DoesNotExist: 41 raise forms.ValidationError(f"Entity `{self.entity}` could not be found") 42 43 def clean_locale(self): 44 try: 45 return Locale.objects.get(code=self.cleaned_data["locale"]) 46 except Locale.DoesNotExist: 47 raise forms.ValidationError(f"Locale `{self.entity}` could not be found") 48 49 def clean_plural_form(self): 50 if self.cleaned_data["plural_form"] == "-1": 51 return None 52 return self.cleaned_data["plural_form"] 53 54 def clean_translation(self): 55 return self.data.get("translation", "") 56 [end of pontoon/translations/forms.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pontoon/translations/forms.py b/pontoon/translations/forms.py --- a/pontoon/translations/forms.py +++ b/pontoon/translations/forms.py @@ -51,5 +51,8 @@ return None return self.cleaned_data["plural_form"] + def clean_original(self): + return self.data.get("original", "") + def clean_translation(self): return self.data.get("translation", "")
{"golden_diff": "diff --git a/pontoon/translations/forms.py b/pontoon/translations/forms.py\n--- a/pontoon/translations/forms.py\n+++ b/pontoon/translations/forms.py\n@@ -51,5 +51,8 @@\n return None\n return self.cleaned_data[\"plural_form\"]\n \n+ def clean_original(self):\n+ return self.data.get(\"original\", \"\")\n+\n def clean_translation(self):\n return self.data.get(\"translation\", \"\")\n", "issue": "GetText check fails incorrectly on newline\nhttps://pontoon.mozilla.org/en-GB/all-projects/all-resources/?string=286055\r\n\r\nIf you copy the source string, an extra line is added at the back, and that fails the checks for GetText.\n", "before_files": [{"content": "from django import forms\nfrom django.contrib.postgres.forms import SimpleArrayField\n\nfrom pontoon.base.models import (\n Entity,\n Locale,\n)\n\n\nclass CreateTranslationForm(forms.Form):\n \"\"\"\n Form for parameters to the `entities` view.\n \"\"\"\n\n entity = forms.IntegerField()\n locale = forms.CharField()\n plural_form = forms.CharField()\n\n # Some file formats allow empty original strings and translations.\n # We must allow both here. Validation is handled in pontoon.checks module.\n original = forms.CharField(required=False)\n translation = forms.CharField(required=False)\n\n ignore_warnings = forms.BooleanField(required=False)\n approve = forms.BooleanField(required=False)\n force_suggestions = forms.BooleanField(required=False)\n paths = forms.MultipleChoiceField(required=False)\n machinery_sources = SimpleArrayField(forms.CharField(max_length=30), required=False)\n\n def clean_paths(self):\n try:\n return self.data.getlist(\"paths[]\")\n except AttributeError:\n # If the data source is not a QueryDict, it won't have a `getlist` method.\n return self.data.get(\"paths[]\") or []\n\n def clean_entity(self):\n try:\n return Entity.objects.get(pk=self.cleaned_data[\"entity\"])\n except Entity.DoesNotExist:\n raise forms.ValidationError(f\"Entity `{self.entity}` could not be found\")\n\n def clean_locale(self):\n try:\n return Locale.objects.get(code=self.cleaned_data[\"locale\"])\n except Locale.DoesNotExist:\n raise forms.ValidationError(f\"Locale `{self.entity}` could not be found\")\n\n def clean_plural_form(self):\n if self.cleaned_data[\"plural_form\"] == \"-1\":\n return None\n return self.cleaned_data[\"plural_form\"]\n\n def clean_translation(self):\n return self.data.get(\"translation\", \"\")\n", "path": "pontoon/translations/forms.py"}]}
1,072
100
gh_patches_debug_8461
rasdani/github-patches
git_diff
deepset-ai__haystack-7603
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add e2e tests for pipeline evaluation for 2.x Test eval for 2.x with tiny dataset, check dataframes, isolated, integrated eval (migrate existing integration tests to e2e tests for 2.x) </issue> <code> [start of haystack/evaluation/eval_run_result.py] 1 from abc import ABC, abstractmethod 2 from copy import deepcopy 3 from typing import Any, Dict, List 4 from warnings import warn 5 6 from pandas import DataFrame 7 from pandas import concat as pd_concat 8 9 10 class BaseEvaluationRunResult(ABC): 11 """ 12 Represents the results of an evaluation run. 13 """ 14 15 @abstractmethod 16 def to_pandas(self) -> "DataFrame": 17 """ 18 Creates a Pandas DataFrame containing the scores of each metric for every input sample. 19 20 :returns: 21 Pandas DataFrame with the scores. 22 """ 23 24 @abstractmethod 25 def score_report(self) -> "DataFrame": 26 """ 27 Transforms the results into a Pandas DataFrame with the aggregated scores for each metric. 28 29 :returns: 30 Pandas DataFrame with the aggregated scores. 31 """ 32 33 @abstractmethod 34 def comparative_individual_scores_report(self, other: "BaseEvaluationRunResult") -> "DataFrame": 35 """ 36 Creates a Pandas DataFrame with the scores for each metric in the results of two different evaluation runs. 37 38 The inputs to both evaluation runs is assumed to be the same. 39 40 :param other: 41 Results of another evaluation run to compare with. 42 :returns: 43 Pandas DataFrame with the score comparison. 44 """ 45 46 47 class EvaluationRunResult(BaseEvaluationRunResult): 48 """ 49 Contains the inputs and the outputs of an evaluation pipeline and provides methods to inspect them. 50 """ 51 52 def __init__(self, run_name: str, inputs: Dict[str, List[Any]], results: Dict[str, Dict[str, Any]]): 53 """ 54 Initialize a new evaluation run result. 55 56 :param run_name: 57 Name of the evaluation run. 58 :param inputs: 59 Dictionary containing the inputs used for the run. 60 Each key is the name of the input and its value is 61 a list of input values. The length of the lists should 62 be the same. 63 :param results: 64 Dictionary containing the results of the evaluators 65 used in the evaluation pipeline. Each key is the name 66 of the metric and its value is dictionary with the following 67 keys: 68 - 'score': The aggregated score for the metric. 69 - 'individual_scores': A list of scores for each input sample. 70 """ 71 self.run_name = run_name 72 self.inputs = deepcopy(inputs) 73 self.results = deepcopy(results) 74 75 if len(inputs) == 0: 76 raise ValueError("No inputs provided.") 77 if len({len(l) for l in inputs.values()}) != 1: 78 raise ValueError("Lengths of the inputs should be the same.") 79 80 expected_len = len(next(iter(inputs.values()))) 81 82 for metric, outputs in results.items(): 83 if "score" not in outputs: 84 raise ValueError(f"Aggregate score missing for {metric}.") 85 if "individual_scores" not in outputs: 86 raise ValueError(f"Individual scores missing for {metric}.") 87 88 if len(outputs["individual_scores"]) != expected_len: 89 raise ValueError( 90 f"Length of individual scores for '{metric}' should be the same as the inputs. " 91 f"Got {len(outputs['individual_scores'])} but expected {expected_len}." 
92 ) 93 94 def score_report(self) -> DataFrame: # noqa: D102 95 results = {k: v["score"] for k, v in self.results.items()} 96 return DataFrame.from_dict(results, orient="index", columns=["score"]) 97 98 def to_pandas(self) -> DataFrame: # noqa: D102 99 inputs_columns = list(self.inputs.keys()) 100 inputs_values = list(self.inputs.values()) 101 inputs_values = list(map(list, zip(*inputs_values))) # transpose the values 102 df_inputs = DataFrame(inputs_values, columns=inputs_columns) 103 104 scores_columns = list(self.results.keys()) 105 scores_values = [v["individual_scores"] for v in self.results.values()] 106 scores_values = list(map(list, zip(*scores_values))) # transpose the values 107 df_scores = DataFrame(scores_values, columns=scores_columns) 108 109 return df_inputs.join(df_scores) 110 111 def comparative_individual_scores_report(self, other: "BaseEvaluationRunResult") -> DataFrame: # noqa: D102 112 if not isinstance(other, EvaluationRunResult): 113 raise ValueError("Comparative scores can only be computed between EvaluationRunResults.") 114 115 this_name = self.run_name 116 other_name = other.run_name 117 if this_name == other_name: 118 warn(f"The run names of the two evaluation results are the same ('{this_name}')") 119 this_name = f"{this_name}_first" 120 other_name = f"{other_name}_second" 121 122 if self.inputs != other.inputs: 123 warn(f"The inputs to the two evaluation results differ; using the inputs of '{this_name}'.") 124 125 pipe_a_df = self.to_pandas() 126 pipe_b_df = other.to_pandas() 127 128 ignore = list(self.inputs.keys()) 129 pipe_b_df.drop(columns=ignore, inplace=True, errors="ignore") 130 pipe_b_df.columns = [f"{other_name}_{column}" for column in pipe_b_df.columns] # type: ignore 131 pipe_a_df.columns = [ 132 f"{this_name}_{col}" if col not in ignore else col for col in pipe_a_df.columns 133 ] # type: ignore 134 135 results_df = pd_concat([pipe_a_df, pipe_b_df], axis=1) 136 return results_df 137 [end of haystack/evaluation/eval_run_result.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/haystack/evaluation/eval_run_result.py b/haystack/evaluation/eval_run_result.py --- a/haystack/evaluation/eval_run_result.py +++ b/haystack/evaluation/eval_run_result.py @@ -119,8 +119,8 @@ this_name = f"{this_name}_first" other_name = f"{other_name}_second" - if self.inputs != other.inputs: - warn(f"The inputs to the two evaluation results differ; using the inputs of '{this_name}'.") + if self.inputs.keys() != other.inputs.keys(): + warn(f"The input columns differ between the results; using the input columns of '{this_name}'.") pipe_a_df = self.to_pandas() pipe_b_df = other.to_pandas()
{"golden_diff": "diff --git a/haystack/evaluation/eval_run_result.py b/haystack/evaluation/eval_run_result.py\n--- a/haystack/evaluation/eval_run_result.py\n+++ b/haystack/evaluation/eval_run_result.py\n@@ -119,8 +119,8 @@\n this_name = f\"{this_name}_first\"\n other_name = f\"{other_name}_second\"\n \n- if self.inputs != other.inputs:\n- warn(f\"The inputs to the two evaluation results differ; using the inputs of '{this_name}'.\")\n+ if self.inputs.keys() != other.inputs.keys():\n+ warn(f\"The input columns differ between the results; using the input columns of '{this_name}'.\")\n \n pipe_a_df = self.to_pandas()\n pipe_b_df = other.to_pandas()\n", "issue": "Add e2e tests for pipeline evaluation for 2.x\nTest eval for 2.x with tiny dataset, check dataframes, isolated, integrated eval (migrate existing integration tests to e2e tests for 2.x)\n", "before_files": [{"content": "from abc import ABC, abstractmethod\nfrom copy import deepcopy\nfrom typing import Any, Dict, List\nfrom warnings import warn\n\nfrom pandas import DataFrame\nfrom pandas import concat as pd_concat\n\n\nclass BaseEvaluationRunResult(ABC):\n \"\"\"\n Represents the results of an evaluation run.\n \"\"\"\n\n @abstractmethod\n def to_pandas(self) -> \"DataFrame\":\n \"\"\"\n Creates a Pandas DataFrame containing the scores of each metric for every input sample.\n\n :returns:\n Pandas DataFrame with the scores.\n \"\"\"\n\n @abstractmethod\n def score_report(self) -> \"DataFrame\":\n \"\"\"\n Transforms the results into a Pandas DataFrame with the aggregated scores for each metric.\n\n :returns:\n Pandas DataFrame with the aggregated scores.\n \"\"\"\n\n @abstractmethod\n def comparative_individual_scores_report(self, other: \"BaseEvaluationRunResult\") -> \"DataFrame\":\n \"\"\"\n Creates a Pandas DataFrame with the scores for each metric in the results of two different evaluation runs.\n\n The inputs to both evaluation runs is assumed to be the same.\n\n :param other:\n Results of another evaluation run to compare with.\n :returns:\n Pandas DataFrame with the score comparison.\n \"\"\"\n\n\nclass EvaluationRunResult(BaseEvaluationRunResult):\n \"\"\"\n Contains the inputs and the outputs of an evaluation pipeline and provides methods to inspect them.\n \"\"\"\n\n def __init__(self, run_name: str, inputs: Dict[str, List[Any]], results: Dict[str, Dict[str, Any]]):\n \"\"\"\n Initialize a new evaluation run result.\n\n :param run_name:\n Name of the evaluation run.\n :param inputs:\n Dictionary containing the inputs used for the run.\n Each key is the name of the input and its value is\n a list of input values. The length of the lists should\n be the same.\n :param results:\n Dictionary containing the results of the evaluators\n used in the evaluation pipeline. 
Each key is the name\n of the metric and its value is dictionary with the following\n keys:\n - 'score': The aggregated score for the metric.\n - 'individual_scores': A list of scores for each input sample.\n \"\"\"\n self.run_name = run_name\n self.inputs = deepcopy(inputs)\n self.results = deepcopy(results)\n\n if len(inputs) == 0:\n raise ValueError(\"No inputs provided.\")\n if len({len(l) for l in inputs.values()}) != 1:\n raise ValueError(\"Lengths of the inputs should be the same.\")\n\n expected_len = len(next(iter(inputs.values())))\n\n for metric, outputs in results.items():\n if \"score\" not in outputs:\n raise ValueError(f\"Aggregate score missing for {metric}.\")\n if \"individual_scores\" not in outputs:\n raise ValueError(f\"Individual scores missing for {metric}.\")\n\n if len(outputs[\"individual_scores\"]) != expected_len:\n raise ValueError(\n f\"Length of individual scores for '{metric}' should be the same as the inputs. \"\n f\"Got {len(outputs['individual_scores'])} but expected {expected_len}.\"\n )\n\n def score_report(self) -> DataFrame: # noqa: D102\n results = {k: v[\"score\"] for k, v in self.results.items()}\n return DataFrame.from_dict(results, orient=\"index\", columns=[\"score\"])\n\n def to_pandas(self) -> DataFrame: # noqa: D102\n inputs_columns = list(self.inputs.keys())\n inputs_values = list(self.inputs.values())\n inputs_values = list(map(list, zip(*inputs_values))) # transpose the values\n df_inputs = DataFrame(inputs_values, columns=inputs_columns)\n\n scores_columns = list(self.results.keys())\n scores_values = [v[\"individual_scores\"] for v in self.results.values()]\n scores_values = list(map(list, zip(*scores_values))) # transpose the values\n df_scores = DataFrame(scores_values, columns=scores_columns)\n\n return df_inputs.join(df_scores)\n\n def comparative_individual_scores_report(self, other: \"BaseEvaluationRunResult\") -> DataFrame: # noqa: D102\n if not isinstance(other, EvaluationRunResult):\n raise ValueError(\"Comparative scores can only be computed between EvaluationRunResults.\")\n\n this_name = self.run_name\n other_name = other.run_name\n if this_name == other_name:\n warn(f\"The run names of the two evaluation results are the same ('{this_name}')\")\n this_name = f\"{this_name}_first\"\n other_name = f\"{other_name}_second\"\n\n if self.inputs != other.inputs:\n warn(f\"The inputs to the two evaluation results differ; using the inputs of '{this_name}'.\")\n\n pipe_a_df = self.to_pandas()\n pipe_b_df = other.to_pandas()\n\n ignore = list(self.inputs.keys())\n pipe_b_df.drop(columns=ignore, inplace=True, errors=\"ignore\")\n pipe_b_df.columns = [f\"{other_name}_{column}\" for column in pipe_b_df.columns] # type: ignore\n pipe_a_df.columns = [\n f\"{this_name}_{col}\" if col not in ignore else col for col in pipe_a_df.columns\n ] # type: ignore\n\n results_df = pd_concat([pipe_a_df, pipe_b_df], axis=1)\n return results_df\n", "path": "haystack/evaluation/eval_run_result.py"}]}
2,037
176
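The fix above relaxes the comparability check from full input equality to matching input column names, which is all the side-by-side join actually needs. A reduced sketch of that join on plain DataFrames, with hypothetical run names and score columns:

```python
from warnings import warn

import pandas as pd


def comparative_report(df_a: pd.DataFrame, df_b: pd.DataFrame,
                       input_cols: list, name_a: str, name_b: str) -> pd.DataFrame:
    # Only the column names must match; differing cell values just mean the
    # two runs saw different samples, which is worth a warning, not an error.
    if set(df_a.columns) & set(input_cols) != set(df_b.columns) & set(input_cols):
        warn(f"The input columns differ between the results; using those of '{name_a}'.")
    df_b = df_b.drop(columns=input_cols, errors="ignore")
    df_b.columns = [f"{name_b}_{c}" for c in df_b.columns]
    df_a = df_a.copy()
    df_a.columns = [c if c in input_cols else f"{name_a}_{c}" for c in df_a.columns]
    return pd.concat([df_a, df_b], axis=1)


a = pd.DataFrame({"question": ["q1"], "faithfulness": [0.9]})
b = pd.DataFrame({"question": ["q1"], "faithfulness": [0.7]})
print(comparative_report(a, b, ["question"], "run_a", "run_b"))
```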
gh_patches_debug_13129
rasdani/github-patches
git_diff
liqd__a4-meinberlin-613
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> datetimepicker unique ids <img width="980" alt="screen shot 2017-06-01 at 15 59 21" src="https://cloud.githubusercontent.com/assets/16354712/26683219/61f8eb4c-46e3-11e7-851b-148bff927bd4.png"> </issue> <code> [start of apps/contrib/widgets.py] 1 import datetime 2 from itertools import chain 3 4 import django_filters 5 from django.contrib.staticfiles.storage import staticfiles_storage 6 from django.db.models.fields import BLANK_CHOICE_DASH 7 from django.forms import widgets as form_widgets 8 from django.forms.widgets import flatatt 9 from django.template.loader import render_to_string 10 from django.utils import formats 11 from django.utils.timezone import localtime 12 from django.utils.translation import ugettext as _ 13 14 15 class DropdownLinkWidget(django_filters.widgets.LinkWidget): 16 label = None 17 right = False 18 template = 'meinberlin_contrib/widgets/dropdown_link.html' 19 20 def get_option_label(self, value, choices=()): 21 option_label = BLANK_CHOICE_DASH[0][1] 22 23 for v, label in chain(self.choices, choices): 24 if str(v) == value: 25 option_label = label 26 break 27 28 if option_label == BLANK_CHOICE_DASH[0][1]: 29 option_label = _('All') 30 31 return option_label 32 33 def render(self, name, value, attrs=None, choices=()): 34 all_choices = list(chain(self.choices, choices)) 35 36 if len(all_choices) <= 1: 37 return '' 38 39 if value is None: 40 value = all_choices[0][0] 41 42 _id = attrs.pop('id') 43 final_attrs = flatatt(self.build_attrs(attrs)) 44 value_label = self.get_option_label(value, choices=choices) 45 46 options = super().render(name, value, attrs={ 47 'class': 'dropdown-menu', 48 'aria-labelledby': _id, 49 }, choices=choices) 50 51 return render_to_string(self.template, { 52 'options': options, 53 'id': _id, 54 'attrs': final_attrs, 55 'value_label': value_label, 56 'label': self.label, 57 'right': self.right, 58 }) 59 60 61 class DateTimeInput(form_widgets.SplitDateTimeWidget): 62 class Media: 63 js = ( 64 staticfiles_storage.url('datepicker.js'), 65 ) 66 css = {'all': [ 67 staticfiles_storage.url('datepicker.css'), 68 ]} 69 70 def render(self, name, value, attrs=None): 71 date_attrs = self.build_attrs(attrs) 72 date_attrs.update({ 73 'class': 'datepicker', 74 'placeholder': formats.localize_input(datetime.date.today()) 75 }) 76 time_attrs = self.build_attrs(attrs) 77 time_attrs.update({ 78 'class': 'timepicker', 79 'placeholder': '00:00', 80 }) 81 82 if isinstance(value, datetime.datetime): 83 value = localtime(value) 84 date = value.date() 85 time = value.time() 86 else: 87 # value's just a list in case of an error 88 date = value[0] if value else None 89 time = value[1] if value else '00:00' 90 91 return render_to_string('datetime_input.html', { 92 'date': self.widgets[0].render( 93 name + '_0', 94 date, 95 date_attrs 96 ), 97 'time': self.widgets[1].render( 98 name + '_1', 99 time, 100 time_attrs 101 ) 102 }) 103 [end of apps/contrib/widgets.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/apps/contrib/widgets.py b/apps/contrib/widgets.py --- a/apps/contrib/widgets.py +++ b/apps/contrib/widgets.py @@ -71,12 +71,14 @@ date_attrs = self.build_attrs(attrs) date_attrs.update({ 'class': 'datepicker', - 'placeholder': formats.localize_input(datetime.date.today()) + 'placeholder': formats.localize_input(datetime.date.today()), + 'id': name + '_date' }) time_attrs = self.build_attrs(attrs) time_attrs.update({ 'class': 'timepicker', 'placeholder': '00:00', + 'id': name + '_time' }) if isinstance(value, datetime.datetime):
{"golden_diff": "diff --git a/apps/contrib/widgets.py b/apps/contrib/widgets.py\n--- a/apps/contrib/widgets.py\n+++ b/apps/contrib/widgets.py\n@@ -71,12 +71,14 @@\n date_attrs = self.build_attrs(attrs)\n date_attrs.update({\n 'class': 'datepicker',\n- 'placeholder': formats.localize_input(datetime.date.today())\n+ 'placeholder': formats.localize_input(datetime.date.today()),\n+ 'id': name + '_date'\n })\n time_attrs = self.build_attrs(attrs)\n time_attrs.update({\n 'class': 'timepicker',\n 'placeholder': '00:00',\n+ 'id': name + '_time'\n })\n \n if isinstance(value, datetime.datetime):\n", "issue": "datetimepicker unique ids\n<img width=\"980\" alt=\"screen shot 2017-06-01 at 15 59 21\" src=\"https://cloud.githubusercontent.com/assets/16354712/26683219/61f8eb4c-46e3-11e7-851b-148bff927bd4.png\">\r\n\n", "before_files": [{"content": "import datetime\nfrom itertools import chain\n\nimport django_filters\nfrom django.contrib.staticfiles.storage import staticfiles_storage\nfrom django.db.models.fields import BLANK_CHOICE_DASH\nfrom django.forms import widgets as form_widgets\nfrom django.forms.widgets import flatatt\nfrom django.template.loader import render_to_string\nfrom django.utils import formats\nfrom django.utils.timezone import localtime\nfrom django.utils.translation import ugettext as _\n\n\nclass DropdownLinkWidget(django_filters.widgets.LinkWidget):\n label = None\n right = False\n template = 'meinberlin_contrib/widgets/dropdown_link.html'\n\n def get_option_label(self, value, choices=()):\n option_label = BLANK_CHOICE_DASH[0][1]\n\n for v, label in chain(self.choices, choices):\n if str(v) == value:\n option_label = label\n break\n\n if option_label == BLANK_CHOICE_DASH[0][1]:\n option_label = _('All')\n\n return option_label\n\n def render(self, name, value, attrs=None, choices=()):\n all_choices = list(chain(self.choices, choices))\n\n if len(all_choices) <= 1:\n return ''\n\n if value is None:\n value = all_choices[0][0]\n\n _id = attrs.pop('id')\n final_attrs = flatatt(self.build_attrs(attrs))\n value_label = self.get_option_label(value, choices=choices)\n\n options = super().render(name, value, attrs={\n 'class': 'dropdown-menu',\n 'aria-labelledby': _id,\n }, choices=choices)\n\n return render_to_string(self.template, {\n 'options': options,\n 'id': _id,\n 'attrs': final_attrs,\n 'value_label': value_label,\n 'label': self.label,\n 'right': self.right,\n })\n\n\nclass DateTimeInput(form_widgets.SplitDateTimeWidget):\n class Media:\n js = (\n staticfiles_storage.url('datepicker.js'),\n )\n css = {'all': [\n staticfiles_storage.url('datepicker.css'),\n ]}\n\n def render(self, name, value, attrs=None):\n date_attrs = self.build_attrs(attrs)\n date_attrs.update({\n 'class': 'datepicker',\n 'placeholder': formats.localize_input(datetime.date.today())\n })\n time_attrs = self.build_attrs(attrs)\n time_attrs.update({\n 'class': 'timepicker',\n 'placeholder': '00:00',\n })\n\n if isinstance(value, datetime.datetime):\n value = localtime(value)\n date = value.date()\n time = value.time()\n else:\n # value's just a list in case of an error\n date = value[0] if value else None\n time = value[1] if value else '00:00'\n\n return render_to_string('datetime_input.html', {\n 'date': self.widgets[0].render(\n name + '_0',\n date,\n date_attrs\n ),\n 'time': self.widgets[1].render(\n name + '_1',\n time,\n time_attrs\n )\n })\n", "path": "apps/contrib/widgets.py"}]}
1,500
160
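The diff above resolves the duplicate-id problem by deriving each subwidget's id from the field name, so the date and time inputs of one `SplitDateTimeWidget` no longer share the single id Django passed in. A sketch of the pattern with a hypothetical helper, reduced to the attribute-building step:

```python
def subwidget_attrs(attrs, name, suffix, css_class, placeholder):
    # Copy the shared attrs, then give this subwidget its own unique id so
    # labels and the datepicker/timepicker JS bind to the right input.
    result = dict(attrs or {})
    result.update({
        "class": css_class,
        "placeholder": placeholder,
        "id": f"{name}_{suffix}",
    })
    return result


date_attrs = subwidget_attrs({}, "start", "date", "datepicker", "2017-06-01")
time_attrs = subwidget_attrs({}, "start", "time", "timepicker", "00:00")
assert date_attrs["id"] != time_attrs["id"]
```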
gh_patches_debug_17569
rasdani/github-patches
git_diff
python-discord__site-304
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Make newlines visible in the deleted messages front-end The deleted messages front-end currently doesn't display newlines if they're consecutive, i.e., if the lines are otherwise empty (`"\n\n\n\n\n\n\n\n\n"`). This makes it difficult to interpret what actually happened in chat. A good solution to this would be to make newlines character visible using the `↵` character, printed in a light gray color (similar to how IDEs do it). **before** ![2019-10-27_18-30](https://user-images.githubusercontent.com/33516116/67638694-d53a1c80-f8e7-11e9-9971-d022a1d90a17.png) **proposed after** ![2019-10-27_18-29](https://user-images.githubusercontent.com/33516116/67638696-e08d4800-f8e7-11e9-8a7d-acf41e2b1f8a.png) </issue> <code> [start of pydis_site/apps/staff/templatetags/deletedmessage_filters.py] 1 from datetime import datetime 2 3 from django import template 4 5 register = template.Library() 6 7 8 @register.filter 9 def hex_colour(color: int) -> str: 10 """Converts an integer representation of a colour to the RGB hex value.""" 11 return f"#{color:0>6X}" 12 13 14 @register.filter 15 def footer_datetime(timestamp: str) -> datetime: 16 """Takes an embed timestamp and returns a timezone-aware datetime object.""" 17 return datetime.fromisoformat(timestamp) 18 [end of pydis_site/apps/staff/templatetags/deletedmessage_filters.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pydis_site/apps/staff/templatetags/deletedmessage_filters.py b/pydis_site/apps/staff/templatetags/deletedmessage_filters.py --- a/pydis_site/apps/staff/templatetags/deletedmessage_filters.py +++ b/pydis_site/apps/staff/templatetags/deletedmessage_filters.py @@ -7,11 +7,22 @@ @register.filter def hex_colour(color: int) -> str: - """Converts an integer representation of a colour to the RGB hex value.""" - return f"#{color:0>6X}" + """ + Converts an integer representation of a colour to the RGB hex value. + + As we are using a Discord dark theme analogue, black colours are returned as white instead. + """ + colour = f"#{color:0>6X}" + return colour if colour != "#000000" else "#FFFFFF" @register.filter def footer_datetime(timestamp: str) -> datetime: """Takes an embed timestamp and returns a timezone-aware datetime object.""" return datetime.fromisoformat(timestamp) + + [email protected] +def visible_newlines(text: str) -> str: + """Takes an embed timestamp and returns a timezone-aware datetime object.""" + return text.replace("\n", " <span class='has-text-grey'>↵</span><br>")
{"golden_diff": "diff --git a/pydis_site/apps/staff/templatetags/deletedmessage_filters.py b/pydis_site/apps/staff/templatetags/deletedmessage_filters.py\n--- a/pydis_site/apps/staff/templatetags/deletedmessage_filters.py\n+++ b/pydis_site/apps/staff/templatetags/deletedmessage_filters.py\n@@ -7,11 +7,22 @@\n \n @register.filter\n def hex_colour(color: int) -> str:\n- \"\"\"Converts an integer representation of a colour to the RGB hex value.\"\"\"\n- return f\"#{color:0>6X}\"\n+ \"\"\"\n+ Converts an integer representation of a colour to the RGB hex value.\n+\n+ As we are using a Discord dark theme analogue, black colours are returned as white instead.\n+ \"\"\"\n+ colour = f\"#{color:0>6X}\"\n+ return colour if colour != \"#000000\" else \"#FFFFFF\"\n \n \n @register.filter\n def footer_datetime(timestamp: str) -> datetime:\n \"\"\"Takes an embed timestamp and returns a timezone-aware datetime object.\"\"\"\n return datetime.fromisoformat(timestamp)\n+\n+\[email protected]\n+def visible_newlines(text: str) -> str:\n+ \"\"\"Takes an embed timestamp and returns a timezone-aware datetime object.\"\"\"\n+ return text.replace(\"\\n\", \" <span class='has-text-grey'>\u21b5</span><br>\")\n", "issue": "Make newlines visible in the deleted messages front-end\nThe deleted messages front-end currently doesn't display newlines if they're consecutive, i.e., if the lines are otherwise empty (`\"\\n\\n\\n\\n\\n\\n\\n\\n\\n\"`). This makes it difficult to interpret what actually happened in chat. A good solution to this would be to make newlines character visible using the `\u21b5` character, printed in a light gray color (similar to how IDEs do it).\r\n\r\n**before**\r\n![2019-10-27_18-30](https://user-images.githubusercontent.com/33516116/67638694-d53a1c80-f8e7-11e9-9971-d022a1d90a17.png)\r\n\r\n**proposed after**\r\n![2019-10-27_18-29](https://user-images.githubusercontent.com/33516116/67638696-e08d4800-f8e7-11e9-8a7d-acf41e2b1f8a.png)\r\n\n", "before_files": [{"content": "from datetime import datetime\n\nfrom django import template\n\nregister = template.Library()\n\n\[email protected]\ndef hex_colour(color: int) -> str:\n \"\"\"Converts an integer representation of a colour to the RGB hex value.\"\"\"\n return f\"#{color:0>6X}\"\n\n\[email protected]\ndef footer_datetime(timestamp: str) -> datetime:\n \"\"\"Takes an embed timestamp and returns a timezone-aware datetime object.\"\"\"\n return datetime.fromisoformat(timestamp)\n", "path": "pydis_site/apps/staff/templatetags/deletedmessage_filters.py"}]}
947
305
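Two caveats about the short diff above are worth flagging: the new filter's docstring was copy-pasted from `footer_datetime` (it does not return a datetime), and the returned string contains markup, so the template has to treat it as safe HTML. A hedged sketch with an accurate docstring; the escape step is an addition of ours for safety, not part of the merged diff:

```python
from django import template
from django.utils.html import escape
from django.utils.safestring import mark_safe

register = template.Library()


@register.filter
def visible_newlines(text: str) -> str:
    """Render each newline as a dim return symbol followed by a line break."""
    escaped = escape(text)  # neutralise any markup inside the deleted message
    return mark_safe(escaped.replace("\n", " <span class='has-text-grey'>↵</span><br>"))
```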
gh_patches_debug_1191
rasdani/github-patches
git_diff
huggingface__dataset-viewer-2409
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Retry jobs that finish with `ClientConnection` error? Maybe here: https://github.com/huggingface/datasets-server/blob/f311a9212aaa91dd0373e5c2d4f5da9b6bdabcb5/chart/env/prod.yaml#L209 Internal conversation on Slack: https://huggingface.slack.com/archives/C0311GZ7R6K/p1698224875005729 Anyway: I'm wondering if we can have the error now that the dataset scripts are disabled by default. </issue> <code> [start of libs/libcommon/src/libcommon/constants.py] 1 # SPDX-License-Identifier: Apache-2.0 2 # Copyright 2022 The HuggingFace Authors. 3 4 CACHE_COLLECTION_RESPONSES = "cachedResponsesBlue" 5 CACHE_MONGOENGINE_ALIAS = "cache" 6 HF_DATASETS_CACHE_APPNAME = "hf_datasets_cache" 7 PARQUET_METADATA_CACHE_APPNAME = "datasets_server_parquet_metadata" 8 DESCRIPTIVE_STATISTICS_CACHE_APPNAME = "datasets_server_descriptive_statistics" 9 DUCKDB_INDEX_CACHE_APPNAME = "datasets_server_duckdb_index" 10 DUCKDB_INDEX_DOWNLOADS_SUBDIRECTORY = "downloads" 11 DUCKDB_INDEX_JOB_RUNNER_SUBDIRECTORY = "job_runner" 12 CACHE_METRICS_COLLECTION = "cacheTotalMetric" 13 QUEUE_METRICS_COLLECTION = "jobTotalMetric" 14 METRICS_MONGOENGINE_ALIAS = "metrics" 15 QUEUE_COLLECTION_JOBS = "jobsBlue" 16 QUEUE_COLLECTION_LOCKS = "locks" 17 QUEUE_MONGOENGINE_ALIAS = "queue" 18 QUEUE_TTL_SECONDS = 600 # 10 minutes 19 LOCK_TTL_SECONDS_NO_OWNER = 600 # 10 minutes 20 LOCK_TTL_SECONDS_TO_START_JOB = 600 # 10 minutes 21 LOCK_TTL_SECONDS_TO_WRITE_ON_GIT_BRANCH = 3600 # 1 hour 22 23 MAX_FAILED_RUNS = 3 24 DATASET_SEPARATOR = "--" 25 DEFAULT_DIFFICULTY = 50 26 DEFAULT_DIFFICULTY_MAX = 100 27 DEFAULT_DIFFICULTY_MIN = 0 28 DEFAULT_INPUT_TYPE = "dataset" 29 DEFAULT_JOB_RUNNER_VERSION = 1 30 DIFFICULTY_BONUS_BY_FAILED_RUNS = 20 31 MIN_BYTES_FOR_BONUS_DIFFICULTY = 3_000_000_000 32 33 PROCESSING_STEP_CONFIG_PARQUET_AND_INFO_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS = 100 34 PROCESSING_STEP_CONFIG_PARQUET_AND_INFO_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS = 100 35 PROCESSING_STEP_CONFIG_PARQUET_AND_INFO_ROW_GROUP_SIZE_FOR_BINARY_DATASETS = 100 36 PARQUET_REVISION = "refs/convert/parquet" 37 38 ERROR_CODES_TO_RETRY = { 39 "CreateCommitError", 40 "ExternalServerError", 41 "JobManagerCrashedError", 42 "LockedDatasetTimeoutError", 43 "StreamingRowsError", 44 } 45 46 EXTERNAL_DATASET_SCRIPT_PATTERN = "datasets_modules/datasets" 47 48 # Arrays are not immutable, we have to take care of not modifying them 49 # Anyway: in all this file, we allow constant reassignment (no use of Final) 50 CONFIG_HAS_VIEWER_KINDS = ["config-size"] 51 CONFIG_INFO_KINDS = ["config-info"] 52 CONFIG_PARQUET_METADATA_KINDS = ["config-parquet-metadata"] 53 CONFIG_PARQUET_AND_METADATA_KINDS = ["config-parquet", "config-parquet-metadata"] 54 CONFIG_SPLIT_NAMES_KINDS = ["config-split-names-from-info", "config-split-names-from-streaming"] 55 DATASET_CONFIG_NAMES_KINDS = ["dataset-config-names"] 56 DATASET_INFO_KINDS = ["dataset-info"] 57 SPLIT_DUCKDB_INDEX_KINDS = ["split-duckdb-index"] 58 SPLIT_HAS_PREVIEW_KINDS = ["split-first-rows-from-streaming", "split-first-rows-from-parquet"] 59 SPLIT_HAS_SEARCH_KINDS = ["split-duckdb-index"] 60 PARALLEL_STEPS_LISTS = [ 61 CONFIG_SPLIT_NAMES_KINDS, 62 SPLIT_HAS_PREVIEW_KINDS, 63 ] 64 65 CROISSANT_MAX_CONFIGS = 100 66 MAX_NUM_ROWS_PER_PAGE = 100 67 [end of libs/libcommon/src/libcommon/constants.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/libs/libcommon/src/libcommon/constants.py b/libs/libcommon/src/libcommon/constants.py --- a/libs/libcommon/src/libcommon/constants.py +++ b/libs/libcommon/src/libcommon/constants.py @@ -36,6 +36,7 @@ PARQUET_REVISION = "refs/convert/parquet" ERROR_CODES_TO_RETRY = { + "ConnectionError", "CreateCommitError", "ExternalServerError", "JobManagerCrashedError",
{"golden_diff": "diff --git a/libs/libcommon/src/libcommon/constants.py b/libs/libcommon/src/libcommon/constants.py\n--- a/libs/libcommon/src/libcommon/constants.py\n+++ b/libs/libcommon/src/libcommon/constants.py\n@@ -36,6 +36,7 @@\n PARQUET_REVISION = \"refs/convert/parquet\"\n \n ERROR_CODES_TO_RETRY = {\n+ \"ConnectionError\",\n \"CreateCommitError\",\n \"ExternalServerError\",\n \"JobManagerCrashedError\",\n", "issue": "Retry jobs that finish with `ClientConnection` error?\nMaybe here: https://github.com/huggingface/datasets-server/blob/f311a9212aaa91dd0373e5c2d4f5da9b6bdabcb5/chart/env/prod.yaml#L209\r\n\r\nInternal conversation on Slack: https://huggingface.slack.com/archives/C0311GZ7R6K/p1698224875005729\r\n\r\nAnyway: I'm wondering if we can have the error now that the dataset scripts are disabled by default.\n", "before_files": [{"content": "# SPDX-License-Identifier: Apache-2.0\n# Copyright 2022 The HuggingFace Authors.\n\nCACHE_COLLECTION_RESPONSES = \"cachedResponsesBlue\"\nCACHE_MONGOENGINE_ALIAS = \"cache\"\nHF_DATASETS_CACHE_APPNAME = \"hf_datasets_cache\"\nPARQUET_METADATA_CACHE_APPNAME = \"datasets_server_parquet_metadata\"\nDESCRIPTIVE_STATISTICS_CACHE_APPNAME = \"datasets_server_descriptive_statistics\"\nDUCKDB_INDEX_CACHE_APPNAME = \"datasets_server_duckdb_index\"\nDUCKDB_INDEX_DOWNLOADS_SUBDIRECTORY = \"downloads\"\nDUCKDB_INDEX_JOB_RUNNER_SUBDIRECTORY = \"job_runner\"\nCACHE_METRICS_COLLECTION = \"cacheTotalMetric\"\nQUEUE_METRICS_COLLECTION = \"jobTotalMetric\"\nMETRICS_MONGOENGINE_ALIAS = \"metrics\"\nQUEUE_COLLECTION_JOBS = \"jobsBlue\"\nQUEUE_COLLECTION_LOCKS = \"locks\"\nQUEUE_MONGOENGINE_ALIAS = \"queue\"\nQUEUE_TTL_SECONDS = 600 # 10 minutes\nLOCK_TTL_SECONDS_NO_OWNER = 600 # 10 minutes\nLOCK_TTL_SECONDS_TO_START_JOB = 600 # 10 minutes\nLOCK_TTL_SECONDS_TO_WRITE_ON_GIT_BRANCH = 3600 # 1 hour\n\nMAX_FAILED_RUNS = 3\nDATASET_SEPARATOR = \"--\"\nDEFAULT_DIFFICULTY = 50\nDEFAULT_DIFFICULTY_MAX = 100\nDEFAULT_DIFFICULTY_MIN = 0\nDEFAULT_INPUT_TYPE = \"dataset\"\nDEFAULT_JOB_RUNNER_VERSION = 1\nDIFFICULTY_BONUS_BY_FAILED_RUNS = 20\nMIN_BYTES_FOR_BONUS_DIFFICULTY = 3_000_000_000\n\nPROCESSING_STEP_CONFIG_PARQUET_AND_INFO_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS = 100\nPROCESSING_STEP_CONFIG_PARQUET_AND_INFO_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS = 100\nPROCESSING_STEP_CONFIG_PARQUET_AND_INFO_ROW_GROUP_SIZE_FOR_BINARY_DATASETS = 100\nPARQUET_REVISION = \"refs/convert/parquet\"\n\nERROR_CODES_TO_RETRY = {\n \"CreateCommitError\",\n \"ExternalServerError\",\n \"JobManagerCrashedError\",\n \"LockedDatasetTimeoutError\",\n \"StreamingRowsError\",\n}\n\nEXTERNAL_DATASET_SCRIPT_PATTERN = \"datasets_modules/datasets\"\n\n# Arrays are not immutable, we have to take care of not modifying them\n# Anyway: in all this file, we allow constant reassignment (no use of Final)\nCONFIG_HAS_VIEWER_KINDS = [\"config-size\"]\nCONFIG_INFO_KINDS = [\"config-info\"]\nCONFIG_PARQUET_METADATA_KINDS = [\"config-parquet-metadata\"]\nCONFIG_PARQUET_AND_METADATA_KINDS = [\"config-parquet\", \"config-parquet-metadata\"]\nCONFIG_SPLIT_NAMES_KINDS = [\"config-split-names-from-info\", \"config-split-names-from-streaming\"]\nDATASET_CONFIG_NAMES_KINDS = [\"dataset-config-names\"]\nDATASET_INFO_KINDS = [\"dataset-info\"]\nSPLIT_DUCKDB_INDEX_KINDS = [\"split-duckdb-index\"]\nSPLIT_HAS_PREVIEW_KINDS = [\"split-first-rows-from-streaming\", \"split-first-rows-from-parquet\"]\nSPLIT_HAS_SEARCH_KINDS = [\"split-duckdb-index\"]\nPARALLEL_STEPS_LISTS = [\n CONFIG_SPLIT_NAMES_KINDS,\n 
SPLIT_HAS_PREVIEW_KINDS,\n]\n\nCROISSANT_MAX_CONFIGS = 100\nMAX_NUM_ROWS_PER_PAGE = 100\n", "path": "libs/libcommon/src/libcommon/constants.py"}]}
1,540
103
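The golden diff above only touches a constants set, but the behavior it changes is a retry gate: jobs whose cached response carries a whitelisted error code get re-queued instead of staying failed. A minimal sketch of that pattern follows; the `should_retry` helper is illustrative and not part of the dataset-viewer API, only the set contents mirror the patched constant.

```python
from typing import Optional

# Mirrors ERROR_CODES_TO_RETRY after the patch (with "ConnectionError" added).
ERROR_CODES_TO_RETRY = {
    "ConnectionError",
    "CreateCommitError",
    "ExternalServerError",
    "JobManagerCrashedError",
    "LockedDatasetTimeoutError",
    "StreamingRowsError",
}

def should_retry(error_code: Optional[str]) -> bool:
    # A cached response whose error code is in the set is treated as
    # transient, so the job is re-queued rather than left as failed.
    return error_code is not None and error_code in ERROR_CODES_TO_RETRY

assert should_retry("ConnectionError")      # retried after the patch
assert not should_retry("DatasetNotFound")  # permanent error, kept as-is
```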
gh_patches_debug_10313
rasdani/github-patches
git_diff
nilearn__nilearn-4049
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [DOC] Fix Sphinx issues related to adding new experimental module and typing ### Is there an existing issue for this? - [X] I have searched the existing issues ### Describe your proposed suggestion in detail. 1. Linkcode extension error https://github.com/nilearn/nilearn/pull/4017#issuecomment-1739213117 and related workaround: https://github.com/nilearn/nilearn/blob/9133bd98ddc085e14d545fd0439be621c8679f91/doc/sphinxext/github_link.py#L43-L44 2. See Sphinx warning https://github.com/nilearn/nilearn/pull/4017#issuecomment-1739630179 and related hotfix of remving sphinx role here: https://github.com/nilearn/nilearn/blob/9133bd98ddc085e14d545fd0439be621c8679f91/doc/modules/experimental.rst?plain=1#L10 3. Sphinx autodoc has an issue with resolving external libraries like numpy when using typehints. It seems to be a bug similar to what is reported here https://github.com/sphinx-doc/sphinx/issues/10785. For now this autodoc argument is added to hide the typehints from the integrated typing https://github.com/nilearn/nilearn/blob/9133bd98ddc085e14d545fd0439be621c8679f91/doc/conf.py#L57 And if we have both docstrings and the typehints we have double rendering in the docs of the description of a e.g. class and this is redundant and looks ugly so we should also discuss how we want to approach adding docstrings + typing and how to configure what will be displayed. I'll post an example once the dev docs are built ### List any pages that would be impacted. _No response_ </issue> <code> [start of doc/sphinxext/github_link.py] 1 import inspect 2 import os 3 import subprocess 4 import sys 5 from functools import partial 6 from operator import attrgetter 7 8 REVISION_CMD = "git rev-parse --short HEAD" 9 10 11 def _get_git_revision(): 12 try: 13 revision = subprocess.check_output(REVISION_CMD.split()).strip() 14 except (subprocess.CalledProcessError, OSError): 15 print("Failed to execute git to get revision") 16 return None 17 return revision.decode("utf-8") 18 19 20 def _linkcode_resolve(domain, info, package, url_fmt, revision): 21 """Determine a link to online source for a class/method/function. 22 23 This is called by sphinx.ext.linkcode 24 25 An example with a long-untouched module that everyone has 26 >>> _linkcode_resolve('py', {'module': 'tty', 27 ... 'fullname': 'setraw'}, 28 ... package='tty', 29 ... url_fmt='http://hg.python.org/cpython/file/' 30 ... '{revision}/Lib/{package}/{path}#L{lineno}', 31 ... 
revision='xxxx') 32 'http://hg.python.org/cpython/file/xxxx/Lib/tty/tty.py#L18' 33 """ 34 if revision is None: 35 return 36 if domain not in ("py", "pyx"): 37 return 38 if not info.get("module") or not info.get("fullname"): 39 return 40 41 class_name = info["fullname"].split(".")[0] 42 module = __import__(info["module"], fromlist=[class_name]) 43 if info["module"] == "nilearn.experimental.surface": 44 return 45 obj = attrgetter(info["fullname"])(module) 46 47 # Unwrap the object to get the correct source 48 # file in case that is wrapped by a decorator 49 obj = inspect.unwrap(obj) 50 51 try: 52 fn = inspect.getsourcefile(obj) 53 except Exception: 54 fn = None 55 if not fn: 56 try: 57 fn = inspect.getsourcefile(sys.modules[obj.__module__]) 58 except Exception: 59 fn = None 60 if not fn: 61 return 62 63 # Don't include filenames from outside this package's tree 64 if os.path.dirname(__import__(package).__file__) not in fn: 65 return 66 67 fn = os.path.relpath( 68 fn, start=os.path.dirname(__import__(package).__file__) 69 ) 70 try: 71 lineno = inspect.getsourcelines(obj)[1] 72 except Exception: 73 lineno = "" 74 return url_fmt.format( 75 revision=revision, package=package, path=fn, lineno=lineno 76 ) 77 78 79 def make_linkcode_resolve(package, url_fmt): 80 """Return a linkcode_resolve function for the given URL format. 81 82 revision is a git commit reference (hash or name) 83 84 package is the name of the root module of the package 85 86 url_fmt is along the lines of ('https://github.com/USER/PROJECT/' 87 'blob/{revision}/{package}/' 88 '{path}#L{lineno}') 89 """ 90 revision = _get_git_revision() 91 return partial( 92 _linkcode_resolve, revision=revision, package=package, url_fmt=url_fmt 93 ) 94 [end of doc/sphinxext/github_link.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/doc/sphinxext/github_link.py b/doc/sphinxext/github_link.py --- a/doc/sphinxext/github_link.py +++ b/doc/sphinxext/github_link.py @@ -40,9 +40,12 @@ class_name = info["fullname"].split(".")[0] module = __import__(info["module"], fromlist=[class_name]) - if info["module"] == "nilearn.experimental.surface": + # For typed parameters, this will try to get uninitialized attributes + # and fail + try: + obj = attrgetter(info["fullname"])(module) + except AttributeError: return - obj = attrgetter(info["fullname"])(module) # Unwrap the object to get the correct source # file in case that is wrapped by a decorator
{"golden_diff": "diff --git a/doc/sphinxext/github_link.py b/doc/sphinxext/github_link.py\n--- a/doc/sphinxext/github_link.py\n+++ b/doc/sphinxext/github_link.py\n@@ -40,9 +40,12 @@\n \n class_name = info[\"fullname\"].split(\".\")[0]\n module = __import__(info[\"module\"], fromlist=[class_name])\n- if info[\"module\"] == \"nilearn.experimental.surface\":\n+ # For typed parameters, this will try to get uninitialized attributes\n+ # and fail\n+ try:\n+ obj = attrgetter(info[\"fullname\"])(module)\n+ except AttributeError:\n return\n- obj = attrgetter(info[\"fullname\"])(module)\n \n # Unwrap the object to get the correct source\n # file in case that is wrapped by a decorator\n", "issue": "[DOC] Fix Sphinx issues related to adding new experimental module and typing\n### Is there an existing issue for this?\r\n\r\n- [X] I have searched the existing issues\r\n\r\n### Describe your proposed suggestion in detail.\r\n\r\n1. Linkcode extension error https://github.com/nilearn/nilearn/pull/4017#issuecomment-1739213117 and related workaround: https://github.com/nilearn/nilearn/blob/9133bd98ddc085e14d545fd0439be621c8679f91/doc/sphinxext/github_link.py#L43-L44\r\n\r\n2. See Sphinx warning https://github.com/nilearn/nilearn/pull/4017#issuecomment-1739630179 and related hotfix of remving sphinx role here: https://github.com/nilearn/nilearn/blob/9133bd98ddc085e14d545fd0439be621c8679f91/doc/modules/experimental.rst?plain=1#L10\r\n\r\n3. Sphinx autodoc has an issue with resolving external libraries like numpy when using typehints. It seems to be a bug similar to what is reported here https://github.com/sphinx-doc/sphinx/issues/10785. For now this autodoc argument is added to hide the typehints from the integrated typing https://github.com/nilearn/nilearn/blob/9133bd98ddc085e14d545fd0439be621c8679f91/doc/conf.py#L57\r\nAnd if we have both docstrings and the typehints we have double rendering in the docs of the description of a e.g. class and this is redundant and looks ugly so we should also discuss how we want to approach adding docstrings + typing and how to configure what will be displayed. I'll post an example once the dev docs are built\r\n\r\n### List any pages that would be impacted.\r\n\r\n_No response_\n", "before_files": [{"content": "import inspect\nimport os\nimport subprocess\nimport sys\nfrom functools import partial\nfrom operator import attrgetter\n\nREVISION_CMD = \"git rev-parse --short HEAD\"\n\n\ndef _get_git_revision():\n try:\n revision = subprocess.check_output(REVISION_CMD.split()).strip()\n except (subprocess.CalledProcessError, OSError):\n print(\"Failed to execute git to get revision\")\n return None\n return revision.decode(\"utf-8\")\n\n\ndef _linkcode_resolve(domain, info, package, url_fmt, revision):\n \"\"\"Determine a link to online source for a class/method/function.\n\n This is called by sphinx.ext.linkcode\n\n An example with a long-untouched module that everyone has\n >>> _linkcode_resolve('py', {'module': 'tty',\n ... 'fullname': 'setraw'},\n ... package='tty',\n ... url_fmt='http://hg.python.org/cpython/file/'\n ... '{revision}/Lib/{package}/{path}#L{lineno}',\n ... 
revision='xxxx')\n 'http://hg.python.org/cpython/file/xxxx/Lib/tty/tty.py#L18'\n \"\"\"\n if revision is None:\n return\n if domain not in (\"py\", \"pyx\"):\n return\n if not info.get(\"module\") or not info.get(\"fullname\"):\n return\n\n class_name = info[\"fullname\"].split(\".\")[0]\n module = __import__(info[\"module\"], fromlist=[class_name])\n if info[\"module\"] == \"nilearn.experimental.surface\":\n return\n obj = attrgetter(info[\"fullname\"])(module)\n\n # Unwrap the object to get the correct source\n # file in case that is wrapped by a decorator\n obj = inspect.unwrap(obj)\n\n try:\n fn = inspect.getsourcefile(obj)\n except Exception:\n fn = None\n if not fn:\n try:\n fn = inspect.getsourcefile(sys.modules[obj.__module__])\n except Exception:\n fn = None\n if not fn:\n return\n\n # Don't include filenames from outside this package's tree\n if os.path.dirname(__import__(package).__file__) not in fn:\n return\n\n fn = os.path.relpath(\n fn, start=os.path.dirname(__import__(package).__file__)\n )\n try:\n lineno = inspect.getsourcelines(obj)[1]\n except Exception:\n lineno = \"\"\n return url_fmt.format(\n revision=revision, package=package, path=fn, lineno=lineno\n )\n\n\ndef make_linkcode_resolve(package, url_fmt):\n \"\"\"Return a linkcode_resolve function for the given URL format.\n\n revision is a git commit reference (hash or name)\n\n package is the name of the root module of the package\n\n url_fmt is along the lines of ('https://github.com/USER/PROJECT/'\n 'blob/{revision}/{package}/'\n '{path}#L{lineno}')\n \"\"\"\n revision = _get_git_revision()\n return partial(\n _linkcode_resolve, revision=revision, package=package, url_fmt=url_fmt\n )\n", "path": "doc/sphinxext/github_link.py"}]}
1,859
183
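The nilearn diff above replaces a hard-coded module skip with a `try`/`except AttributeError` around `attrgetter`, and the reason is a Python detail worth seeing in isolation: a bare type annotation on a class records an entry in `__annotations__` but creates no actual attribute, so attribute access raises. A self-contained sketch (the `Surface` class is invented for illustration, it is not nilearn's):

```python
from operator import attrgetter

class Surface:
    mesh: object         # annotated only -- never assigned at class level
    kind = "surface"     # actually assigned

assert "mesh" in Surface.__annotations__          # the annotation exists...
assert attrgetter("kind")(Surface) == "surface"   # assigned attrs resolve fine
try:
    attrgetter("mesh")(Surface)                   # ...but the attribute does not
except AttributeError:
    pass  # exactly the failure the patched linkcode resolver now catches
```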
gh_patches_debug_14871
rasdani/github-patches
git_diff
liqd__a4-meinberlin-2610
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> preview in text commenting does not work URL: https://meinberlin-dev.liqd.net/dashboard/projects/textkommentierung/basic/ user: initiator expected behaviour: should be able to see preview behaviour: get 405 error Comment/Question: I checked brainstorming and it works. After I enter content for the document it also works, so I guess it has to do with the module text commenting. We also have the problem on prod, so I guess we need a hot fix. </issue> <code> [start of meinberlin/apps/documents/views.py] 1 from django.http import Http404 2 from django.urls import reverse 3 from django.utils.functional import cached_property 4 from django.utils.translation import ugettext_lazy as _ 5 from django.views import generic 6 7 from adhocracy4.dashboard import mixins as dashboard_mixins 8 from adhocracy4.projects.mixins import DisplayProjectOrModuleMixin 9 from adhocracy4.projects.mixins import ProjectMixin 10 from adhocracy4.rules import mixins as rules_mixins 11 from meinberlin.apps.exports.views import DashboardExportView 12 13 from . import models 14 15 16 class DocumentDashboardView(ProjectMixin, 17 dashboard_mixins.DashboardBaseMixin, 18 dashboard_mixins.DashboardComponentMixin, 19 generic.TemplateView): 20 template_name = 'meinberlin_documents/document_dashboard.html' 21 permission_required = 'a4projects.change_project' 22 23 def get_permission_object(self): 24 return self.project 25 26 27 class ChapterDetailView(ProjectMixin, 28 rules_mixins.PermissionRequiredMixin, 29 generic.DetailView, 30 DisplayProjectOrModuleMixin): 31 model = models.Chapter 32 permission_required = 'meinberlin_documents.view_chapter' 33 get_context_from_object = True 34 35 def get_context_data(self, **kwargs): 36 context = super().get_context_data(**kwargs) 37 context['chapter_list'] = self.chapter_list 38 return context 39 40 @property 41 def chapter_list(self): 42 return models.Chapter.objects.filter(module=self.module) 43 44 @cached_property 45 def extends(self): 46 if self.url_name == 'module-detail': 47 return 'a4modules/module_detail.html' 48 if self.url_name == 'chapter-detail': 49 if self.module.is_in_module_cluster: 50 return 'a4modules/module_detail.html' 51 return 'a4projects/project_detail.html' 52 53 54 class DocumentDetailView(ChapterDetailView): 55 get_context_from_object = False 56 57 def get_object(self): 58 first_chapter = models.Chapter.objects \ 59 .filter(module=self.module) \ 60 .first() 61 62 if not first_chapter: 63 raise Http404(_('Document has no chapters defined.')) 64 return first_chapter 65 66 67 class ParagraphDetailView(ProjectMixin, 68 rules_mixins.PermissionRequiredMixin, 69 generic.DetailView): 70 model = models.Paragraph 71 permission_required = 'meinberlin_documents.view_paragraph' 72 73 74 class DocumentDashboardExportView(DashboardExportView): 75 template_name = 'meinberlin_exports/export_dashboard.html' 76 77 def get_context_data(self, **kwargs): 78 context = super().get_context_data(**kwargs) 79 context['comment_export'] = reverse( 80 'a4dashboard:document-comment-export', 81 kwargs={'module_slug': self.module.slug}) 82 return context 83 [end of meinberlin/apps/documents/views.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/meinberlin/apps/documents/views.py b/meinberlin/apps/documents/views.py --- a/meinberlin/apps/documents/views.py +++ b/meinberlin/apps/documents/views.py @@ -1,7 +1,5 @@ -from django.http import Http404 from django.urls import reverse from django.utils.functional import cached_property -from django.utils.translation import ugettext_lazy as _ from django.views import generic from adhocracy4.dashboard import mixins as dashboard_mixins @@ -58,9 +56,6 @@ first_chapter = models.Chapter.objects \ .filter(module=self.module) \ .first() - - if not first_chapter: - raise Http404(_('Document has no chapters defined.')) return first_chapter
{"golden_diff": "diff --git a/meinberlin/apps/documents/views.py b/meinberlin/apps/documents/views.py\n--- a/meinberlin/apps/documents/views.py\n+++ b/meinberlin/apps/documents/views.py\n@@ -1,7 +1,5 @@\n-from django.http import Http404\n from django.urls import reverse\n from django.utils.functional import cached_property\n-from django.utils.translation import ugettext_lazy as _\n from django.views import generic\n \n from adhocracy4.dashboard import mixins as dashboard_mixins\n@@ -58,9 +56,6 @@\n first_chapter = models.Chapter.objects \\\n .filter(module=self.module) \\\n .first()\n-\n- if not first_chapter:\n- raise Http404(_('Document has no chapters defined.'))\n return first_chapter\n", "issue": "preview in text commenting does not work\nURL: https://meinberlin-dev.liqd.net/dashboard/projects/textkommentierung/basic/\r\nuser: initiator\r\nexpected behaviour: should be able to see preview\r\nbehaviour: get 405 error\r\nComment/Question: I checked brainstorming and it works. After I enter content for the document it also works, so I guess it has to do with the module text commenting. We also have the problem on prod, so I guess we need a hot fix.\n", "before_files": [{"content": "from django.http import Http404\nfrom django.urls import reverse\nfrom django.utils.functional import cached_property\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views import generic\n\nfrom adhocracy4.dashboard import mixins as dashboard_mixins\nfrom adhocracy4.projects.mixins import DisplayProjectOrModuleMixin\nfrom adhocracy4.projects.mixins import ProjectMixin\nfrom adhocracy4.rules import mixins as rules_mixins\nfrom meinberlin.apps.exports.views import DashboardExportView\n\nfrom . import models\n\n\nclass DocumentDashboardView(ProjectMixin,\n dashboard_mixins.DashboardBaseMixin,\n dashboard_mixins.DashboardComponentMixin,\n generic.TemplateView):\n template_name = 'meinberlin_documents/document_dashboard.html'\n permission_required = 'a4projects.change_project'\n\n def get_permission_object(self):\n return self.project\n\n\nclass ChapterDetailView(ProjectMixin,\n rules_mixins.PermissionRequiredMixin,\n generic.DetailView,\n DisplayProjectOrModuleMixin):\n model = models.Chapter\n permission_required = 'meinberlin_documents.view_chapter'\n get_context_from_object = True\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['chapter_list'] = self.chapter_list\n return context\n\n @property\n def chapter_list(self):\n return models.Chapter.objects.filter(module=self.module)\n\n @cached_property\n def extends(self):\n if self.url_name == 'module-detail':\n return 'a4modules/module_detail.html'\n if self.url_name == 'chapter-detail':\n if self.module.is_in_module_cluster:\n return 'a4modules/module_detail.html'\n return 'a4projects/project_detail.html'\n\n\nclass DocumentDetailView(ChapterDetailView):\n get_context_from_object = False\n\n def get_object(self):\n first_chapter = models.Chapter.objects \\\n .filter(module=self.module) \\\n .first()\n\n if not first_chapter:\n raise Http404(_('Document has no chapters defined.'))\n return first_chapter\n\n\nclass ParagraphDetailView(ProjectMixin,\n rules_mixins.PermissionRequiredMixin,\n generic.DetailView):\n model = models.Paragraph\n permission_required = 'meinberlin_documents.view_paragraph'\n\n\nclass DocumentDashboardExportView(DashboardExportView):\n template_name = 'meinberlin_exports/export_dashboard.html'\n\n def get_context_data(self, **kwargs):\n context = 
super().get_context_data(**kwargs)\n context['comment_export'] = reverse(\n 'a4dashboard:document-comment-export',\n kwargs={'module_slug': self.module.slug})\n return context\n", "path": "meinberlin/apps/documents/views.py"}]}
1,363
171
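The meinberlin fix is behaviorally tiny: `DocumentDetailView.get_object` stops raising when a module has no chapters yet and instead returns whatever `QuerySet.first()` gave back, which may be `None`, so the dashboard preview of an empty text-commenting module can still render. A stripped-down before/after sketch in plain Python, with a list standing in for the queryset and `LookupError` standing in for Django's `Http404`:

```python
def first_chapter_before(chapters):
    first = chapters[0] if chapters else None
    if first is None:
        raise LookupError("Document has no chapters defined.")  # old Http404
    return first

def first_chapter_after(chapters):
    # Mirrors QuerySet.first(): None instead of an exception, so an
    # empty document no longer aborts the preview request.
    return chapters[0] if chapters else None

assert first_chapter_after([]) is None
```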
gh_patches_debug_598
rasdani/github-patches
git_diff
pex-tool__pex-1733
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Release 2.1.82 On the docket: + [x] Pex resolve checking does not allow resolved pre-releases when --no-pre. #1730 </issue> <code> [start of pex/version.py] 1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). 2 # Licensed under the Apache License, Version 2.0 (see LICENSE). 3 4 __version__ = "2.1.81" 5 [end of pex/version.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pex/version.py b/pex/version.py --- a/pex/version.py +++ b/pex/version.py @@ -1,4 +1,4 @@ # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). -__version__ = "2.1.81" +__version__ = "2.1.82"
{"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.81\"\n+__version__ = \"2.1.82\"\n", "issue": "Release 2.1.82\nOn the docket:\r\n+ [x] Pex resolve checking does not allow resolved pre-releases when --no-pre. #1730 \n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.81\"\n", "path": "pex/version.py"}]}
623
97
gh_patches_debug_4478
rasdani/github-patches
git_diff
python-telegram-bot__python-telegram-bot-2168
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> mypy doesn't pick up type hints[BUG] <!-- Thanks for reporting issues of python-telegram-bot! Use this template to notify us if you found a bug. To make it easier for us to help you please enter detailed information below. Please note, we only support the latest version of python-telegram-bot and master branch. Please make sure to upgrade & recreate the issue on the latest version prior to opening an issue. --> ### Steps to reproduce 1. import python-telegram-bot modules into your `test.py` file (e.g. `from telegram import InlineKeyboardMarkup`) 2. run `mypy test.py` 3. receive the following output `error: Skipping analyzing 'telegram': found module but no type hints or library stubs` ### Expected behaviour mypy should pickup the typehints as they've been added in the latest release (version `13` ) ### Actual behaviour mypy doesn't pickup the typehints. I'm confident this can be solved by either 1. adding `python-telegram-bot` to `typeshed` 2. adding a `py.typed` file More information can be found [here.](https://stackoverflow.com/questions/60856237/mypy-cant-find-type-hints-for-black) ### Configuration **Operating System:** Ubuntu 18.04 **Version of Python, python-telegram-bot & dependencies:** ``$ python -m telegram`` python-telegram-bot 13.0 certifi 2020.06.20 Python 3.8.0 (default, Oct 30 2020, 19:06:04) [GCC 7.5.0] ### Logs ``` error: Skipping analyzing 'telegram.ext': found module but no type hints or library stubs note: See https://mypy.readthedocs.io/en/latest/running_mypy.html#missing-imports error: Skipping analyzing 'telegram': found module but no type hints or library stubs error: Skipping analyzing 'telegram.ext': found module but no type hints or library stubs error: Skipping analyzing 'telegram': found module but no type hints or library stubs ``` </issue> <code> [start of setup.py] 1 #!/usr/bin/env python 2 """The setup and build script for the python-telegram-bot library.""" 3 4 import codecs 5 import os 6 import sys 7 8 from setuptools import setup, find_packages 9 10 11 def requirements(): 12 """Build the requirements list for this project""" 13 requirements_list = [] 14 15 with open('requirements.txt') as requirements: 16 for install in requirements: 17 requirements_list.append(install.strip()) 18 19 return requirements_list 20 21 22 packages = find_packages(exclude=['tests*']) 23 requirements = requirements() 24 25 # Allow for a package install to not use the vendored urllib3 26 UPSTREAM_URLLIB3_FLAG = '--with-upstream-urllib3' 27 if UPSTREAM_URLLIB3_FLAG in sys.argv: 28 sys.argv.remove(UPSTREAM_URLLIB3_FLAG) 29 requirements.append('urllib3 >= 1.19.1') 30 packages = [x for x in packages if not x.startswith('telegram.vendor.ptb_urllib3')] 31 32 with codecs.open('README.rst', 'r', 'utf-8') as fd: 33 fn = os.path.join('telegram', 'version.py') 34 with open(fn) as fh: 35 code = compile(fh.read(), fn, 'exec') 36 exec(code) 37 38 setup(name='python-telegram-bot', 39 version=__version__, 40 author='Leandro Toledo', 41 author_email='[email protected]', 42 license='LGPLv3', 43 url='https://python-telegram-bot.org/', 44 keywords='python telegram bot api wrapper', 45 description="We have made you a wrapper you can't refuse", 46 long_description=fd.read(), 47 packages=packages, 48 install_requires=requirements, 49 extras_require={ 50 'json': 'ujson', 51 'socks': 'PySocks' 52 }, 53 include_package_data=True, 54 classifiers=[ 55 'Development Status :: 5 - Production/Stable', 
56 'Intended Audience :: Developers', 57 'License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)', 58 'Operating System :: OS Independent', 59 'Topic :: Software Development :: Libraries :: Python Modules', 60 'Topic :: Communications :: Chat', 61 'Topic :: Internet', 62 'Programming Language :: Python', 63 'Programming Language :: Python :: 3', 64 'Programming Language :: Python :: 3.6', 65 'Programming Language :: Python :: 3.7', 66 'Programming Language :: Python :: 3.8', 67 'Programming Language :: Python :: 3.9', 68 ],) 69 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -45,6 +45,7 @@ description="We have made you a wrapper you can't refuse", long_description=fd.read(), packages=packages, + package_data={'telegram': ['py.typed']}, install_requires=requirements, extras_require={ 'json': 'ujson',
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -45,6 +45,7 @@\n description=\"We have made you a wrapper you can't refuse\",\n long_description=fd.read(),\n packages=packages,\n+ package_data={'telegram': ['py.typed']},\n install_requires=requirements,\n extras_require={\n 'json': 'ujson',\n", "issue": "mypy doesn't pick up type hints[BUG]\n<!--\r\nThanks for reporting issues of python-telegram-bot!\r\n\r\nUse this template to notify us if you found a bug.\r\n\r\nTo make it easier for us to help you please enter detailed information below.\r\n\r\nPlease note, we only support the latest version of python-telegram-bot and\r\nmaster branch. Please make sure to upgrade & recreate the issue on the latest\r\nversion prior to opening an issue.\r\n-->\r\n### Steps to reproduce\r\n1. import python-telegram-bot modules into your `test.py` file (e.g. `from telegram import InlineKeyboardMarkup`)\r\n\r\n2. run `mypy test.py`\r\n\r\n3. receive the following output `error: Skipping analyzing 'telegram': found module but no type hints or library stubs`\r\n\r\n### Expected behaviour\r\nmypy should pickup the typehints as they've been added in the latest release (version `13` )\r\n\r\n### Actual behaviour\r\nmypy doesn't pickup the typehints.\r\n\r\nI'm confident this can be solved by either\r\n\r\n1. adding `python-telegram-bot` to `typeshed` \r\n2. adding a `py.typed` file\r\n\r\nMore information can be found [here.](https://stackoverflow.com/questions/60856237/mypy-cant-find-type-hints-for-black)\r\n\r\n### Configuration\r\n**Operating System:**\r\nUbuntu 18.04\r\n\r\n**Version of Python, python-telegram-bot & dependencies:**\r\n\r\n``$ python -m telegram``\r\npython-telegram-bot 13.0\r\ncertifi 2020.06.20\r\nPython 3.8.0 (default, Oct 30 2020, 19:06:04) [GCC 7.5.0]\r\n### Logs\r\n```\r\nerror: Skipping analyzing 'telegram.ext': found module but no type hints or library stubs\r\nnote: See https://mypy.readthedocs.io/en/latest/running_mypy.html#missing-imports\r\nerror: Skipping analyzing 'telegram': found module but no type hints or library stubs\r\nerror: Skipping analyzing 'telegram.ext': found module but no type hints or library stubs\r\nerror: Skipping analyzing 'telegram': found module but no type hints or library stubs\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"The setup and build script for the python-telegram-bot library.\"\"\"\n\nimport codecs\nimport os\nimport sys\n\nfrom setuptools import setup, find_packages\n\n\ndef requirements():\n \"\"\"Build the requirements list for this project\"\"\"\n requirements_list = []\n\n with open('requirements.txt') as requirements:\n for install in requirements:\n requirements_list.append(install.strip())\n\n return requirements_list\n\n\npackages = find_packages(exclude=['tests*'])\nrequirements = requirements()\n\n# Allow for a package install to not use the vendored urllib3\nUPSTREAM_URLLIB3_FLAG = '--with-upstream-urllib3'\nif UPSTREAM_URLLIB3_FLAG in sys.argv:\n sys.argv.remove(UPSTREAM_URLLIB3_FLAG)\n requirements.append('urllib3 >= 1.19.1')\n packages = [x for x in packages if not x.startswith('telegram.vendor.ptb_urllib3')]\n\nwith codecs.open('README.rst', 'r', 'utf-8') as fd:\n fn = os.path.join('telegram', 'version.py')\n with open(fn) as fh:\n code = compile(fh.read(), fn, 'exec')\n exec(code)\n\n setup(name='python-telegram-bot',\n version=__version__,\n author='Leandro Toledo',\n author_email='[email protected]',\n license='LGPLv3',\n url='https://python-telegram-bot.org/',\n 
keywords='python telegram bot api wrapper',\n description=\"We have made you a wrapper you can't refuse\",\n long_description=fd.read(),\n packages=packages,\n install_requires=requirements,\n extras_require={\n 'json': 'ujson',\n 'socks': 'PySocks'\n },\n include_package_data=True,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)',\n 'Operating System :: OS Independent',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Communications :: Chat',\n 'Topic :: Internet',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n ],)\n", "path": "setup.py"}]}
1,661
89
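The one-line python-telegram-bot diff is the packaging half of PEP 561: type checkers only trust a library's inline hints when the package ships a `py.typed` marker file and the build actually distributes it. A sketch of the two required pieces, matching the golden diff's `package_data` entry; this is an excerpt, not a complete `setup.py`:

```python
# Step 1: create an empty marker file inside the package tree:
#
#     telegram/py.typed
#
# Step 2: tell setuptools to ship it (excerpt matching the golden diff):
from setuptools import setup, find_packages

setup(
    name="python-telegram-bot",
    packages=find_packages(exclude=["tests*"]),
    package_data={"telegram": ["py.typed"]},  # the marker must be listed too
)
```

Depending on the setuptools version, source distributions may additionally need a `MANIFEST.in` entry or `include_package_data=True` for the marker to be included.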
gh_patches_debug_4094
rasdani/github-patches
git_diff
urllib3__urllib3-841
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> PyOpenSSL: Fails to import after installation with the provided instructions The [Security documentation page](https://urllib3.readthedocs.org/en/latest/security.html#openssl-pyopenssl) indicates that to use PyOpenSSL that you must first install: - pip3 install pyopenssl ndg-httpsclient pyasn1 And then in a Python interpreter run: - import urllib3.contrib.pyopenssl - urllib3.contrib.pyopenssl.inject_into_urllib3() However on Python 3.4.4 (the latest 3.4.x) on OS X 10.11 (the latest OS X) I get the following traceback when executing `import urllib3.contrib.pyopenssl`: ``` Traceback (most recent call last): File "/Users/davidf/Projects/webcrystal/venv/lib/python3.4/site-packages/urllib3/contrib/pyopenssl.py", line 60, in <module> from socket import _fileobject ImportError: cannot import name '_fileobject' During handling of the above exception, another exception occurred: Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/Users/davidf/Projects/webcrystal/venv/lib/python3.4/site-packages/urllib3/contrib/pyopenssl.py", line 63, in <module> from urllib3.packages.backports.makefile import backport_makefile ImportError: No module named 'urllib3.packages.backports' ``` Perhaps there is some missing step in the documentation? </issue> <code> [start of setup.py] 1 #!/usr/bin/env python 2 3 from setuptools import setup 4 5 import os 6 import re 7 import codecs 8 9 base_path = os.path.dirname(__file__) 10 11 # Get the version (borrowed from SQLAlchemy) 12 fp = open(os.path.join(base_path, 'urllib3', '__init__.py')) 13 VERSION = re.compile(r".*__version__ = '(.*?)'", 14 re.S).match(fp.read()).group(1) 15 fp.close() 16 17 readme = codecs.open('README.rst', encoding='utf-8').read() 18 changes = codecs.open('CHANGES.rst', encoding='utf-8').read() 19 version = VERSION 20 21 setup(name='urllib3', 22 version=version, 23 description="HTTP library with thread-safe connection pooling, file post, and more.", 24 long_description=u'\n\n'.join([readme, changes]), 25 classifiers=[ 26 'Environment :: Web Environment', 27 'Intended Audience :: Developers', 28 'License :: OSI Approved :: MIT License', 29 'Operating System :: OS Independent', 30 'Programming Language :: Python', 31 'Programming Language :: Python :: 2', 32 'Programming Language :: Python :: 3', 33 'Topic :: Internet :: WWW/HTTP', 34 'Topic :: Software Development :: Libraries', 35 ], 36 keywords='urllib httplib threadsafe filepost http https ssl pooling', 37 author='Andrey Petrov', 38 author_email='[email protected]', 39 url='http://urllib3.readthedocs.org/', 40 license='MIT', 41 packages=['urllib3', 42 'urllib3.packages', 'urllib3.packages.ssl_match_hostname', 43 'urllib3.contrib', 'urllib3.util', 44 ], 45 requires=[], 46 tests_require=[ 47 # These are a less-specific subset of dev-requirements.txt, for the 48 # convenience of distro package maintainers. 49 'nose', 50 'mock', 51 'tornado', 52 ], 53 test_suite='test', 54 extras_require={ 55 'secure': [ 56 'pyOpenSSL>=0.13', 57 'ndg-httpsclient', 58 'pyasn1', 59 'certifi', 60 ], 61 'socks': [ 62 'PySocks>=1.5.6,<2.0', 63 ] 64 }, 65 ) 66 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -40,7 +40,8 @@ license='MIT', packages=['urllib3', 'urllib3.packages', 'urllib3.packages.ssl_match_hostname', - 'urllib3.contrib', 'urllib3.util', + 'urllib3.packages.backports', 'urllib3.contrib', + 'urllib3.util', ], requires=[], tests_require=[
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -40,7 +40,8 @@\n license='MIT',\n packages=['urllib3',\n 'urllib3.packages', 'urllib3.packages.ssl_match_hostname',\n- 'urllib3.contrib', 'urllib3.util',\n+ 'urllib3.packages.backports', 'urllib3.contrib',\n+ 'urllib3.util',\n ],\n requires=[],\n tests_require=[\n", "issue": "PyOpenSSL: Fails to import after installation with the provided instructions\nThe [Security documentation page](https://urllib3.readthedocs.org/en/latest/security.html#openssl-pyopenssl) indicates that to use PyOpenSSL that you must first install:\n- pip3 install pyopenssl ndg-httpsclient pyasn1\n\nAnd then in a Python interpreter run:\n- import urllib3.contrib.pyopenssl\n- urllib3.contrib.pyopenssl.inject_into_urllib3()\n\nHowever on Python 3.4.4 (the latest 3.4.x) on OS X 10.11 (the latest OS X) I get the following traceback when executing `import urllib3.contrib.pyopenssl`:\n\n```\nTraceback (most recent call last):\n File \"/Users/davidf/Projects/webcrystal/venv/lib/python3.4/site-packages/urllib3/contrib/pyopenssl.py\", line 60, in <module>\n from socket import _fileobject\nImportError: cannot import name '_fileobject'\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"<stdin>\", line 1, in <module>\n File \"/Users/davidf/Projects/webcrystal/venv/lib/python3.4/site-packages/urllib3/contrib/pyopenssl.py\", line 63, in <module>\n from urllib3.packages.backports.makefile import backport_makefile\nImportError: No module named 'urllib3.packages.backports'\n```\n\nPerhaps there is some missing step in the documentation?\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom setuptools import setup\n\nimport os\nimport re\nimport codecs\n\nbase_path = os.path.dirname(__file__)\n\n# Get the version (borrowed from SQLAlchemy)\nfp = open(os.path.join(base_path, 'urllib3', '__init__.py'))\nVERSION = re.compile(r\".*__version__ = '(.*?)'\",\n re.S).match(fp.read()).group(1)\nfp.close()\n\nreadme = codecs.open('README.rst', encoding='utf-8').read()\nchanges = codecs.open('CHANGES.rst', encoding='utf-8').read()\nversion = VERSION\n\nsetup(name='urllib3',\n version=version,\n description=\"HTTP library with thread-safe connection pooling, file post, and more.\",\n long_description=u'\\n\\n'.join([readme, changes]),\n classifiers=[\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 3',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Software Development :: Libraries',\n ],\n keywords='urllib httplib threadsafe filepost http https ssl pooling',\n author='Andrey Petrov',\n author_email='[email protected]',\n url='http://urllib3.readthedocs.org/',\n license='MIT',\n packages=['urllib3',\n 'urllib3.packages', 'urllib3.packages.ssl_match_hostname',\n 'urllib3.contrib', 'urllib3.util',\n ],\n requires=[],\n tests_require=[\n # These are a less-specific subset of dev-requirements.txt, for the\n # convenience of distro package maintainers.\n 'nose',\n 'mock',\n 'tornado',\n ],\n test_suite='test',\n extras_require={\n 'secure': [\n 'pyOpenSSL>=0.13',\n 'ndg-httpsclient',\n 'pyasn1',\n 'certifi',\n ],\n 'socks': [\n 'PySocks>=1.5.6,<2.0',\n ]\n },\n )\n", "path": "setup.py"}]}
1,467
112
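The urllib3 bug is a classic packaging failure mode: `setup.py` enumerated packages by hand, so the newly added `urllib3/packages/backports/` directory was never installed and the documented PyOpenSSL import path broke at runtime. The golden diff patches the list directly; automatic discovery is the usual way to avoid this class of bug, sketched below (illustrative, meant to be run from a checkout of the source tree):

```python
from setuptools import find_packages

# find_packages walks the tree and picks up any directory containing an
# __init__.py, so a new subpackage like urllib3.packages.backports is
# discovered without editing setup.py again.
discovered = find_packages(exclude=["test*"])
print(discovered)  # expected to include 'urllib3.packages.backports'
```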
gh_patches_debug_30146
rasdani/github-patches
git_diff
beetbox__beets-4807
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> plugins: add markunmatched ## Description Add a plugin useful for bulk importers. ## To Do - [x] Documentation. (If you've add a new command-line flag, for example, find the appropriate page under `docs/` to describe it.) - [x] Changelog. (Add an entry to `docs/changelog.rst` near the top of the document.) - [ ] Tests. (Encouraged but not strictly required.) </issue> <code> [start of beetsplug/mbsubmit.py] 1 # This file is part of beets. 2 # Copyright 2016, Adrian Sampson and Diego Moreda. 3 # 4 # Permission is hereby granted, free of charge, to any person obtaining 5 # a copy of this software and associated documentation files (the 6 # "Software"), to deal in the Software without restriction, including 7 # without limitation the rights to use, copy, modify, merge, publish, 8 # distribute, sublicense, and/or sell copies of the Software, and to 9 # permit persons to whom the Software is furnished to do so, subject to 10 # the following conditions: 11 # 12 # The above copyright notice and this permission notice shall be 13 # included in all copies or substantial portions of the Software. 14 15 """Aid in submitting information to MusicBrainz. 16 17 This plugin allows the user to print track information in a format that is 18 parseable by the MusicBrainz track parser [1]. Programmatic submitting is not 19 implemented by MusicBrainz yet. 20 21 [1] https://wiki.musicbrainz.org/History:How_To_Parse_Track_Listings 22 """ 23 24 25 from beets import ui 26 from beets.autotag import Recommendation 27 from beets.plugins import BeetsPlugin 28 from beets.ui.commands import PromptChoice 29 from beetsplug.info import print_data 30 31 32 class MBSubmitPlugin(BeetsPlugin): 33 def __init__(self): 34 super().__init__() 35 36 self.config.add( 37 { 38 "format": "$track. $title - $artist ($length)", 39 "threshold": "medium", 40 } 41 ) 42 43 # Validate and store threshold. 44 self.threshold = self.config["threshold"].as_choice( 45 { 46 "none": Recommendation.none, 47 "low": Recommendation.low, 48 "medium": Recommendation.medium, 49 "strong": Recommendation.strong, 50 } 51 ) 52 53 self.register_listener( 54 "before_choose_candidate", self.before_choose_candidate_event 55 ) 56 57 def before_choose_candidate_event(self, session, task): 58 if task.rec <= self.threshold: 59 return [PromptChoice("p", "Print tracks", self.print_tracks)] 60 61 def print_tracks(self, session, task): 62 for i in sorted(task.items, key=lambda i: i.track): 63 print_data(None, i, self.config["format"].as_str()) 64 65 def commands(self): 66 """Add beet UI commands for mbsubmit.""" 67 mbsubmit_cmd = ui.Subcommand( 68 "mbsubmit", help="Submit Tracks to MusicBrainz" 69 ) 70 71 def func(lib, opts, args): 72 items = lib.items(ui.decargs(args)) 73 self._mbsubmit(items) 74 75 mbsubmit_cmd.func = func 76 77 return [mbsubmit_cmd] 78 79 def _mbsubmit(self, items): 80 """Print track information to be submitted to MusicBrainz.""" 81 for i in sorted(items, key=lambda i: i.track): 82 print_data(None, i, self.config["format"].as_str()) 83 [end of beetsplug/mbsubmit.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/beetsplug/mbsubmit.py b/beetsplug/mbsubmit.py --- a/beetsplug/mbsubmit.py +++ b/beetsplug/mbsubmit.py @@ -21,11 +21,13 @@ [1] https://wiki.musicbrainz.org/History:How_To_Parse_Track_Listings """ +import subprocess from beets import ui from beets.autotag import Recommendation from beets.plugins import BeetsPlugin from beets.ui.commands import PromptChoice +from beets.util import displayable_path from beetsplug.info import print_data @@ -37,6 +39,7 @@ { "format": "$track. $title - $artist ($length)", "threshold": "medium", + "picard_path": "picard", } ) @@ -56,7 +59,21 @@ def before_choose_candidate_event(self, session, task): if task.rec <= self.threshold: - return [PromptChoice("p", "Print tracks", self.print_tracks)] + return [ + PromptChoice("p", "Print tracks", self.print_tracks), + PromptChoice("o", "Open files with Picard", self.picard), + ] + + def picard(self, session, task): + paths = [] + for p in task.paths: + paths.append(displayable_path(p)) + try: + picard_path = self.config["picard_path"].as_str() + subprocess.Popen([picard_path] + paths) + self._log.info("launched picard from\n{}", picard_path) + except OSError as exc: + self._log.error(f"Could not open picard, got error:\n{exc}") def print_tracks(self, session, task): for i in sorted(task.items, key=lambda i: i.track):
{"golden_diff": "diff --git a/beetsplug/mbsubmit.py b/beetsplug/mbsubmit.py\n--- a/beetsplug/mbsubmit.py\n+++ b/beetsplug/mbsubmit.py\n@@ -21,11 +21,13 @@\n [1] https://wiki.musicbrainz.org/History:How_To_Parse_Track_Listings\n \"\"\"\n \n+import subprocess\n \n from beets import ui\n from beets.autotag import Recommendation\n from beets.plugins import BeetsPlugin\n from beets.ui.commands import PromptChoice\n+from beets.util import displayable_path\n from beetsplug.info import print_data\n \n \n@@ -37,6 +39,7 @@\n {\n \"format\": \"$track. $title - $artist ($length)\",\n \"threshold\": \"medium\",\n+ \"picard_path\": \"picard\",\n }\n )\n \n@@ -56,7 +59,21 @@\n \n def before_choose_candidate_event(self, session, task):\n if task.rec <= self.threshold:\n- return [PromptChoice(\"p\", \"Print tracks\", self.print_tracks)]\n+ return [\n+ PromptChoice(\"p\", \"Print tracks\", self.print_tracks),\n+ PromptChoice(\"o\", \"Open files with Picard\", self.picard),\n+ ]\n+\n+ def picard(self, session, task):\n+ paths = []\n+ for p in task.paths:\n+ paths.append(displayable_path(p))\n+ try:\n+ picard_path = self.config[\"picard_path\"].as_str()\n+ subprocess.Popen([picard_path] + paths)\n+ self._log.info(\"launched picard from\\n{}\", picard_path)\n+ except OSError as exc:\n+ self._log.error(f\"Could not open picard, got error:\\n{exc}\")\n \n def print_tracks(self, session, task):\n for i in sorted(task.items, key=lambda i: i.track):\n", "issue": "plugins: add markunmatched\n## Description\r\n\r\nAdd a plugin useful for bulk importers.\r\n\r\n## To Do\r\n\r\n- [x] Documentation. (If you've add a new command-line flag, for example, find the appropriate page under `docs/` to describe it.)\r\n- [x] Changelog. (Add an entry to `docs/changelog.rst` near the top of the document.)\r\n- [ ] Tests. (Encouraged but not strictly required.)\r\n\n", "before_files": [{"content": "# This file is part of beets.\n# Copyright 2016, Adrian Sampson and Diego Moreda.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\"\"\"Aid in submitting information to MusicBrainz.\n\nThis plugin allows the user to print track information in a format that is\nparseable by the MusicBrainz track parser [1]. Programmatic submitting is not\nimplemented by MusicBrainz yet.\n\n[1] https://wiki.musicbrainz.org/History:How_To_Parse_Track_Listings\n\"\"\"\n\n\nfrom beets import ui\nfrom beets.autotag import Recommendation\nfrom beets.plugins import BeetsPlugin\nfrom beets.ui.commands import PromptChoice\nfrom beetsplug.info import print_data\n\n\nclass MBSubmitPlugin(BeetsPlugin):\n def __init__(self):\n super().__init__()\n\n self.config.add(\n {\n \"format\": \"$track. 
$title - $artist ($length)\",\n \"threshold\": \"medium\",\n }\n )\n\n # Validate and store threshold.\n self.threshold = self.config[\"threshold\"].as_choice(\n {\n \"none\": Recommendation.none,\n \"low\": Recommendation.low,\n \"medium\": Recommendation.medium,\n \"strong\": Recommendation.strong,\n }\n )\n\n self.register_listener(\n \"before_choose_candidate\", self.before_choose_candidate_event\n )\n\n def before_choose_candidate_event(self, session, task):\n if task.rec <= self.threshold:\n return [PromptChoice(\"p\", \"Print tracks\", self.print_tracks)]\n\n def print_tracks(self, session, task):\n for i in sorted(task.items, key=lambda i: i.track):\n print_data(None, i, self.config[\"format\"].as_str())\n\n def commands(self):\n \"\"\"Add beet UI commands for mbsubmit.\"\"\"\n mbsubmit_cmd = ui.Subcommand(\n \"mbsubmit\", help=\"Submit Tracks to MusicBrainz\"\n )\n\n def func(lib, opts, args):\n items = lib.items(ui.decargs(args))\n self._mbsubmit(items)\n\n mbsubmit_cmd.func = func\n\n return [mbsubmit_cmd]\n\n def _mbsubmit(self, items):\n \"\"\"Print track information to be submitted to MusicBrainz.\"\"\"\n for i in sorted(items, key=lambda i: i.track):\n print_data(None, i, self.config[\"format\"].as_str())\n", "path": "beetsplug/mbsubmit.py"}]}
1,417
416
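The beets patch wires a new import-prompt choice to an external tagger launch, and the core of it is a small launch-and-log pattern. A minimal standalone sketch, with `picard_path` defaulting to the same value as the patched config and `print` standing in for the plugin's logger:

```python
import subprocess

def open_in_picard(paths, picard_path="picard"):
    try:
        # Popen rather than run: the importer prompt loop should not
        # block while Picard stays open on the selected files.
        subprocess.Popen([picard_path, *paths])
    except OSError as exc:
        print(f"Could not open picard, got error:\n{exc}")
```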
gh_patches_debug_2187
rasdani/github-patches
git_diff
projectmesa__mesa-989
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Color patches takes a long time to load When you hit "reset" to load the color patches example, it takes a long time to load. Not sure why. As a result, I thought it was broken. To recreate... ``` cd examples/color_patches python run.py ``` Wait for patches to load when browser window pops up. OR hit reset and wait for color patches to load. This is what it should look something like... <img width="407" alt="screen shot 2018-04-01 at 10 03 33 pm" src="https://user-images.githubusercontent.com/166734/38180194-95c2acb0-35f8-11e8-8c1b-8bd7a6d25098.png"> Color patches takes a long time to load When you hit "reset" to load the color patches example, it takes a long time to load. Not sure why. As a result, I thought it was broken. To recreate... ``` cd examples/color_patches python run.py ``` Wait for patches to load when browser window pops up. OR hit reset and wait for color patches to load. This is what it should look something like... <img width="407" alt="screen shot 2018-04-01 at 10 03 33 pm" src="https://user-images.githubusercontent.com/166734/38180194-95c2acb0-35f8-11e8-8c1b-8bd7a6d25098.png"> </issue> <code> [start of examples/color_patches/color_patches/server.py] 1 """ 2 handles the definition of the canvas parameters and 3 the drawing of the model representation on the canvas 4 """ 5 # import webbrowser 6 7 from mesa.visualization.modules import CanvasGrid 8 from mesa.visualization.ModularVisualization import ModularServer 9 10 from .model import ColorPatches 11 12 _COLORS = [ 13 "Aqua", 14 "Blue", 15 "Fuchsia", 16 "Gray", 17 "Green", 18 "Lime", 19 "Maroon", 20 "Navy", 21 "Olive", 22 "Orange", 23 "Purple", 24 "Red", 25 "Silver", 26 "Teal", 27 "White", 28 "Yellow", 29 ] 30 31 32 grid_rows = 50 33 grid_cols = 25 34 cell_size = 10 35 canvas_width = grid_rows * cell_size 36 canvas_height = grid_cols * cell_size 37 38 39 def color_patch_draw(cell): 40 """ 41 This function is registered with the visualization server to be called 42 each tick to indicate how to draw the cell in its current state. 43 44 :param cell: the cell in the simulation 45 46 :return: the portrayal dictionary. 47 48 """ 49 assert cell is not None 50 portrayal = {"Shape": "rect", "w": 1, "h": 1, "Filled": "true", "Layer": 0} 51 portrayal["x"] = cell.get_row() 52 portrayal["y"] = cell.get_col() 53 portrayal["Color"] = _COLORS[cell.get_state()] 54 return portrayal 55 56 57 canvas_element = CanvasGrid( 58 color_patch_draw, grid_rows, grid_cols, canvas_width, canvas_height 59 ) 60 61 server = ModularServer( 62 ColorPatches, 63 [canvas_element], 64 "Color Patches", 65 {"width": canvas_width, "height": canvas_height}, 66 ) 67 68 # webbrowser.open('http://127.0.0.1:8521') # TODO: make this configurable 69 [end of examples/color_patches/color_patches/server.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/examples/color_patches/color_patches/server.py b/examples/color_patches/color_patches/server.py --- a/examples/color_patches/color_patches/server.py +++ b/examples/color_patches/color_patches/server.py @@ -62,7 +62,7 @@ ColorPatches, [canvas_element], "Color Patches", - {"width": canvas_width, "height": canvas_height}, + {"width": grid_rows, "height": grid_cols}, ) # webbrowser.open('http://127.0.0.1:8521') # TODO: make this configurable
{"golden_diff": "diff --git a/examples/color_patches/color_patches/server.py b/examples/color_patches/color_patches/server.py\n--- a/examples/color_patches/color_patches/server.py\n+++ b/examples/color_patches/color_patches/server.py\n@@ -62,7 +62,7 @@\n ColorPatches,\n [canvas_element],\n \"Color Patches\",\n- {\"width\": canvas_width, \"height\": canvas_height},\n+ {\"width\": grid_rows, \"height\": grid_cols},\n )\n \n # webbrowser.open('http://127.0.0.1:8521') # TODO: make this configurable\n", "issue": "Color patches takes a long time to load\nWhen you hit \"reset\" to load the color patches example, it takes a long time to load.\r\nNot sure why. As a result, I thought it was broken.\r\n\r\nTo recreate... \r\n```\r\ncd examples/color_patches\r\npython run.py\r\n```\r\n\r\nWait for patches to load when browser window pops up. \r\nOR hit reset and wait for color patches to load.\r\n\r\nThis is what it should look something like...\r\n<img width=\"407\" alt=\"screen shot 2018-04-01 at 10 03 33 pm\" src=\"https://user-images.githubusercontent.com/166734/38180194-95c2acb0-35f8-11e8-8c1b-8bd7a6d25098.png\">\nColor patches takes a long time to load\nWhen you hit \"reset\" to load the color patches example, it takes a long time to load.\r\nNot sure why. As a result, I thought it was broken.\r\n\r\nTo recreate... \r\n```\r\ncd examples/color_patches\r\npython run.py\r\n```\r\n\r\nWait for patches to load when browser window pops up. \r\nOR hit reset and wait for color patches to load.\r\n\r\nThis is what it should look something like...\r\n<img width=\"407\" alt=\"screen shot 2018-04-01 at 10 03 33 pm\" src=\"https://user-images.githubusercontent.com/166734/38180194-95c2acb0-35f8-11e8-8c1b-8bd7a6d25098.png\">\n", "before_files": [{"content": "\"\"\"\nhandles the definition of the canvas parameters and\nthe drawing of the model representation on the canvas\n\"\"\"\n# import webbrowser\n\nfrom mesa.visualization.modules import CanvasGrid\nfrom mesa.visualization.ModularVisualization import ModularServer\n\nfrom .model import ColorPatches\n\n_COLORS = [\n \"Aqua\",\n \"Blue\",\n \"Fuchsia\",\n \"Gray\",\n \"Green\",\n \"Lime\",\n \"Maroon\",\n \"Navy\",\n \"Olive\",\n \"Orange\",\n \"Purple\",\n \"Red\",\n \"Silver\",\n \"Teal\",\n \"White\",\n \"Yellow\",\n]\n\n\ngrid_rows = 50\ngrid_cols = 25\ncell_size = 10\ncanvas_width = grid_rows * cell_size\ncanvas_height = grid_cols * cell_size\n\n\ndef color_patch_draw(cell):\n \"\"\"\n This function is registered with the visualization server to be called\n each tick to indicate how to draw the cell in its current state.\n\n :param cell: the cell in the simulation\n\n :return: the portrayal dictionary.\n\n \"\"\"\n assert cell is not None\n portrayal = {\"Shape\": \"rect\", \"w\": 1, \"h\": 1, \"Filled\": \"true\", \"Layer\": 0}\n portrayal[\"x\"] = cell.get_row()\n portrayal[\"y\"] = cell.get_col()\n portrayal[\"Color\"] = _COLORS[cell.get_state()]\n return portrayal\n\n\ncanvas_element = CanvasGrid(\n color_patch_draw, grid_rows, grid_cols, canvas_width, canvas_height\n)\n\nserver = ModularServer(\n ColorPatches,\n [canvas_element],\n \"Color Patches\",\n {\"width\": canvas_width, \"height\": canvas_height},\n)\n\n# webbrowser.open('http://127.0.0.1:8521') # TODO: make this configurable\n", "path": "examples/color_patches/color_patches/server.py"}]}
1,449
129
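The one-line diff above is easy to misread, so the arithmetic behind it is worth spelling out: `CanvasGrid` takes both cell counts and pixel dimensions, while the model constructor must receive cell counts. Passing the pixel values builds a 500x250 grid of 125,000 cells instead of 50x25 with 1,250, which explains the slow reset the issue describes. A runnable sketch of the relationship (the commented `CanvasGrid` call mirrors the record):

```python
grid_rows, grid_cols = 50, 25          # model size, in cells
cell_size = 10                         # pixels drawn per cell
canvas_width = grid_rows * cell_size   # 500 px
canvas_height = grid_cols * cell_size  # 250 px

# CanvasGrid(portrayal_fn, n_cells_x, n_cells_y, px_width, px_height)
# canvas_element = CanvasGrid(color_patch_draw, grid_rows, grid_cols,
#                             canvas_width, canvas_height)

model_params = {"width": grid_rows, "height": grid_cols}         # 1,250 cells
buggy_params = {"width": canvas_width, "height": canvas_height}  # 125,000 cells

assert model_params["width"] * model_params["height"] == 1_250
assert buggy_params["width"] * buggy_params["height"] == 125_000
```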
gh_patches_debug_15770
rasdani/github-patches
git_diff
open-telemetry__opentelemetry-python-1793
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Documentation references old propagators module `propagators.inject` is being used [here](https://github.com/open-telemetry/opentelemetry-python/blob/main/docs/examples/auto-instrumentation/client.py#L40), `propagators.extract` is being used [here](https://github.com/open-telemetry/opentelemetry-python/blame/main/docs/examples/auto-instrumentation/README.rst#L40). They should use `propagate` instead. </issue> <code> [start of docs/examples/auto-instrumentation/client.py] 1 # Copyright The OpenTelemetry Authors 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 from sys import argv 16 17 from requests import get 18 19 from opentelemetry import propagators, trace 20 from opentelemetry.sdk.trace import TracerProvider 21 from opentelemetry.sdk.trace.export import ( 22 ConsoleSpanExporter, 23 SimpleSpanProcessor, 24 ) 25 26 trace.set_tracer_provider(TracerProvider()) 27 tracer = trace.get_tracer_provider().get_tracer(__name__) 28 29 trace.get_tracer_provider().add_span_processor( 30 SimpleSpanProcessor(ConsoleSpanExporter()) 31 ) 32 33 34 assert len(argv) == 2 35 36 with tracer.start_as_current_span("client"): 37 38 with tracer.start_as_current_span("client-server"): 39 headers = {} 40 propagators.inject(headers) 41 requested = get( 42 "http://localhost:8082/server_request", 43 params={"param": argv[1]}, 44 headers=headers, 45 ) 46 47 assert requested.status_code == 200 48 [end of docs/examples/auto-instrumentation/client.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/docs/examples/auto-instrumentation/client.py b/docs/examples/auto-instrumentation/client.py --- a/docs/examples/auto-instrumentation/client.py +++ b/docs/examples/auto-instrumentation/client.py @@ -16,7 +16,8 @@ from requests import get -from opentelemetry import propagators, trace +from opentelemetry import trace +from opentelemetry.propagate import inject from opentelemetry.sdk.trace import TracerProvider from opentelemetry.sdk.trace.export import ( ConsoleSpanExporter, @@ -37,7 +38,7 @@ with tracer.start_as_current_span("client-server"): headers = {} - propagators.inject(headers) + inject(headers) requested = get( "http://localhost:8082/server_request", params={"param": argv[1]},
{"golden_diff": "diff --git a/docs/examples/auto-instrumentation/client.py b/docs/examples/auto-instrumentation/client.py\n--- a/docs/examples/auto-instrumentation/client.py\n+++ b/docs/examples/auto-instrumentation/client.py\n@@ -16,7 +16,8 @@\n \n from requests import get\n \n-from opentelemetry import propagators, trace\n+from opentelemetry import trace\n+from opentelemetry.propagate import inject\n from opentelemetry.sdk.trace import TracerProvider\n from opentelemetry.sdk.trace.export import (\n ConsoleSpanExporter,\n@@ -37,7 +38,7 @@\n \n with tracer.start_as_current_span(\"client-server\"):\n headers = {}\n- propagators.inject(headers)\n+ inject(headers)\n requested = get(\n \"http://localhost:8082/server_request\",\n params={\"param\": argv[1]},\n", "issue": "Documentation references old propagators module\n`propagators.inject` is being used [here](https://github.com/open-telemetry/opentelemetry-python/blob/main/docs/examples/auto-instrumentation/client.py#L40), `propagators.extract` is being used [here](https://github.com/open-telemetry/opentelemetry-python/blame/main/docs/examples/auto-instrumentation/README.rst#L40). They should use `propagate` instead.\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom sys import argv\n\nfrom requests import get\n\nfrom opentelemetry import propagators, trace\nfrom opentelemetry.sdk.trace import TracerProvider\nfrom opentelemetry.sdk.trace.export import (\n ConsoleSpanExporter,\n SimpleSpanProcessor,\n)\n\ntrace.set_tracer_provider(TracerProvider())\ntracer = trace.get_tracer_provider().get_tracer(__name__)\n\ntrace.get_tracer_provider().add_span_processor(\n SimpleSpanProcessor(ConsoleSpanExporter())\n)\n\n\nassert len(argv) == 2\n\nwith tracer.start_as_current_span(\"client\"):\n\n with tracer.start_as_current_span(\"client-server\"):\n headers = {}\n propagators.inject(headers)\n requested = get(\n \"http://localhost:8082/server_request\",\n params={\"param\": argv[1]},\n headers=headers,\n )\n\n assert requested.status_code == 200\n", "path": "docs/examples/auto-instrumentation/client.py"}]}
1,038
183
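The rename this record documents, from `opentelemetry.propagators` to `opentelemetry.propagate`, belongs to the 1.0 API stabilization. The round trip below is a minimal sketch against that API; the extract side is added here for completeness even though the record's client only injects.

```python
from opentelemetry import trace
from opentelemetry.propagate import extract, inject

tracer = trace.get_tracer(__name__)

# Client side: serialize the current span context into a carrier dict.
with tracer.start_as_current_span("client"):
    headers = {}
    inject(headers)  # adds e.g. the W3C 'traceparent' key in place

# Server side: rebuild the remote context from the same carrier.
ctx = extract(headers)
with tracer.start_as_current_span("server_request", context=ctx):
    pass  # spans opened here are children of the client span
```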
gh_patches_debug_17971
rasdani/github-patches
git_diff
googleapis__python-bigquery-630
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> The DB API Binary function should accept bytes. ``` (3.8) jim@ds9:~/p/g/python-bigquery-sqlalchemy$ python Python 3.8.5 (default, Jan 27 2021, 15:41:15) [GCC 9.3.0] on linux Type "help", "copyright", "credits" or "license" for more information. >>> import google.cloud.bigquery.dbapi >>> google.cloud.bigquery.dbapi.Binary(b'x') Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/home/jim/p/g/python-bigquery/google/cloud/bigquery/dbapi/types.py", line 42, in Binary return string.encode("utf-8") AttributeError: 'bytes' object has no attribute 'encode' ``` Bytes are the most common way to represent binary data. Accepting strings, as it does now seems at best to be a convenience and at worst a bug magnet. In SQLAlchemy, if you defined a model that has a binary attribute, you'd store bytes data in it, but that would break for bigquery, di to this issue. Sqlite's `Binary` function requires bytes data. I propose to change the function to accept bytes. For the sake of backward compatibility, I propose to continue to accept strings. </issue> <code> [start of google/cloud/bigquery/dbapi/types.py] 1 # Copyright 2017 Google LLC 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 """Types used in the Google BigQuery DB-API. 16 17 See `PEP-249`_ for details. 18 19 .. _PEP-249: 20 https://www.python.org/dev/peps/pep-0249/#type-objects-and-constructors 21 """ 22 23 import datetime 24 25 26 Date = datetime.date 27 Time = datetime.time 28 Timestamp = datetime.datetime 29 DateFromTicks = datetime.date.fromtimestamp 30 TimestampFromTicks = datetime.datetime.fromtimestamp 31 32 33 def Binary(string): 34 """Contruct a DB-API binary value. 35 36 Args: 37 string (str): A string to encode as a binary value. 38 39 Returns: 40 bytes: The UTF-8 encoded bytes representing the string. 41 """ 42 return string.encode("utf-8") 43 44 45 def TimeFromTicks(ticks, tz=None): 46 """Construct a DB-API time value from the given ticks value. 47 48 Args: 49 ticks (float): 50 a number of seconds since the epoch; see the documentation of the 51 standard Python time module for details. 52 53 tz (datetime.tzinfo): (Optional) time zone to use for conversion 54 55 Returns: 56 datetime.time: time represented by ticks. 57 """ 58 dt = datetime.datetime.fromtimestamp(ticks, tz=tz) 59 return dt.timetz() 60 61 62 class _DBAPITypeObject(object): 63 """DB-API type object which compares equal to many different strings. 64 65 See `PEP-249`_ for details. 66 67 .. 
_PEP-249: 68 https://www.python.org/dev/peps/pep-0249/#implementation-hints-for-module-authors 69 """ 70 71 def __init__(self, *values): 72 self.values = values 73 74 def __eq__(self, other): 75 return other in self.values 76 77 78 STRING = "STRING" 79 BINARY = _DBAPITypeObject("BYTES", "RECORD", "STRUCT") 80 NUMBER = _DBAPITypeObject( 81 "INTEGER", "INT64", "FLOAT", "FLOAT64", "NUMERIC", "BIGNUMERIC", "BOOLEAN", "BOOL" 82 ) 83 DATETIME = _DBAPITypeObject("TIMESTAMP", "DATE", "TIME", "DATETIME") 84 ROWID = "ROWID" 85 [end of google/cloud/bigquery/dbapi/types.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/google/cloud/bigquery/dbapi/types.py b/google/cloud/bigquery/dbapi/types.py --- a/google/cloud/bigquery/dbapi/types.py +++ b/google/cloud/bigquery/dbapi/types.py @@ -30,16 +30,28 @@ TimestampFromTicks = datetime.datetime.fromtimestamp -def Binary(string): +def Binary(data): """Contruct a DB-API binary value. Args: - string (str): A string to encode as a binary value. + data (bytes-like): An object containing binary data and that + can be converted to bytes with the `bytes` builtin. Returns: - bytes: The UTF-8 encoded bytes representing the string. + bytes: The binary data as a bytes object. """ - return string.encode("utf-8") + if isinstance(data, int): + # This is not the conversion we're looking for, because it + # will simply create a bytes object of the given size. + raise TypeError("cannot convert `int` object to binary") + + try: + return bytes(data) + except TypeError: + if isinstance(data, str): + return data.encode("utf-8") + else: + raise def TimeFromTicks(ticks, tz=None):
{"golden_diff": "diff --git a/google/cloud/bigquery/dbapi/types.py b/google/cloud/bigquery/dbapi/types.py\n--- a/google/cloud/bigquery/dbapi/types.py\n+++ b/google/cloud/bigquery/dbapi/types.py\n@@ -30,16 +30,28 @@\n TimestampFromTicks = datetime.datetime.fromtimestamp\n \n \n-def Binary(string):\n+def Binary(data):\n \"\"\"Contruct a DB-API binary value.\n \n Args:\n- string (str): A string to encode as a binary value.\n+ data (bytes-like): An object containing binary data and that\n+ can be converted to bytes with the `bytes` builtin.\n \n Returns:\n- bytes: The UTF-8 encoded bytes representing the string.\n+ bytes: The binary data as a bytes object.\n \"\"\"\n- return string.encode(\"utf-8\")\n+ if isinstance(data, int):\n+ # This is not the conversion we're looking for, because it\n+ # will simply create a bytes object of the given size.\n+ raise TypeError(\"cannot convert `int` object to binary\")\n+\n+ try:\n+ return bytes(data)\n+ except TypeError:\n+ if isinstance(data, str):\n+ return data.encode(\"utf-8\")\n+ else:\n+ raise\n \n \n def TimeFromTicks(ticks, tz=None):\n", "issue": "The DB API Binary function should accept bytes.\n```\r\n(3.8) jim@ds9:~/p/g/python-bigquery-sqlalchemy$ python\r\nPython 3.8.5 (default, Jan 27 2021, 15:41:15) \r\n[GCC 9.3.0] on linux\r\nType \"help\", \"copyright\", \"credits\" or \"license\" for more information.\r\n>>> import google.cloud.bigquery.dbapi\r\n>>> google.cloud.bigquery.dbapi.Binary(b'x')\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/home/jim/p/g/python-bigquery/google/cloud/bigquery/dbapi/types.py\", line 42, in Binary\r\n return string.encode(\"utf-8\")\r\nAttributeError: 'bytes' object has no attribute 'encode'\r\n```\r\nBytes are the most common way to represent binary data. Accepting strings, as it does now seems at best to be a convenience and at worst a bug magnet.\r\n\r\nIn SQLAlchemy, if you defined a model that has a binary attribute, you'd store bytes data in it, but that would break for bigquery, di to this issue.\r\n\r\nSqlite's `Binary` function requires bytes data.\r\n\r\nI propose to change the function to accept bytes. For the sake of backward compatibility, I propose to continue to accept strings.\r\n\n", "before_files": [{"content": "# Copyright 2017 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Types used in the Google BigQuery DB-API.\n\nSee `PEP-249`_ for details.\n\n.. 
_PEP-249:\n https://www.python.org/dev/peps/pep-0249/#type-objects-and-constructors\n\"\"\"\n\nimport datetime\n\n\nDate = datetime.date\nTime = datetime.time\nTimestamp = datetime.datetime\nDateFromTicks = datetime.date.fromtimestamp\nTimestampFromTicks = datetime.datetime.fromtimestamp\n\n\ndef Binary(string):\n \"\"\"Contruct a DB-API binary value.\n\n Args:\n string (str): A string to encode as a binary value.\n\n Returns:\n bytes: The UTF-8 encoded bytes representing the string.\n \"\"\"\n return string.encode(\"utf-8\")\n\n\ndef TimeFromTicks(ticks, tz=None):\n \"\"\"Construct a DB-API time value from the given ticks value.\n\n Args:\n ticks (float):\n a number of seconds since the epoch; see the documentation of the\n standard Python time module for details.\n\n tz (datetime.tzinfo): (Optional) time zone to use for conversion\n\n Returns:\n datetime.time: time represented by ticks.\n \"\"\"\n dt = datetime.datetime.fromtimestamp(ticks, tz=tz)\n return dt.timetz()\n\n\nclass _DBAPITypeObject(object):\n \"\"\"DB-API type object which compares equal to many different strings.\n\n See `PEP-249`_ for details.\n\n .. _PEP-249:\n https://www.python.org/dev/peps/pep-0249/#implementation-hints-for-module-authors\n \"\"\"\n\n def __init__(self, *values):\n self.values = values\n\n def __eq__(self, other):\n return other in self.values\n\n\nSTRING = \"STRING\"\nBINARY = _DBAPITypeObject(\"BYTES\", \"RECORD\", \"STRUCT\")\nNUMBER = _DBAPITypeObject(\n \"INTEGER\", \"INT64\", \"FLOAT\", \"FLOAT64\", \"NUMERIC\", \"BIGNUMERIC\", \"BOOLEAN\", \"BOOL\"\n)\nDATETIME = _DBAPITypeObject(\"TIMESTAMP\", \"DATE\", \"TIME\", \"DATETIME\")\nROWID = \"ROWID\"\n", "path": "google/cloud/bigquery/dbapi/types.py"}]}
1,620
283
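The patched `Binary` above leans on two CPython behaviours worth noting: `bytes(42)` silently builds a 42-byte zero buffer, hence the explicit `int` guard, and `bytes("s")` raises `TypeError`, which is what routes strings to the UTF-8 fallback. A standalone copy with quick checks:

```python
def Binary(data):
    """Accept any bytes-like object; keep the legacy str path."""
    if isinstance(data, int):
        # bytes(n) would return n zero bytes, never what a DB-API
        # caller means by "binary value".
        raise TypeError("cannot convert `int` object to binary")
    try:
        return bytes(data)
    except TypeError:
        if isinstance(data, str):
            return data.encode("utf-8")  # backward-compatible behaviour
        raise


assert Binary(b"x") == b"x"               # the case from the issue
assert Binary(bytearray(b"xy")) == b"xy"  # any bytes-like works
assert Binary(memoryview(b"z")) == b"z"
assert Binary("abc") == b"abc"            # old str behaviour preserved
try:
    Binary(42)
except TypeError:
    pass
else:
    raise AssertionError("int should be rejected")
```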
gh_patches_debug_27461
rasdani/github-patches
git_diff
napari__napari-745
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Show logo during startup ## 🚀 Feature Right now napari takes a long time to launch the viewer. People can get discouraged as they wait, unsure if napari is loading, or if their data is loading, or if it is just hanging. Showing the napari logo, possibly with / without some text during that time might make it feel better. </issue> <code> [start of napari/_qt/event_loop.py] 1 import sys 2 from contextlib import contextmanager 3 4 from qtpy.QtWidgets import QApplication 5 6 7 @contextmanager 8 def gui_qt(): 9 """Start a Qt event loop in which to run the application. 10 11 Notes 12 ----- 13 This context manager is not needed if running napari within an interactive 14 IPython session. In this case, use the ``%gui qt`` magic command, or start 15 IPython with the Qt GUI event loop enabled by default by using 16 ``ipython --gui=qt``. 17 """ 18 app = QApplication.instance() or QApplication(sys.argv) 19 yield 20 app.exec_() 21 [end of napari/_qt/event_loop.py] [start of napari/__main__.py] 1 """ 2 napari command line viewer. 3 """ 4 import argparse 5 import sys 6 7 import numpy as np 8 9 from .util import io 10 from . import Viewer, gui_qt 11 12 13 def main(): 14 parser = argparse.ArgumentParser(usage=__doc__) 15 parser.add_argument('images', nargs='*', help='Images to view.') 16 parser.add_argument( 17 '--layers', 18 action='store_true', 19 help='Treat multiple input images as layers.', 20 ) 21 parser.add_argument( 22 '-r', 23 '--rgb', 24 help='Treat images as RGB.', 25 action='store_true', 26 default=None, 27 ) 28 parser.add_argument( 29 '-g', 30 '--grayscale', 31 dest='rgb', 32 action='store_false', 33 help='interpret all dimensions in the image as spatial', 34 ) 35 parser.add_argument( 36 '-D', 37 '--use-dask', 38 action='store_true', 39 help='Use dask to read in images. This conserves memory. This option ' 40 'does nothing if a single image is given.', 41 default=None, 42 ) 43 parser.add_argument( 44 '-N', 45 '--use-numpy', 46 action='store_false', 47 dest='use_dask', 48 help='Use NumPy to read in images. This can be more performant than ' 49 'dask if all the images fit in RAM. This option does nothing if ' 50 'only a single image is given.', 51 ) 52 args = parser.parse_args() 53 with gui_qt(): 54 v = Viewer() 55 if len(args.images) > 0: 56 images = io.magic_imread( 57 args.images, use_dask=args.use_dask, stack=not args.layers 58 ) 59 if args.layers: 60 for layer in images: 61 if layer.dtype in ( 62 np.int32, 63 np.uint32, 64 np.int64, 65 np.uint64, 66 ): 67 v.add_labels(layer) 68 else: 69 v.add_image(layer, rgb=args.rgb) 70 else: 71 v.add_image(images, rgb=args.rgb) 72 73 74 if __name__ == '__main__': 75 sys.exit(main()) 76 [end of napari/__main__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/napari/__main__.py b/napari/__main__.py --- a/napari/__main__.py +++ b/napari/__main__.py @@ -50,7 +50,7 @@ 'only a single image is given.', ) args = parser.parse_args() - with gui_qt(): + with gui_qt(startup_logo=True): v = Viewer() if len(args.images) > 0: images = io.magic_imread( diff --git a/napari/_qt/event_loop.py b/napari/_qt/event_loop.py --- a/napari/_qt/event_loop.py +++ b/napari/_qt/event_loop.py @@ -1,13 +1,20 @@ import sys from contextlib import contextmanager +from os.path import dirname, join -from qtpy.QtWidgets import QApplication +from qtpy.QtGui import QPixmap +from qtpy.QtWidgets import QApplication, QSplashScreen @contextmanager -def gui_qt(): +def gui_qt(*, startup_logo=False): """Start a Qt event loop in which to run the application. + Parameters + ---------- + startup_logo : bool + Show a splash screen with the napari logo during startup. + Notes ----- This context manager is not needed if running napari within an interactive @@ -16,5 +23,11 @@ ``ipython --gui=qt``. """ app = QApplication.instance() or QApplication(sys.argv) + if startup_logo: + logopath = join(dirname(__file__), '..', 'resources', 'logo.png') + splash_widget = QSplashScreen(QPixmap(logopath).scaled(400, 400)) + splash_widget.show() yield + if startup_logo: + splash_widget.close() app.exec_()
{"golden_diff": "diff --git a/napari/__main__.py b/napari/__main__.py\n--- a/napari/__main__.py\n+++ b/napari/__main__.py\n@@ -50,7 +50,7 @@\n 'only a single image is given.',\n )\n args = parser.parse_args()\n- with gui_qt():\n+ with gui_qt(startup_logo=True):\n v = Viewer()\n if len(args.images) > 0:\n images = io.magic_imread(\ndiff --git a/napari/_qt/event_loop.py b/napari/_qt/event_loop.py\n--- a/napari/_qt/event_loop.py\n+++ b/napari/_qt/event_loop.py\n@@ -1,13 +1,20 @@\n import sys\n from contextlib import contextmanager\n+from os.path import dirname, join\n \n-from qtpy.QtWidgets import QApplication\n+from qtpy.QtGui import QPixmap\n+from qtpy.QtWidgets import QApplication, QSplashScreen\n \n \n @contextmanager\n-def gui_qt():\n+def gui_qt(*, startup_logo=False):\n \"\"\"Start a Qt event loop in which to run the application.\n \n+ Parameters\n+ ----------\n+ startup_logo : bool\n+ Show a splash screen with the napari logo during startup.\n+\n Notes\n -----\n This context manager is not needed if running napari within an interactive\n@@ -16,5 +23,11 @@\n ``ipython --gui=qt``.\n \"\"\"\n app = QApplication.instance() or QApplication(sys.argv)\n+ if startup_logo:\n+ logopath = join(dirname(__file__), '..', 'resources', 'logo.png')\n+ splash_widget = QSplashScreen(QPixmap(logopath).scaled(400, 400))\n+ splash_widget.show()\n yield\n+ if startup_logo:\n+ splash_widget.close()\n app.exec_()\n", "issue": "Show logo during startup\n## \ud83d\ude80 Feature\r\nRight now napari takes a long time to launch the viewer. People can get discouraged as they wait, unsure if napari is loading, or if their data is loading, or if it is just hanging. Showing the napari logo, possibly with / without some text during that time might make it feel better.\n", "before_files": [{"content": "import sys\nfrom contextlib import contextmanager\n\nfrom qtpy.QtWidgets import QApplication\n\n\n@contextmanager\ndef gui_qt():\n \"\"\"Start a Qt event loop in which to run the application.\n\n Notes\n -----\n This context manager is not needed if running napari within an interactive\n IPython session. In this case, use the ``%gui qt`` magic command, or start\n IPython with the Qt GUI event loop enabled by default by using\n ``ipython --gui=qt``.\n \"\"\"\n app = QApplication.instance() or QApplication(sys.argv)\n yield\n app.exec_()\n", "path": "napari/_qt/event_loop.py"}, {"content": "\"\"\"\nnapari command line viewer.\n\"\"\"\nimport argparse\nimport sys\n\nimport numpy as np\n\nfrom .util import io\nfrom . import Viewer, gui_qt\n\n\ndef main():\n parser = argparse.ArgumentParser(usage=__doc__)\n parser.add_argument('images', nargs='*', help='Images to view.')\n parser.add_argument(\n '--layers',\n action='store_true',\n help='Treat multiple input images as layers.',\n )\n parser.add_argument(\n '-r',\n '--rgb',\n help='Treat images as RGB.',\n action='store_true',\n default=None,\n )\n parser.add_argument(\n '-g',\n '--grayscale',\n dest='rgb',\n action='store_false',\n help='interpret all dimensions in the image as spatial',\n )\n parser.add_argument(\n '-D',\n '--use-dask',\n action='store_true',\n help='Use dask to read in images. This conserves memory. This option '\n 'does nothing if a single image is given.',\n default=None,\n )\n parser.add_argument(\n '-N',\n '--use-numpy',\n action='store_false',\n dest='use_dask',\n help='Use NumPy to read in images. This can be more performant than '\n 'dask if all the images fit in RAM. 
This option does nothing if '\n 'only a single image is given.',\n )\n args = parser.parse_args()\n with gui_qt():\n v = Viewer()\n if len(args.images) > 0:\n images = io.magic_imread(\n args.images, use_dask=args.use_dask, stack=not args.layers\n )\n if args.layers:\n for layer in images:\n if layer.dtype in (\n np.int32,\n np.uint32,\n np.int64,\n np.uint64,\n ):\n v.add_labels(layer)\n else:\n v.add_image(layer, rgb=args.rgb)\n else:\n v.add_image(images, rgb=args.rgb)\n\n\nif __name__ == '__main__':\n sys.exit(main())\n", "path": "napari/__main__.py"}]}
1,389
414
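Stripped of the context manager, the splash mechanics in the patch above come down to a few Qt calls. The sketch below is standalone; "logo.png" is a placeholder path, and the explicit `processEvents()` call is an addition here, not part of the napari patch, to force an immediate paint in a plain script.

```python
import sys

from qtpy.QtGui import QPixmap
from qtpy.QtWidgets import QApplication, QSplashScreen

app = QApplication.instance() or QApplication(sys.argv)

splash = QSplashScreen(QPixmap("logo.png").scaled(400, 400))
splash.show()
app.processEvents()  # paint the splash before blocking startup work runs

# ... expensive startup work (viewer construction, data loading) ...

splash.close()
```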
gh_patches_debug_12149
rasdani/github-patches
git_diff
mindsdb__mindsdb-813
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add endpoint for mindsdb version Add HTTP route to get a version of mindsdb: /util/version/ That should return as mindsb version in JSON view: ``` { "mindsdb": "2.11.2" } ``` The new endpoint needs to be added in [Utils](https://github.com/mindsdb/mindsdb/blob/staging/mindsdb/api/http/namespaces/util.py). </issue> <code> [start of mindsdb/api/http/namespaces/util.py] 1 from flask import request 2 from flask_restx import Resource, abort 3 4 from mindsdb.api.http.namespaces.configs.util import ns_conf 5 6 7 @ns_conf.route('/ping') 8 class Ping(Resource): 9 @ns_conf.doc('get_ping') 10 def get(self): 11 '''Checks server avaliable''' 12 return {'status': 'ok'} 13 14 15 @ns_conf.route('/shutdown') 16 class Shutdown(Resource): 17 @ns_conf.doc('get_shutdown') 18 def get(self): 19 '''Shutdown server''' 20 if request.host.startswith('127.0.0.1') or request.host.startswith('localhost'): 21 func = request.environ.get('werkzeug.server.shutdown') 22 if func is None: 23 return '', 500 24 func() 25 return '', 200 26 abort(403, "") 27 [end of mindsdb/api/http/namespaces/util.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mindsdb/api/http/namespaces/util.py b/mindsdb/api/http/namespaces/util.py --- a/mindsdb/api/http/namespaces/util.py +++ b/mindsdb/api/http/namespaces/util.py @@ -2,7 +2,7 @@ from flask_restx import Resource, abort from mindsdb.api.http.namespaces.configs.util import ns_conf - +from mindsdb import __about__ @ns_conf.route('/ping') class Ping(Resource): @@ -24,3 +24,12 @@ func() return '', 200 abort(403, "") + + +@ns_conf.route('/util/version') +class Version(Resource): + @ns_conf.doc('get_endpoint') + def get(self): + '''Check endpoint''' + return {'mindsdb': "{__about__.__version__}"} +
{"golden_diff": "diff --git a/mindsdb/api/http/namespaces/util.py b/mindsdb/api/http/namespaces/util.py\n--- a/mindsdb/api/http/namespaces/util.py\n+++ b/mindsdb/api/http/namespaces/util.py\n@@ -2,7 +2,7 @@\n from flask_restx import Resource, abort\n \n from mindsdb.api.http.namespaces.configs.util import ns_conf\n-\n+from mindsdb import __about__\n \n @ns_conf.route('/ping')\n class Ping(Resource):\n@@ -24,3 +24,12 @@\n func()\n return '', 200\n abort(403, \"\")\n+\n+ \n+@ns_conf.route('/util/version')\n+class Version(Resource):\n+ @ns_conf.doc('get_endpoint')\n+ def get(self):\n+ '''Check endpoint'''\n+ return {'mindsdb': \"{__about__.__version__}\"}\n+\n", "issue": "Add endpoint for mindsdb version\nAdd HTTP route to get a version of mindsdb: /util/version/\r\nThat should return as mindsb version in JSON view:\r\n```\r\n{\r\n \"mindsdb\": \"2.11.2\"\r\n}\r\n```\r\nThe new endpoint needs to be added in [Utils](https://github.com/mindsdb/mindsdb/blob/staging/mindsdb/api/http/namespaces/util.py).\n", "before_files": [{"content": "from flask import request\nfrom flask_restx import Resource, abort\n\nfrom mindsdb.api.http.namespaces.configs.util import ns_conf\n\n\n@ns_conf.route('/ping')\nclass Ping(Resource):\n @ns_conf.doc('get_ping')\n def get(self):\n '''Checks server avaliable'''\n return {'status': 'ok'}\n\n\n@ns_conf.route('/shutdown')\nclass Shutdown(Resource):\n @ns_conf.doc('get_shutdown')\n def get(self):\n '''Shutdown server'''\n if request.host.startswith('127.0.0.1') or request.host.startswith('localhost'):\n func = request.environ.get('werkzeug.server.shutdown')\n if func is None:\n return '', 500\n func()\n return '', 200\n abort(403, \"\")\n", "path": "mindsdb/api/http/namespaces/util.py"}]}
848
195
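One detail of the merged diff above is worth flagging: the returned literal `"{__about__.__version__}"` has no `f` prefix, so as written the endpoint returns the template text rather than the version number. Below is a sketch of the intended handler; the route and import mirror the diff, while the namespace setup is a minimal hypothetical:

```python
from flask_restx import Namespace, Resource

from mindsdb import __about__

ns_conf = Namespace("util")  # stand-in for the configured namespace


@ns_conf.route("/util/version")
class Version(Resource):
    def get(self):
        """Return the running mindsdb version."""
        # Direct attribute access avoids the f-string pitfall entirely.
        return {"mindsdb": __about__.__version__}
```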
gh_patches_debug_11524
rasdani/github-patches
git_diff
Cog-Creators__Red-DiscordBot-1374
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [V3/Config] Clearing a registered data group when no data has been set results in a KeyError. Please be sure to read through other issues as well to make sure what you are suggesting/reporting has not already been suggested/reported ### Type: - [ ] Suggestion - [x] Bug ### Brief description of the problem When attempting to use methods such as `.clear_all_members` and `.clear_all_globals`, when no data has been set, will result in a KeyError being raised. ### Expected behavior Silently passes ### Actual behavior Key Error is raised. ### Steps to reproduce 1. register some data 2. try to clear all data from that scope 3. Key error is raised 4. You cry </issue> <code> [start of redbot/core/drivers/red_json.py] 1 from pathlib import Path 2 from typing import Tuple 3 4 from ..json_io import JsonIO 5 6 from .red_base import BaseDriver 7 8 __all__ = ["JSON"] 9 10 11 class JSON(BaseDriver): 12 """ 13 Subclass of :py:class:`.red_base.BaseDriver`. 14 15 .. py:attribute:: file_name 16 17 The name of the file in which to store JSON data. 18 19 .. py:attribute:: data_path 20 21 The path in which to store the file indicated by :py:attr:`file_name`. 22 """ 23 def __init__(self, cog_name, *, data_path_override: Path=None, 24 file_name_override: str="settings.json"): 25 super().__init__(cog_name) 26 self.file_name = file_name_override 27 if data_path_override: 28 self.data_path = data_path_override 29 else: 30 self.data_path = Path.cwd() / 'cogs' / '.data' / self.cog_name 31 32 self.data_path.mkdir(parents=True, exist_ok=True) 33 34 self.data_path = self.data_path / self.file_name 35 36 self.jsonIO = JsonIO(self.data_path) 37 38 try: 39 self.data = self.jsonIO._load_json() 40 except FileNotFoundError: 41 self.data = {} 42 self.jsonIO._save_json(self.data) 43 44 async def get(self, *identifiers: Tuple[str]): 45 partial = self.data 46 full_identifiers = (self.unique_cog_identifier, *identifiers) 47 for i in full_identifiers: 48 partial = partial[i] 49 return partial 50 51 async def set(self, *identifiers: str, value=None): 52 partial = self.data 53 full_identifiers = (self.unique_cog_identifier, *identifiers) 54 for i in full_identifiers[:-1]: 55 if i not in partial: 56 partial[i] = {} 57 partial = partial[i] 58 59 partial[full_identifiers[-1]] = value 60 await self.jsonIO._threadsafe_save_json(self.data) 61 62 async def clear(self, *identifiers: str): 63 partial = self.data 64 full_identifiers = (self.unique_cog_identifier, *identifiers) 65 for i in full_identifiers[:-1]: 66 if i not in partial: 67 break 68 partial = partial[i] 69 else: 70 del partial[identifiers[-1]] 71 await self.jsonIO._threadsafe_save_json(self.data) 72 [end of redbot/core/drivers/red_json.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/redbot/core/drivers/red_json.py b/redbot/core/drivers/red_json.py --- a/redbot/core/drivers/red_json.py +++ b/redbot/core/drivers/red_json.py @@ -62,10 +62,11 @@ async def clear(self, *identifiers: str): partial = self.data full_identifiers = (self.unique_cog_identifier, *identifiers) - for i in full_identifiers[:-1]: - if i not in partial: - break - partial = partial[i] - else: + try: + for i in full_identifiers[:-1]: + partial = partial[i] del partial[identifiers[-1]] - await self.jsonIO._threadsafe_save_json(self.data) + except KeyError: + pass + else: + await self.jsonIO._threadsafe_save_json(self.data)
{"golden_diff": "diff --git a/redbot/core/drivers/red_json.py b/redbot/core/drivers/red_json.py\n--- a/redbot/core/drivers/red_json.py\n+++ b/redbot/core/drivers/red_json.py\n@@ -62,10 +62,11 @@\n async def clear(self, *identifiers: str):\n partial = self.data\n full_identifiers = (self.unique_cog_identifier, *identifiers)\n- for i in full_identifiers[:-1]:\n- if i not in partial:\n- break\n- partial = partial[i]\n- else:\n+ try:\n+ for i in full_identifiers[:-1]:\n+ partial = partial[i]\n del partial[identifiers[-1]]\n- await self.jsonIO._threadsafe_save_json(self.data)\n+ except KeyError:\n+ pass\n+ else:\n+ await self.jsonIO._threadsafe_save_json(self.data)\n", "issue": "[V3/Config] Clearing a registered data group when no data has been set results in a KeyError.\nPlease be sure to read through other issues as well to make sure what you are suggesting/reporting has not already\r\nbeen suggested/reported\r\n\r\n### Type:\r\n\r\n- [ ] Suggestion\r\n- [x] Bug\r\n\r\n### Brief description of the problem\r\nWhen attempting to use methods such as `.clear_all_members` and `.clear_all_globals`, when no data has been set, will result in a KeyError being raised.\r\n### Expected behavior\r\nSilently passes\r\n### Actual behavior\r\nKey Error is raised.\r\n### Steps to reproduce\r\n\r\n1. register some data\r\n2. try to clear all data from that scope\r\n3. Key error is raised\r\n4. You cry\r\n\n", "before_files": [{"content": "from pathlib import Path\nfrom typing import Tuple\n\nfrom ..json_io import JsonIO\n\nfrom .red_base import BaseDriver\n\n__all__ = [\"JSON\"]\n\n\nclass JSON(BaseDriver):\n \"\"\"\n Subclass of :py:class:`.red_base.BaseDriver`.\n\n .. py:attribute:: file_name\n\n The name of the file in which to store JSON data.\n\n .. py:attribute:: data_path\n\n The path in which to store the file indicated by :py:attr:`file_name`.\n \"\"\"\n def __init__(self, cog_name, *, data_path_override: Path=None,\n file_name_override: str=\"settings.json\"):\n super().__init__(cog_name)\n self.file_name = file_name_override\n if data_path_override:\n self.data_path = data_path_override\n else:\n self.data_path = Path.cwd() / 'cogs' / '.data' / self.cog_name\n\n self.data_path.mkdir(parents=True, exist_ok=True)\n\n self.data_path = self.data_path / self.file_name\n\n self.jsonIO = JsonIO(self.data_path)\n\n try:\n self.data = self.jsonIO._load_json()\n except FileNotFoundError:\n self.data = {}\n self.jsonIO._save_json(self.data)\n\n async def get(self, *identifiers: Tuple[str]):\n partial = self.data\n full_identifiers = (self.unique_cog_identifier, *identifiers)\n for i in full_identifiers:\n partial = partial[i]\n return partial\n\n async def set(self, *identifiers: str, value=None):\n partial = self.data\n full_identifiers = (self.unique_cog_identifier, *identifiers)\n for i in full_identifiers[:-1]:\n if i not in partial:\n partial[i] = {}\n partial = partial[i]\n\n partial[full_identifiers[-1]] = value\n await self.jsonIO._threadsafe_save_json(self.data)\n\n async def clear(self, *identifiers: str):\n partial = self.data\n full_identifiers = (self.unique_cog_identifier, *identifiers)\n for i in full_identifiers[:-1]:\n if i not in partial:\n break\n partial = partial[i]\n else:\n del partial[identifiers[-1]]\n await self.jsonIO._threadsafe_save_json(self.data)\n", "path": "redbot/core/drivers/red_json.py"}]}
1,343
195
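The control-flow change above is subtle: in the buggy version the `for`/`else` only guarded the walk, and the final `del` could still raise. The pattern is shown in isolation below, with the disk-persistence step reduced to a boolean return:

```python
def clear(data, *identifiers):
    """Delete data[identifiers[0]][...][identifiers[-1]], tolerating absence."""
    partial = data
    try:
        for key in identifiers[:-1]:
            partial = partial[key]
        del partial[identifiers[-1]]
    except KeyError:
        return False  # nothing stored at that path: silently pass
    return True       # caller saves to disk only on an actual change


store = {"cog": {"member": {"42": {"balance": 10}}}}
assert clear(store, "cog", "member", "42", "balance") is True
assert clear(store, "cog", "member", "42", "balance") is False  # already gone
assert clear(store, "cog", "nope", "x") is False                # missing branch
```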
gh_patches_debug_7462
rasdani/github-patches
git_diff
arviz-devs__arviz-644
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> add api docs for plot_energy https://arviz-devs.github.io/arviz/generated/arviz.plot_energy.html#arviz.plot_energy See Posterior Docs for example https://arviz-devs.github.io/arviz/generated/arviz.plot_posterior.html#arviz.plot_posterior </issue> <code> [start of arviz/plots/energyplot.py] 1 """Plot energy transition distribution in HMC inference.""" 2 import numpy as np 3 import matplotlib.pyplot as plt 4 5 from ..data import convert_to_dataset 6 from ..stats import bfmi as e_bfmi 7 from .kdeplot import plot_kde 8 from .plot_utils import _scale_fig_size 9 10 11 def plot_energy( 12 data, 13 kind="kde", 14 bfmi=True, 15 figsize=None, 16 legend=True, 17 fill_alpha=(1, 0.75), 18 fill_color=("C0", "C5"), 19 bw=4.5, 20 textsize=None, 21 fill_kwargs=None, 22 plot_kwargs=None, 23 ax=None, 24 ): 25 """Plot energy transition distribution and marginal energy distribution in HMC algorithms. 26 27 This may help to diagnose poor exploration by gradient-based algorithms like HMC or NUTS. 28 29 Parameters 30 ---------- 31 data : xarray dataset, or object that can be converted (must represent 32 `sample_stats` and have an `energy` variable) 33 kind : str 34 Type of plot to display (kde or histogram) 35 bfmi : bool 36 If True add to the plot the value of the estimated Bayesian fraction of missing information 37 figsize : tuple 38 Figure size. If None it will be defined automatically. 39 legend : bool 40 Flag for plotting legend (defaults to True) 41 fill_alpha : tuple of floats 42 Alpha blending value for the shaded area under the curve, between 0 43 (no shade) and 1 (opaque). Defaults to (1, .75) 44 fill_color : tuple of valid matplotlib color 45 Color for Marginal energy distribution and Energy transition distribution. 46 Defaults to ('C0', 'C5') 47 bw : float 48 Bandwidth scaling factor for the KDE. Should be larger than 0. The higher this number the 49 smoother the KDE will be. Defaults to 4.5 which is essentially the same as the Scott's rule 50 of thumb (the default rule used by SciPy). Only works if `kind='kde'` 51 textsize: float 52 Text size scaling factor for labels, titles and lines. If None it will be autoscaled based 53 on figsize. 54 fill_kwargs : dicts, optional 55 Additional keywords passed to `arviz.plot_kde` (to control the shade) 56 plot_kwargs : dicts, optional 57 Additional keywords passed to `arviz.plot_kde` or `plt.hist` (if type='hist') 58 ax : axes 59 Matplotlib axes. 
60 61 Returns 62 ------- 63 ax : matplotlib axes 64 """ 65 energy = convert_to_dataset(data, group="sample_stats").energy.values 66 67 if ax is None: 68 _, ax = plt.subplots(figsize=figsize, constrained_layout=True) 69 70 if fill_kwargs is None: 71 fill_kwargs = {} 72 73 if plot_kwargs is None: 74 plot_kwargs = {} 75 76 figsize, _, _, xt_labelsize, linewidth, _ = _scale_fig_size(figsize, textsize, 1, 1) 77 78 series = zip( 79 fill_alpha, 80 fill_color, 81 ("Marginal Energy", "Energy transition"), 82 (energy - energy.mean(), np.diff(energy)), 83 ) 84 85 if kind == "kde": 86 for alpha, color, label, value in series: 87 fill_kwargs["alpha"] = alpha 88 fill_kwargs["color"] = color 89 plot_kwargs.setdefault("color", color) 90 plot_kwargs.setdefault("alpha", 0) 91 plot_kwargs.setdefault("linewidth", linewidth) 92 plot_kde( 93 value, 94 bw=bw, 95 label=label, 96 textsize=xt_labelsize, 97 plot_kwargs=plot_kwargs, 98 fill_kwargs=fill_kwargs, 99 ax=ax, 100 ) 101 102 elif kind == "hist": 103 for alpha, color, label, value in series: 104 ax.hist( 105 value.flatten(), 106 bins="auto", 107 density=True, 108 alpha=alpha, 109 label=label, 110 color=color, 111 **plot_kwargs 112 ) 113 114 else: 115 raise ValueError("Plot type {} not recognized.".format(kind)) 116 117 if bfmi: 118 for idx, val in enumerate(e_bfmi(energy)): 119 ax.plot([], label="chain {:>2} BFMI = {:.2f}".format(idx, val), alpha=0) 120 121 ax.set_xticks([]) 122 ax.set_yticks([]) 123 124 if legend: 125 ax.legend() 126 127 return ax 128 [end of arviz/plots/energyplot.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/arviz/plots/energyplot.py b/arviz/plots/energyplot.py --- a/arviz/plots/energyplot.py +++ b/arviz/plots/energyplot.py @@ -61,6 +61,25 @@ Returns ------- ax : matplotlib axes + + Examples + -------- + Plot a default energy plot + + .. plot:: + :context: close-figs + + >>> import arviz as az + >>> data = az.load_arviz_data('centered_eight') + >>> az.plot_energy(data) + + Represent energy plot via histograms + + .. plot:: + :context: close-figs + + >>> az.plot_energy(data, kind='hist') + """ energy = convert_to_dataset(data, group="sample_stats").energy.values
{"golden_diff": "diff --git a/arviz/plots/energyplot.py b/arviz/plots/energyplot.py\n--- a/arviz/plots/energyplot.py\n+++ b/arviz/plots/energyplot.py\n@@ -61,6 +61,25 @@\n Returns\n -------\n ax : matplotlib axes\n+\n+ Examples\n+ --------\n+ Plot a default energy plot\n+\n+ .. plot::\n+ :context: close-figs\n+\n+ >>> import arviz as az\n+ >>> data = az.load_arviz_data('centered_eight')\n+ >>> az.plot_energy(data)\n+\n+ Represent energy plot via histograms\n+\n+ .. plot::\n+ :context: close-figs\n+\n+ >>> az.plot_energy(data, kind='hist')\n+\n \"\"\"\n energy = convert_to_dataset(data, group=\"sample_stats\").energy.values\n", "issue": "add api docs for plot_energy\nhttps://arviz-devs.github.io/arviz/generated/arviz.plot_energy.html#arviz.plot_energy\r\n\r\nSee Posterior Docs for example\r\nhttps://arviz-devs.github.io/arviz/generated/arviz.plot_posterior.html#arviz.plot_posterior\n", "before_files": [{"content": "\"\"\"Plot energy transition distribution in HMC inference.\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom ..data import convert_to_dataset\nfrom ..stats import bfmi as e_bfmi\nfrom .kdeplot import plot_kde\nfrom .plot_utils import _scale_fig_size\n\n\ndef plot_energy(\n data,\n kind=\"kde\",\n bfmi=True,\n figsize=None,\n legend=True,\n fill_alpha=(1, 0.75),\n fill_color=(\"C0\", \"C5\"),\n bw=4.5,\n textsize=None,\n fill_kwargs=None,\n plot_kwargs=None,\n ax=None,\n):\n \"\"\"Plot energy transition distribution and marginal energy distribution in HMC algorithms.\n\n This may help to diagnose poor exploration by gradient-based algorithms like HMC or NUTS.\n\n Parameters\n ----------\n data : xarray dataset, or object that can be converted (must represent\n `sample_stats` and have an `energy` variable)\n kind : str\n Type of plot to display (kde or histogram)\n bfmi : bool\n If True add to the plot the value of the estimated Bayesian fraction of missing information\n figsize : tuple\n Figure size. If None it will be defined automatically.\n legend : bool\n Flag for plotting legend (defaults to True)\n fill_alpha : tuple of floats\n Alpha blending value for the shaded area under the curve, between 0\n (no shade) and 1 (opaque). Defaults to (1, .75)\n fill_color : tuple of valid matplotlib color\n Color for Marginal energy distribution and Energy transition distribution.\n Defaults to ('C0', 'C5')\n bw : float\n Bandwidth scaling factor for the KDE. Should be larger than 0. The higher this number the\n smoother the KDE will be. Defaults to 4.5 which is essentially the same as the Scott's rule\n of thumb (the default rule used by SciPy). Only works if `kind='kde'`\n textsize: float\n Text size scaling factor for labels, titles and lines. 
If None it will be autoscaled based\n on figsize.\n fill_kwargs : dicts, optional\n Additional keywords passed to `arviz.plot_kde` (to control the shade)\n plot_kwargs : dicts, optional\n Additional keywords passed to `arviz.plot_kde` or `plt.hist` (if type='hist')\n ax : axes\n Matplotlib axes.\n\n Returns\n -------\n ax : matplotlib axes\n \"\"\"\n energy = convert_to_dataset(data, group=\"sample_stats\").energy.values\n\n if ax is None:\n _, ax = plt.subplots(figsize=figsize, constrained_layout=True)\n\n if fill_kwargs is None:\n fill_kwargs = {}\n\n if plot_kwargs is None:\n plot_kwargs = {}\n\n figsize, _, _, xt_labelsize, linewidth, _ = _scale_fig_size(figsize, textsize, 1, 1)\n\n series = zip(\n fill_alpha,\n fill_color,\n (\"Marginal Energy\", \"Energy transition\"),\n (energy - energy.mean(), np.diff(energy)),\n )\n\n if kind == \"kde\":\n for alpha, color, label, value in series:\n fill_kwargs[\"alpha\"] = alpha\n fill_kwargs[\"color\"] = color\n plot_kwargs.setdefault(\"color\", color)\n plot_kwargs.setdefault(\"alpha\", 0)\n plot_kwargs.setdefault(\"linewidth\", linewidth)\n plot_kde(\n value,\n bw=bw,\n label=label,\n textsize=xt_labelsize,\n plot_kwargs=plot_kwargs,\n fill_kwargs=fill_kwargs,\n ax=ax,\n )\n\n elif kind == \"hist\":\n for alpha, color, label, value in series:\n ax.hist(\n value.flatten(),\n bins=\"auto\",\n density=True,\n alpha=alpha,\n label=label,\n color=color,\n **plot_kwargs\n )\n\n else:\n raise ValueError(\"Plot type {} not recognized.\".format(kind))\n\n if bfmi:\n for idx, val in enumerate(e_bfmi(energy)):\n ax.plot([], label=\"chain {:>2} BFMI = {:.2f}\".format(idx, val), alpha=0)\n\n ax.set_xticks([])\n ax.set_yticks([])\n\n if legend:\n ax.legend()\n\n return ax\n", "path": "arviz/plots/energyplot.py"}]}
1,816
192
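The `.. plot::` blocks added by the diff above are not plain reST; they come from matplotlib's Sphinx plot directive, which executes the `>>>` code at doc-build time and embeds the resulting figures. A sketch of the conf.py side follows; ArviZ's real documentation configuration may differ in the exact extension list:

```python
# docs/source/conf.py (sketch)
extensions = [
    "sphinx.ext.autodoc",
    "numpydoc",                             # parses the Examples section
    "matplotlib.sphinxext.plot_directive",  # provides `.. plot::`
]

# Optionally show the >>> source above each rendered figure.
plot_include_source = True
# `:context: close-figs` in the docstring keeps `data` alive between the
# two plot blocks while closing each figure before the next one renders.
```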
gh_patches_debug_14517
rasdani/github-patches
git_diff
Qiskit__qiskit-862
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Make interactive visualizations default for Jupyter <!-- ⚠️ If you do not respect this template, your issue will be closed --> <!-- ⚠️ Make sure to browse the opened and closed issues to confirm this idea does not exist. --> ### What is the expected enhancement? The interactive visualizations contributed in PR #765 are much better than the static ones, and should be default when the environment is a Jupyter notebook. This is related to Epic #707 for providing better jupyter tools. Several issues must be solved first: - These visualizations need internet connection. Can we eliminate that? - The interface of the static and interactive visualizations are all the same, except for `plot_histogram` vs `iplot_histogram`. These should be made similar for consistency. But the interactive visualization is able to plot multiple histograms in one plot. - How "heavy" is it to run these interactive visualizations? I hear my laptop fan. </issue> <code> [start of qiskit/tools/visualization/__init__.py] 1 # -*- coding: utf-8 -*- 2 3 # Copyright 2018, IBM. 4 # 5 # This source code is licensed under the Apache License, Version 2.0 found in 6 # the LICENSE.txt file in the root directory of this source tree. 7 8 """Main QISKit visualization methods.""" 9 10 from ._circuit_visualization import circuit_drawer, plot_circuit, generate_latex_source,\ 11 latex_circuit_drawer, matplotlib_circuit_drawer, qx_color_scheme 12 from ._state_visualization import plot_state 13 from ._counts_visualization import plot_histogram 14 [end of qiskit/tools/visualization/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/qiskit/tools/visualization/__init__.py b/qiskit/tools/visualization/__init__.py --- a/qiskit/tools/visualization/__init__.py +++ b/qiskit/tools/visualization/__init__.py @@ -7,7 +7,18 @@ """Main QISKit visualization methods.""" +import sys + from ._circuit_visualization import circuit_drawer, plot_circuit, generate_latex_source,\ latex_circuit_drawer, matplotlib_circuit_drawer, qx_color_scheme -from ._state_visualization import plot_state from ._counts_visualization import plot_histogram + +if ('ipykernel' in sys.modules) and ('spyder' not in sys.modules): + import requests + if requests.get( + 'https://qvisualization.mybluemix.net/').status_code == 200: + from .interactive._iplot_state import iplot_state as plot_state + else: + from ._state_visualization import plot_state +else: + from ._state_visualization import plot_state
{"golden_diff": "diff --git a/qiskit/tools/visualization/__init__.py b/qiskit/tools/visualization/__init__.py\n--- a/qiskit/tools/visualization/__init__.py\n+++ b/qiskit/tools/visualization/__init__.py\n@@ -7,7 +7,18 @@\n \n \"\"\"Main QISKit visualization methods.\"\"\"\n \n+import sys\n+\n from ._circuit_visualization import circuit_drawer, plot_circuit, generate_latex_source,\\\n latex_circuit_drawer, matplotlib_circuit_drawer, qx_color_scheme\n-from ._state_visualization import plot_state\n from ._counts_visualization import plot_histogram\n+\n+if ('ipykernel' in sys.modules) and ('spyder' not in sys.modules):\n+ import requests\n+ if requests.get(\n+ 'https://qvisualization.mybluemix.net/').status_code == 200:\n+ from .interactive._iplot_state import iplot_state as plot_state\n+ else:\n+ from ._state_visualization import plot_state\n+else:\n+ from ._state_visualization import plot_state\n", "issue": "Make interactive visualizations default for Jupyter\n<!-- \u26a0\ufe0f If you do not respect this template, your issue will be closed -->\r\n<!-- \u26a0\ufe0f Make sure to browse the opened and closed issues to confirm this idea does not exist. -->\r\n\r\n### What is the expected enhancement?\r\nThe interactive visualizations contributed in PR #765 are much better than the static ones, and should be default when the environment is a Jupyter notebook. This is related to Epic #707 for providing better jupyter tools.\r\n\r\nSeveral issues must be solved first:\r\n- These visualizations need internet connection. Can we eliminate that?\r\n- The interface of the static and interactive visualizations are all the same, except for `plot_histogram` vs `iplot_histogram`. These should be made similar for consistency. But the interactive visualization is able to plot multiple histograms in one plot.\r\n- How \"heavy\" is it to run these interactive visualizations? I hear my laptop fan.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright 2018, IBM.\n#\n# This source code is licensed under the Apache License, Version 2.0 found in\n# the LICENSE.txt file in the root directory of this source tree.\n\n\"\"\"Main QISKit visualization methods.\"\"\"\n\nfrom ._circuit_visualization import circuit_drawer, plot_circuit, generate_latex_source,\\\n latex_circuit_drawer, matplotlib_circuit_drawer, qx_color_scheme\nfrom ._state_visualization import plot_state\nfrom ._counts_visualization import plot_histogram\n", "path": "qiskit/tools/visualization/__init__.py"}]}
875
231
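A minimal sketch of the environment probe that the Qiskit record's golden diff performs when choosing between the interactive and static `plot_state`. The module names and URL come from the diff itself; the `try/except` is an addition here so the sketch degrades gracefully offline, whereas the actual patch issues the request unconditionally at import time.

```python
# Which plot_state backend the patched module would pick (sketch).
import sys

def pick_plot_state():
    if ("ipykernel" in sys.modules) and ("spyder" not in sys.modules):
        try:
            import requests
            if requests.get("https://qvisualization.mybluemix.net/").status_code == 200:
                return "interactive"  # interactive._iplot_state.iplot_state
        except Exception:
            pass
    return "static"                   # _state_visualization.plot_state

print(pick_plot_state())
```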
gh_patches_debug_3440
rasdani/github-patches
git_diff
spack__spack-21932
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> shell test kcov suggestion does not work ### Steps to reproduce the issue ```console $ ./share/spack/qa/run-shell-tests ERROR: kcov is required to run this script. To install with Spack, run: $ spack install kcov Then add the bin directory to your PATH. $ spack load kcov $ ./share/spack/qa/run-shell-tests ERROR: kcov is required to run this script. To install with Spack, run: $ spack install kcov Then add the bin directory to your PATH. ``` Seems like there is no kcov binary: ``` $ kcov Command 'kcov' not found, but can be installed with: sudo apt install kcov ``` * **Spack:** 0.16.0-1501-e8eb76c198 * **Python:** 3.8.7 * **Platform:** linux-ubuntu20.04-zen2 * **Concretizer:** clingo ### Additional information - [x] I have run `spack debug report` and reported the version of Spack/Python/Platform - [x] I have searched the issues of this repo and believe this is not a duplicate - [x] I have run the failing commands in debug mode and reported the output </issue> <code> [start of var/spack/repos/builtin/packages/kcov/package.py] 1 # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other 2 # Spack Project Developers. See the top-level COPYRIGHT file for details. 3 # 4 # SPDX-License-Identifier: (Apache-2.0 OR MIT) 5 6 from spack import * 7 8 9 class Kcov(CMakePackage): 10 """Code coverage tool for compiled programs, Python and Bash which uses 11 debugging information to collect and report data without special 12 compilation options""" 13 14 homepage = "http://simonkagstrom.github.io/kcov/index.html" 15 url = "https://github.com/SimonKagstrom/kcov/archive/38.tar.gz" 16 17 version('38', sha256='b37af60d81a9b1e3b140f9473bdcb7975af12040feb24cc666f9bb2bb0be68b4') 18 19 depends_on('[email protected]:', type='build') 20 depends_on('zlib') 21 depends_on('curl') 22 23 def cmake_args(self): 24 # Necessary at least on macOS, fixes linking error to LLDB 25 # https://github.com/Homebrew/homebrew-core/blob/master/Formula/kcov.rb 26 return ['-DSPECIFY_RPATH=ON'] 27 28 @run_after('install') 29 @on_package_attributes(run_tests=True) 30 def test_install(self): 31 # The help message exits with an exit code of 1 32 kcov = Executable(self.prefix.bin.kcov) 33 kcov('-h', ignore_errors=1) 34 [end of var/spack/repos/builtin/packages/kcov/package.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/var/spack/repos/builtin/packages/kcov/package.py b/var/spack/repos/builtin/packages/kcov/package.py --- a/var/spack/repos/builtin/packages/kcov/package.py +++ b/var/spack/repos/builtin/packages/kcov/package.py @@ -19,6 +19,8 @@ depends_on('[email protected]:', type='build') depends_on('zlib') depends_on('curl') + depends_on('elfutils') + depends_on('binutils +libiberty', type='link') def cmake_args(self): # Necessary at least on macOS, fixes linking error to LLDB
{"golden_diff": "diff --git a/var/spack/repos/builtin/packages/kcov/package.py b/var/spack/repos/builtin/packages/kcov/package.py\n--- a/var/spack/repos/builtin/packages/kcov/package.py\n+++ b/var/spack/repos/builtin/packages/kcov/package.py\n@@ -19,6 +19,8 @@\n depends_on('[email protected]:', type='build')\n depends_on('zlib')\n depends_on('curl')\n+ depends_on('elfutils')\n+ depends_on('binutils +libiberty', type='link')\n \n def cmake_args(self):\n # Necessary at least on macOS, fixes linking error to LLDB\n", "issue": "shell test kcov suggestion does not work\n### Steps to reproduce the issue\r\n\r\n```console\r\n$ ./share/spack/qa/run-shell-tests \r\nERROR: kcov is required to run this script.\r\n\r\nTo install with Spack, run:\r\n $ spack install kcov\r\nThen add the bin directory to your PATH.\r\n$ spack load kcov\r\n$ ./share/spack/qa/run-shell-tests \r\nERROR: kcov is required to run this script.\r\n\r\nTo install with Spack, run:\r\n $ spack install kcov\r\nThen add the bin directory to your PATH.\r\n```\r\n\r\nSeems like there is no kcov binary:\r\n\r\n```\r\n$ kcov\r\n\r\nCommand 'kcov' not found, but can be installed with:\r\n\r\nsudo apt install kcov\r\n```\r\n\r\n* **Spack:** 0.16.0-1501-e8eb76c198\r\n* **Python:** 3.8.7\r\n* **Platform:** linux-ubuntu20.04-zen2\r\n* **Concretizer:** clingo\r\n\r\n### Additional information\r\n\r\n- [x] I have run `spack debug report` and reported the version of Spack/Python/Platform\r\n- [x] I have searched the issues of this repo and believe this is not a duplicate\r\n- [x] I have run the failing commands in debug mode and reported the output\r\n\r\n\r\n\n", "before_files": [{"content": "# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\nfrom spack import *\n\n\nclass Kcov(CMakePackage):\n \"\"\"Code coverage tool for compiled programs, Python and Bash which uses\n debugging information to collect and report data without special\n compilation options\"\"\"\n\n homepage = \"http://simonkagstrom.github.io/kcov/index.html\"\n url = \"https://github.com/SimonKagstrom/kcov/archive/38.tar.gz\"\n\n version('38', sha256='b37af60d81a9b1e3b140f9473bdcb7975af12040feb24cc666f9bb2bb0be68b4')\n\n depends_on('[email protected]:', type='build')\n depends_on('zlib')\n depends_on('curl')\n\n def cmake_args(self):\n # Necessary at least on macOS, fixes linking error to LLDB\n # https://github.com/Homebrew/homebrew-core/blob/master/Formula/kcov.rb\n return ['-DSPECIFY_RPATH=ON']\n\n @run_after('install')\n @on_package_attributes(run_tests=True)\n def test_install(self):\n # The help message exits with an exit code of 1\n kcov = Executable(self.prefix.bin.kcov)\n kcov('-h', ignore_errors=1)\n", "path": "var/spack/repos/builtin/packages/kcov/package.py"}]}
1248
145
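The real fix in the kcov record is the added `elfutils` and `binutils +libiberty` dependencies; the user-visible symptom was a build that ships no usable binary. Below is a quick standalone check for that symptom, loosely mirroring the package's own `test_install`, which runs `kcov -h` and ignores its non-zero exit.

```python
# Symptom check: does the installed kcov actually provide a binary?
import shutil
import subprocess

kcov = shutil.which("kcov")
if kcov is None:
    raise SystemExit("kcov is not on PATH; the build produced no usable binary")
subprocess.run([kcov, "-h"])  # kcov's help exits non-zero, so don't check the code
```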
gh_patches_debug_23351
rasdani/github-patches
git_diff
pwndbg__pwndbg-671
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> subprocess.CalledProcessError: Command '['/usr/local/bin/checksec', '--file', '/tmp/xxx']' returned non-zero exit status 2. ``` pwndbg> checksec Traceback (most recent call last): File "/root/pwndbg/pwndbg/commands/__init__.py", line 109, in __call__ return self.function(*args, **kwargs) File "/root/pwndbg/pwndbg/commands/__init__.py", line 189, in _OnlyWithFile return function(*a, **kw) File "/root/pwndbg/pwndbg/commands/checksec.py", line 16, in checksec print(pwndbg.wrappers.checksec.get_raw_out()) File "/root/pwndbg/pwndbg/commands/__init__.py", line 189, in _OnlyWithFile return function(*a, **kw) File "/root/pwndbg/pwndbg/wrappers/__init__.py", line 28, in _OnlyWithCommand return function(*a, **kw) File "/root/pwndbg/pwndbg/memoize.py", line 48, in __call__ value = self.func(*args, **kwargs) File "/root/pwndbg/pwndbg/wrappers/checksec.py", line 20, in get_raw_out return pwndbg.wrappers.call_cmd(cmd) File "/root/pwndbg/pwndbg/wrappers/__init__.py", line 35, in call_cmd return subprocess.check_output(cmd, stderr=STDOUT).decode('utf-8') File "/usr/lib/python3.7/subprocess.py", line 395, in check_output **kwargs).stdout File "/usr/lib/python3.7/subprocess.py", line 487, in run output=stdout, stderr=stderr) subprocess.CalledProcessError: Command '['/usr/local/bin/checksec', '--file', '/tmp/forgot']' returned non-zero exit status 2. ``` when checksec running in python3 pwntools environment, raise a failure. Here's the reason: ``` root@kali:/tmp# checksec --file forgot usage: checksec [-h] elf [elf ...] checksec: error: unrecognized arguments: --file ``` So modify pwndbg/pwndbg/wrappers/checksec.py: 19 with: `cmd = [get_raw_out.cmd_path, local_path]` Then it will be solved </issue> <code> [start of pwndbg/wrappers/checksec.py] 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 from __future__ import absolute_import 4 from __future__ import division 5 from __future__ import print_function 6 from __future__ import unicode_literals 7 8 import pwndbg.commands 9 import pwndbg.memoize 10 import pwndbg.wrappers 11 12 cmd_name = "checksec" 13 14 @pwndbg.wrappers.OnlyWithCommand(cmd_name) 15 @pwndbg.memoize.reset_on_objfile 16 def get_raw_out(): 17 18 local_path = pwndbg.file.get_file(pwndbg.proc.exe) 19 cmd = [get_raw_out.cmd_path, "--file", local_path] 20 return pwndbg.wrappers.call_cmd(cmd) 21 22 @pwndbg.wrappers.OnlyWithCommand(cmd_name) 23 def relro_status(): 24 relro = "No RELRO" 25 out = get_raw_out() 26 27 if "Full RELRO" in out: 28 relro = "Full RELRO" 29 elif "Partial RELRO" in out: 30 relro = "Partial RELRO" 31 32 return relro 33 34 @pwndbg.wrappers.OnlyWithCommand(cmd_name) 35 def pie_status(): 36 pie = "No PIE" 37 out = get_raw_out() 38 39 if "PIE enabled" in out: 40 pie = "PIE enabled" 41 42 return pie 43 [end of pwndbg/wrappers/checksec.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pwndbg/wrappers/checksec.py b/pwndbg/wrappers/checksec.py --- a/pwndbg/wrappers/checksec.py +++ b/pwndbg/wrappers/checksec.py @@ -5,6 +5,10 @@ from __future__ import print_function from __future__ import unicode_literals +import subprocess +from re import search +from subprocess import STDOUT + import pwndbg.commands import pwndbg.memoize import pwndbg.wrappers @@ -14,10 +18,17 @@ @pwndbg.wrappers.OnlyWithCommand(cmd_name) @pwndbg.memoize.reset_on_objfile def get_raw_out(): - local_path = pwndbg.file.get_file(pwndbg.proc.exe) - cmd = [get_raw_out.cmd_path, "--file", local_path] - return pwndbg.wrappers.call_cmd(cmd) + try: + version_output = subprocess.check_output([get_raw_out.cmd_path, "--version"], stderr=STDOUT).decode('utf-8') + match = search('checksec v([\\w.]+),', version_output) + if match: + version = tuple(map(int, (match.group(1).split(".")))) + if version >= (2, 0): + return pwndbg.wrappers.call_cmd([get_raw_out.cmd_path, "--file=" + local_path]) + except Exception: + pass + return pwndbg.wrappers.call_cmd([get_raw_out.cmd_path, "--file", local_path]) @pwndbg.wrappers.OnlyWithCommand(cmd_name) def relro_status():
{"golden_diff": "diff --git a/pwndbg/wrappers/checksec.py b/pwndbg/wrappers/checksec.py\n--- a/pwndbg/wrappers/checksec.py\n+++ b/pwndbg/wrappers/checksec.py\n@@ -5,6 +5,10 @@\n from __future__ import print_function\n from __future__ import unicode_literals\n \n+import subprocess\n+from re import search\n+from subprocess import STDOUT\n+\n import pwndbg.commands\n import pwndbg.memoize\n import pwndbg.wrappers\n@@ -14,10 +18,17 @@\n @pwndbg.wrappers.OnlyWithCommand(cmd_name)\n @pwndbg.memoize.reset_on_objfile\n def get_raw_out():\n-\n local_path = pwndbg.file.get_file(pwndbg.proc.exe)\n- cmd = [get_raw_out.cmd_path, \"--file\", local_path]\n- return pwndbg.wrappers.call_cmd(cmd)\n+ try:\n+ version_output = subprocess.check_output([get_raw_out.cmd_path, \"--version\"], stderr=STDOUT).decode('utf-8')\n+ match = search('checksec v([\\\\w.]+),', version_output)\n+ if match:\n+ version = tuple(map(int, (match.group(1).split(\".\"))))\n+ if version >= (2, 0):\n+ return pwndbg.wrappers.call_cmd([get_raw_out.cmd_path, \"--file=\" + local_path])\n+ except Exception:\n+ pass\n+ return pwndbg.wrappers.call_cmd([get_raw_out.cmd_path, \"--file\", local_path])\n \n @pwndbg.wrappers.OnlyWithCommand(cmd_name)\n def relro_status():\n", "issue": "subprocess.CalledProcessError: Command '['/usr/local/bin/checksec', '--file', '/tmp/xxx']' returned non-zero exit status 2.\n```\r\npwndbg> checksec \r\nTraceback (most recent call last):\r\n File \"/root/pwndbg/pwndbg/commands/__init__.py\", line 109, in __call__\r\n return self.function(*args, **kwargs)\r\n File \"/root/pwndbg/pwndbg/commands/__init__.py\", line 189, in _OnlyWithFile\r\n return function(*a, **kw)\r\n File \"/root/pwndbg/pwndbg/commands/checksec.py\", line 16, in checksec\r\n print(pwndbg.wrappers.checksec.get_raw_out())\r\n File \"/root/pwndbg/pwndbg/commands/__init__.py\", line 189, in _OnlyWithFile\r\n return function(*a, **kw)\r\n File \"/root/pwndbg/pwndbg/wrappers/__init__.py\", line 28, in _OnlyWithCommand\r\n return function(*a, **kw)\r\n File \"/root/pwndbg/pwndbg/memoize.py\", line 48, in __call__\r\n value = self.func(*args, **kwargs)\r\n File \"/root/pwndbg/pwndbg/wrappers/checksec.py\", line 20, in get_raw_out\r\n return pwndbg.wrappers.call_cmd(cmd)\r\n File \"/root/pwndbg/pwndbg/wrappers/__init__.py\", line 35, in call_cmd\r\n return subprocess.check_output(cmd, stderr=STDOUT).decode('utf-8')\r\n File \"/usr/lib/python3.7/subprocess.py\", line 395, in check_output\r\n **kwargs).stdout\r\n File \"/usr/lib/python3.7/subprocess.py\", line 487, in run\r\n output=stdout, stderr=stderr)\r\nsubprocess.CalledProcessError: Command '['/usr/local/bin/checksec', '--file', '/tmp/forgot']' returned non-zero exit status 2.\r\n```\r\n\r\nwhen checksec running in python3 pwntools environment, raise a failure. 
Here's the reason:\r\n```\r\nroot@kali:/tmp# checksec --file forgot\r\nusage: checksec [-h] elf [elf ...]\r\nchecksec: error: unrecognized arguments: --file\r\n```\r\n\r\n\r\nSo modify pwndbg/pwndbg/wrappers/checksec.py: 19 with:\r\n`cmd = [get_raw_out.cmd_path, local_path]`\r\n\r\nThen it will be solved\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport pwndbg.commands\nimport pwndbg.memoize\nimport pwndbg.wrappers\n\ncmd_name = \"checksec\"\n\[email protected](cmd_name)\[email protected]_on_objfile\ndef get_raw_out():\n\n local_path = pwndbg.file.get_file(pwndbg.proc.exe)\n cmd = [get_raw_out.cmd_path, \"--file\", local_path]\n return pwndbg.wrappers.call_cmd(cmd)\n\[email protected](cmd_name)\ndef relro_status():\n relro = \"No RELRO\"\n out = get_raw_out()\n\n if \"Full RELRO\" in out:\n relro = \"Full RELRO\"\n elif \"Partial RELRO\" in out:\n relro = \"Partial RELRO\"\n\n return relro\n\[email protected](cmd_name)\ndef pie_status():\n pie = \"No PIE\"\n out = get_raw_out()\n\n if \"PIE enabled\" in out:\n pie = \"PIE enabled\"\n\n return pie\n", "path": "pwndbg/wrappers/checksec.py"}]}
1459
361
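The pwndbg golden diff selects between the `--file=` and `--file` argument styles by parsing `checksec --version`. The same selection logic, reduced to a function that returns the argv list instead of invoking pwndbg's `call_cmd`:

```python
# Version-dependent argument selection from the golden diff, returning argv.
import re
import subprocess

def checksec_cmd(cmd_path, local_path):
    try:
        out = subprocess.check_output([cmd_path, "--version"],
                                      stderr=subprocess.STDOUT).decode("utf-8")
        match = re.search(r"checksec v([\w.]+),", out)
        if match:
            version = tuple(map(int, match.group(1).split(".")))
            if version >= (2, 0):
                return [cmd_path, "--file=" + local_path]
    except Exception:
        pass
    return [cmd_path, "--file", local_path]
```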
gh_patches_debug_60955
rasdani/github-patches
git_diff
voicepaw__so-vits-svc-fork-1155
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Ensure proper sorting by name for output files ### Is your feature request related to a problem? Please describe. To enhance readability and ensure proper sorting by name, the numeric part of output file names should have a fixed width. This can be achieved by adding leading zeros to the numeric part, with four digits likely being sufficient. ### Describe alternatives you've considered I don't have any. ### Additional context _No response_ ### Code of Conduct - [X] I agree to follow this project's Code of Conduct ### Are you willing to resolve this issue by submitting a Pull Request? - [X] Yes, I have the time, and I know how to start. - [ ] Yes, I have the time, but I don't know how to start. I would need guidance. - [ ] No, I don't have the time, although I believe I could do it if I had the time... - [ ] No, I don't have the time and I wouldn't even know how to start. </issue> <code> [start of src/so_vits_svc_fork/preprocessing/preprocess_speaker_diarization.py] 1 from __future__ import annotations 2 3 from collections import defaultdict 4 from logging import getLogger 5 from pathlib import Path 6 7 import librosa 8 import soundfile as sf 9 import torch 10 from joblib import Parallel, delayed 11 from pyannote.audio import Pipeline 12 from tqdm import tqdm 13 from tqdm_joblib import tqdm_joblib 14 15 LOG = getLogger(__name__) 16 17 18 def _process_one( 19 input_path: Path, 20 output_dir: Path, 21 sr: int, 22 *, 23 min_speakers: int = 1, 24 max_speakers: int = 1, 25 huggingface_token: str | None = None, 26 ) -> None: 27 try: 28 audio, sr = librosa.load(input_path, sr=sr, mono=True) 29 except Exception as e: 30 LOG.warning(f"Failed to read {input_path}: {e}") 31 return 32 pipeline = Pipeline.from_pretrained( 33 "pyannote/speaker-diarization", use_auth_token=huggingface_token 34 ) 35 if pipeline is None: 36 raise ValueError("Failed to load pipeline") 37 pipeline = pipeline.to(torch.device("cuda")) 38 LOG.info(f"Processing {input_path}. This may take a while...") 39 diarization = pipeline( 40 input_path, min_speakers=min_speakers, max_speakers=max_speakers 41 ) 42 43 LOG.info(f"Found {len(diarization)} tracks, writing to {output_dir}") 44 speaker_count = defaultdict(int) 45 46 output_dir.mkdir(parents=True, exist_ok=True) 47 for segment, track, speaker in tqdm( 48 list(diarization.itertracks(yield_label=True)), desc=f"Writing {input_path}" 49 ): 50 if segment.end - segment.start < 1: 51 continue 52 speaker_count[speaker] += 1 53 audio_cut = audio[int(segment.start * sr) : int(segment.end * sr)] 54 sf.write( 55 (output_dir / f"{speaker}_{speaker_count[speaker]}.wav"), 56 audio_cut, 57 sr, 58 ) 59 60 LOG.info(f"Speaker count: {speaker_count}") 61 62 63 def preprocess_speaker_diarization( 64 input_dir: Path | str, 65 output_dir: Path | str, 66 sr: int, 67 *, 68 min_speakers: int = 1, 69 max_speakers: int = 1, 70 huggingface_token: str | None = None, 71 n_jobs: int = -1, 72 ) -> None: 73 if huggingface_token is not None and not huggingface_token.startswith("hf_"): 74 LOG.warning("Huggingface token probably should start with hf_") 75 if not torch.cuda.is_available(): 76 LOG.warning("CUDA is not available. 
This will be extremely slow.") 77 input_dir = Path(input_dir) 78 output_dir = Path(output_dir) 79 input_dir.mkdir(parents=True, exist_ok=True) 80 output_dir.mkdir(parents=True, exist_ok=True) 81 input_paths = list(input_dir.rglob("*.*")) 82 with tqdm_joblib(desc="Preprocessing speaker diarization", total=len(input_paths)): 83 Parallel(n_jobs=n_jobs)( 84 delayed(_process_one)( 85 input_path, 86 output_dir / input_path.relative_to(input_dir).parent / input_path.stem, 87 sr, 88 max_speakers=max_speakers, 89 min_speakers=min_speakers, 90 huggingface_token=huggingface_token, 91 ) 92 for input_path in input_paths 93 ) 94 [end of src/so_vits_svc_fork/preprocessing/preprocess_speaker_diarization.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/so_vits_svc_fork/preprocessing/preprocess_speaker_diarization.py b/src/so_vits_svc_fork/preprocessing/preprocess_speaker_diarization.py --- a/src/so_vits_svc_fork/preprocessing/preprocess_speaker_diarization.py +++ b/src/so_vits_svc_fork/preprocessing/preprocess_speaker_diarization.py @@ -52,7 +52,7 @@ speaker_count[speaker] += 1 audio_cut = audio[int(segment.start * sr) : int(segment.end * sr)] sf.write( - (output_dir / f"{speaker}_{speaker_count[speaker]}.wav"), + (output_dir / f"{speaker}_{speaker_count[speaker]:04d}.wav"), audio_cut, sr, )
{"golden_diff": "diff --git a/src/so_vits_svc_fork/preprocessing/preprocess_speaker_diarization.py b/src/so_vits_svc_fork/preprocessing/preprocess_speaker_diarization.py\n--- a/src/so_vits_svc_fork/preprocessing/preprocess_speaker_diarization.py\n+++ b/src/so_vits_svc_fork/preprocessing/preprocess_speaker_diarization.py\n@@ -52,7 +52,7 @@\n speaker_count[speaker] += 1\n audio_cut = audio[int(segment.start * sr) : int(segment.end * sr)]\n sf.write(\n- (output_dir / f\"{speaker}_{speaker_count[speaker]}.wav\"),\n+ (output_dir / f\"{speaker}_{speaker_count[speaker]:04d}.wav\"),\n audio_cut,\n sr,\n )\n", "issue": "Ensure proper sorting by name for output files\n### Is your feature request related to a problem? Please describe.\n\nTo enhance readability and ensure proper sorting by name, the numeric part of output file names should have a fixed width. This can be achieved by adding leading zeros to the numeric part, with four digits likely being sufficient.\n\n### Describe alternatives you've considered\n\nI don't have any.\n\n### Additional context\n\n_No response_\n\n### Code of Conduct\n\n- [X] I agree to follow this project's Code of Conduct\n\n### Are you willing to resolve this issue by submitting a Pull Request?\n\n- [X] Yes, I have the time, and I know how to start.\n- [ ] Yes, I have the time, but I don't know how to start. I would need guidance.\n- [ ] No, I don't have the time, although I believe I could do it if I had the time...\n- [ ] No, I don't have the time and I wouldn't even know how to start.\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom collections import defaultdict\nfrom logging import getLogger\nfrom pathlib import Path\n\nimport librosa\nimport soundfile as sf\nimport torch\nfrom joblib import Parallel, delayed\nfrom pyannote.audio import Pipeline\nfrom tqdm import tqdm\nfrom tqdm_joblib import tqdm_joblib\n\nLOG = getLogger(__name__)\n\n\ndef _process_one(\n input_path: Path,\n output_dir: Path,\n sr: int,\n *,\n min_speakers: int = 1,\n max_speakers: int = 1,\n huggingface_token: str | None = None,\n) -> None:\n try:\n audio, sr = librosa.load(input_path, sr=sr, mono=True)\n except Exception as e:\n LOG.warning(f\"Failed to read {input_path}: {e}\")\n return\n pipeline = Pipeline.from_pretrained(\n \"pyannote/speaker-diarization\", use_auth_token=huggingface_token\n )\n if pipeline is None:\n raise ValueError(\"Failed to load pipeline\")\n pipeline = pipeline.to(torch.device(\"cuda\"))\n LOG.info(f\"Processing {input_path}. 
This may take a while...\")\n diarization = pipeline(\n input_path, min_speakers=min_speakers, max_speakers=max_speakers\n )\n\n LOG.info(f\"Found {len(diarization)} tracks, writing to {output_dir}\")\n speaker_count = defaultdict(int)\n\n output_dir.mkdir(parents=True, exist_ok=True)\n for segment, track, speaker in tqdm(\n list(diarization.itertracks(yield_label=True)), desc=f\"Writing {input_path}\"\n ):\n if segment.end - segment.start < 1:\n continue\n speaker_count[speaker] += 1\n audio_cut = audio[int(segment.start * sr) : int(segment.end * sr)]\n sf.write(\n (output_dir / f\"{speaker}_{speaker_count[speaker]}.wav\"),\n audio_cut,\n sr,\n )\n\n LOG.info(f\"Speaker count: {speaker_count}\")\n\n\ndef preprocess_speaker_diarization(\n input_dir: Path | str,\n output_dir: Path | str,\n sr: int,\n *,\n min_speakers: int = 1,\n max_speakers: int = 1,\n huggingface_token: str | None = None,\n n_jobs: int = -1,\n) -> None:\n if huggingface_token is not None and not huggingface_token.startswith(\"hf_\"):\n LOG.warning(\"Huggingface token probably should start with hf_\")\n if not torch.cuda.is_available():\n LOG.warning(\"CUDA is not available. This will be extremely slow.\")\n input_dir = Path(input_dir)\n output_dir = Path(output_dir)\n input_dir.mkdir(parents=True, exist_ok=True)\n output_dir.mkdir(parents=True, exist_ok=True)\n input_paths = list(input_dir.rglob(\"*.*\"))\n with tqdm_joblib(desc=\"Preprocessing speaker diarization\", total=len(input_paths)):\n Parallel(n_jobs=n_jobs)(\n delayed(_process_one)(\n input_path,\n output_dir / input_path.relative_to(input_dir).parent / input_path.stem,\n sr,\n max_speakers=max_speakers,\n min_speakers=min_speakers,\n huggingface_token=huggingface_token,\n )\n for input_path in input_paths\n )\n", "path": "src/so_vits_svc_fork/preprocessing/preprocess_speaker_diarization.py"}]}
1678
177
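The one-character fix in the so-vits-svc diff (`{:04d}`) matters because lexicographic and numeric order diverge for unpadded counters. A runnable demonstration, with `spk` as a placeholder speaker label:

```python
# Unpadded counters sort lexicographically, not numerically; :04d fixes that.
names_before = [f"spk_{i}.wav" for i in (1, 2, 10)]
names_after = [f"spk_{i:04d}.wav" for i in (1, 2, 10)]

print(sorted(names_before))  # ['spk_1.wav', 'spk_10.wav', 'spk_2.wav']
print(sorted(names_after))   # ['spk_0001.wav', 'spk_0002.wav', 'spk_0010.wav']
```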
gh_patches_debug_6308
rasdani/github-patches
git_diff
DataBiosphere__toil-2583
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Env value of TOIL_DOCKER_REGISTRY is not honored in the docker image When a docker image is build with a custom `TOIL_DOCKER_REGISTRY`, the built docker image still looks at `quay.io/ucsc_cgl` for pulling related images (e.g. `mtail`, `grafana`, etc). The fix is to put the actual registry in `version_template.py`. </issue> <code> [start of version_template.py] 1 # Copyright (C) 2015-2018 Regents of the University of California 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 """This script is a template for src/toil/version.py. Running it without arguments echoes all 16 globals, i.e. module attributes. Constant assignments will be echoed verbatim while callables 17 will be invoked and their result echoed as an assignment using the function name as the left-hand 18 side and the return value of the function as right-hand side. To prevent a module attribute from 19 being echoed, start or end the attribute name with an underscore. To print the value of a single 20 symbol, pass the name of that attribute to the script as a command line argument. You can also 21 import the expand_ function and invoke it directly with either no or exactly one argument.""" 22 23 # Note to maintainers: 24 # 25 # - don't import at module level unless you want the imported value to be included in the output 26 # - only import from the Python standard run-time library (you can't have any dependencies) 27 28 29 baseVersion = '3.20.0a1' 30 cgcloudVersion = '1.6.0a1.dev393' 31 dockerRegistry = 'quay.io/ucsc_cgl' 32 dockerName = 'toil' 33 34 35 def version(): 36 """ 37 A version identifier that includes the full-legth commit SHA1 and an optional suffix to 38 indicate that the working copy is dirty. 39 """ 40 return _version() 41 42 43 def shortVersion(): 44 """ 45 A version identifier that includes the abbreviated commit SHA1 and an optional suffix to 46 indicate that the working copy is dirty. 47 """ 48 return _version(shorten=True) 49 50 51 def _version(shorten=False): 52 return '-'.join(filter(None, [distVersion(), 53 currentCommit()[:7 if shorten else None], 54 ('dirty' if dirty() else None)])) 55 56 57 def distVersion(): 58 """ 59 The distribution version identifying a published release on PyPI. 60 """ 61 from pkg_resources import parse_version 62 build_number = buildNumber() 63 parsedBaseVersion = parse_version(baseVersion) 64 if isinstance(parsedBaseVersion, tuple): 65 raise RuntimeError("Setuptools version 8.0 or newer required. Update by running " 66 "'pip install setuptools --upgrade'") 67 68 if build_number is not None and parsedBaseVersion.is_prerelease: 69 return baseVersion + '.dev' + build_number 70 else: 71 return baseVersion 72 73 74 def dockerTag(): 75 """ 76 The primary tag of the Docker image for the appliance. This uniquely identifies the appliance 77 image. 
78 """ 79 return version() 80 81 82 def dockerShortTag(): 83 """ 84 A secondary, shortened form of :func:`dockerTag` with which to tag the appliance image for 85 convenience. 86 """ 87 return shortVersion() 88 89 90 def dockerMinimalTag(): 91 """ 92 A minimal tag with which to tag the appliance image for convenience. Does not include 93 information about the git commit or working copy dirtyness. 94 """ 95 return distVersion() 96 97 98 def buildNumber(): 99 """ 100 The Jenkins build number, if defined, else None. 101 """ 102 import os 103 return os.getenv('BUILD_NUMBER') 104 105 106 def currentCommit(): 107 from subprocess import check_output 108 try: 109 output = check_output('git log --pretty=oneline -n 1 -- $(pwd)', shell=True).decode('utf-8').split()[0] 110 except: 111 # Return this we are not in a git environment. 112 return '000' 113 if isinstance(output, bytes): 114 return output.decode('utf-8') 115 return str(output) 116 117 118 def dirty(): 119 from subprocess import call 120 try: 121 return 0 != call('(git diff --exit-code ' 122 '&& git diff --cached --exit-code) > /dev/null', shell=True) 123 except: 124 return False # In case the git call fails. 125 126 127 def expand_(name=None): 128 variables = {k: v for k, v in globals().items() 129 if not k.startswith('_') and not k.endswith('_')} 130 131 def resolve(k): 132 v = variables[k] 133 if callable(v): 134 v = v() 135 return v 136 137 if name is None: 138 return ''.join("%s = %s\n" % (k, repr(resolve(k))) for k, v in variables.items()) 139 else: 140 return resolve(name) 141 142 143 def _main(): 144 import sys 145 sys.stdout.write(expand_(*sys.argv[1:])) 146 147 148 if __name__ == '__main__': 149 _main() 150 [end of version_template.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/version_template.py b/version_template.py --- a/version_template.py +++ b/version_template.py @@ -28,7 +28,6 @@ baseVersion = '3.20.0a1' cgcloudVersion = '1.6.0a1.dev393' -dockerRegistry = 'quay.io/ucsc_cgl' dockerName = 'toil' @@ -115,6 +114,11 @@ return str(output) +def dockerRegistry(): + import os + return os.getenv('TOIL_DOCKER_REGISTRY', 'quay.io/ucsc_cgl') + + def dirty(): from subprocess import call try:
{"golden_diff": "diff --git a/version_template.py b/version_template.py\n--- a/version_template.py\n+++ b/version_template.py\n@@ -28,7 +28,6 @@\n \n baseVersion = '3.20.0a1'\n cgcloudVersion = '1.6.0a1.dev393'\n-dockerRegistry = 'quay.io/ucsc_cgl'\n dockerName = 'toil'\n \n \n@@ -115,6 +114,11 @@\n return str(output)\n \n \n+def dockerRegistry():\n+ import os\n+ return os.getenv('TOIL_DOCKER_REGISTRY', 'quay.io/ucsc_cgl')\n+\n+\n def dirty():\n from subprocess import call\n try:\n", "issue": "Env value of TOIL_DOCKER_REGISTRY is not honored in the docker image\nWhen a docker image is build with a custom `TOIL_DOCKER_REGISTRY`, the built docker image still looks at `quay.io/ucsc_cgl` for pulling related images (e.g. `mtail`, `grafana`, etc). The fix is to put the actual registry in `version_template.py`.\n", "before_files": [{"content": "# Copyright (C) 2015-2018 Regents of the University of California\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"This script is a template for src/toil/version.py. Running it without arguments echoes all\nglobals, i.e. module attributes. Constant assignments will be echoed verbatim while callables\nwill be invoked and their result echoed as an assignment using the function name as the left-hand\nside and the return value of the function as right-hand side. To prevent a module attribute from\nbeing echoed, start or end the attribute name with an underscore. To print the value of a single\nsymbol, pass the name of that attribute to the script as a command line argument. You can also\nimport the expand_ function and invoke it directly with either no or exactly one argument.\"\"\"\n\n# Note to maintainers:\n#\n# - don't import at module level unless you want the imported value to be included in the output\n# - only import from the Python standard run-time library (you can't have any dependencies)\n\n\nbaseVersion = '3.20.0a1'\ncgcloudVersion = '1.6.0a1.dev393'\ndockerRegistry = 'quay.io/ucsc_cgl'\ndockerName = 'toil'\n\n\ndef version():\n \"\"\"\n A version identifier that includes the full-legth commit SHA1 and an optional suffix to\n indicate that the working copy is dirty.\n \"\"\"\n return _version()\n\n\ndef shortVersion():\n \"\"\"\n A version identifier that includes the abbreviated commit SHA1 and an optional suffix to\n indicate that the working copy is dirty.\n \"\"\"\n return _version(shorten=True)\n\n\ndef _version(shorten=False):\n return '-'.join(filter(None, [distVersion(),\n currentCommit()[:7 if shorten else None],\n ('dirty' if dirty() else None)]))\n\n\ndef distVersion():\n \"\"\"\n The distribution version identifying a published release on PyPI.\n \"\"\"\n from pkg_resources import parse_version\n build_number = buildNumber()\n parsedBaseVersion = parse_version(baseVersion)\n if isinstance(parsedBaseVersion, tuple):\n raise RuntimeError(\"Setuptools version 8.0 or newer required. 
Update by running \"\n \"'pip install setuptools --upgrade'\")\n\n if build_number is not None and parsedBaseVersion.is_prerelease:\n return baseVersion + '.dev' + build_number\n else:\n return baseVersion\n\n\ndef dockerTag():\n \"\"\"\n The primary tag of the Docker image for the appliance. This uniquely identifies the appliance\n image.\n \"\"\"\n return version()\n\n\ndef dockerShortTag():\n \"\"\"\n A secondary, shortened form of :func:`dockerTag` with which to tag the appliance image for\n convenience.\n \"\"\"\n return shortVersion()\n\n\ndef dockerMinimalTag():\n \"\"\"\n A minimal tag with which to tag the appliance image for convenience. Does not include\n information about the git commit or working copy dirtyness.\n \"\"\"\n return distVersion()\n\n\ndef buildNumber():\n \"\"\"\n The Jenkins build number, if defined, else None.\n \"\"\"\n import os\n return os.getenv('BUILD_NUMBER')\n\n\ndef currentCommit():\n from subprocess import check_output\n try:\n output = check_output('git log --pretty=oneline -n 1 -- $(pwd)', shell=True).decode('utf-8').split()[0]\n except:\n # Return this we are not in a git environment.\n return '000'\n if isinstance(output, bytes):\n return output.decode('utf-8')\n return str(output)\n\n\ndef dirty():\n from subprocess import call\n try:\n return 0 != call('(git diff --exit-code '\n '&& git diff --cached --exit-code) > /dev/null', shell=True)\n except:\n return False # In case the git call fails.\n\n\ndef expand_(name=None):\n variables = {k: v for k, v in globals().items()\n if not k.startswith('_') and not k.endswith('_')}\n\n def resolve(k):\n v = variables[k]\n if callable(v):\n v = v()\n return v\n\n if name is None:\n return ''.join(\"%s = %s\\n\" % (k, repr(resolve(k))) for k, v in variables.items())\n else:\n return resolve(name)\n\n\ndef _main():\n import sys\n sys.stdout.write(expand_(*sys.argv[1:]))\n\n\nif __name__ == '__main__':\n _main()\n", "path": "version_template.py"}]}
2041
158
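The toil patch replaces the hard-coded `dockerRegistry` constant with a lookup that honors the environment variable. The added function, reproduced as a standalone sketch:

```python
# Registry lookup that honors TOIL_DOCKER_REGISTRY, as introduced by the patch.
import os

def dockerRegistry():
    return os.getenv('TOIL_DOCKER_REGISTRY', 'quay.io/ucsc_cgl')

print(dockerRegistry())  # env value if set, else the quay.io default
```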
gh_patches_debug_21900
rasdani/github-patches
git_diff
fidals__shopelectro-928
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Get rid of pack prices multiplication PO said that 1C stock now is giving already multiplied prices </issue> <code> [start of shopelectro/management/commands/_update_catalog/update_pack.py] 1 """ 2 Update Product.in_pack and prices. 3 4 The update_catalog command always resets product prices to per unit format, so: 5 1. Parse in pack quantity from Tag.name and save it to Product.in_pack 6 2. Multiply product prices by in_pack value and save. 7 """ 8 import logging 9 10 from django.conf import settings 11 from django.db import models, transaction 12 13 from catalog.models_expressions import Substring 14 15 from shopelectro.exception import UpdateCatalogException 16 from shopelectro.models import TagQuerySet, TagGroup 17 18 logger = logging.getLogger(__name__) 19 PRICES = ['price', 'purchase_price', 'wholesale_small', 'wholesale_medium', 'wholesale_large'] 20 21 22 def find_pack_group() -> TagGroup: 23 try: 24 pack_group = TagGroup.objects.get_pack() 25 except TagGroup.DoesNotExist as error: 26 raise UpdateCatalogException( 27 'Update the PACK_GROUP_UUID django settings variable to set the new relevant UUID. ' 28 + str(error) 29 ) 30 if settings.PACK_GROUP_NAME.lower() not in pack_group.name.lower(): 31 raise UpdateCatalogException( 32 'The pack group name isn\'t matched with the set name:' 33 f' Pack group name: {pack_group.name}\n' 34 f' Set name: {settings.PACK_GROUP_NAME}\n' 35 'Update the PACK_GROUP_NAME django settings variable to set the new relevant name.' 36 ) 37 38 return pack_group 39 40 41 def update_in_packs(packs: TagQuerySet): 42 """Parse and save in pack quantity values.""" 43 packs = ( 44 packs 45 .annotate( 46 in_pack_str=Substring( 47 models.F('name'), 48 models.Value('[0-9]+\+?[0-9]*'))) 49 .exclude(in_pack_str__exact='') 50 ) 51 52 for pack in packs: 53 in_pack = sum(map(int, pack.in_pack_str.split('+'))) 54 pack.products.all().update(in_pack=max(in_pack, 1)) 55 56 57 def update_prices(packs: TagQuerySet): 58 """Multiply product prices on in pack quantity.""" 59 fields_to_update = {} 60 for price in PRICES: 61 fields_to_update[price] = models.F(price) * models.F('in_pack') 62 63 with transaction.atomic(): 64 packs.products().update(**fields_to_update) 65 66 67 def main(*args, **kwargs): 68 packs = find_pack_group().tags.all().prefetch_related('products') 69 update_in_packs(packs) 70 update_prices(packs) 71 [end of shopelectro/management/commands/_update_catalog/update_pack.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/shopelectro/management/commands/_update_catalog/update_pack.py b/shopelectro/management/commands/_update_catalog/update_pack.py --- a/shopelectro/management/commands/_update_catalog/update_pack.py +++ b/shopelectro/management/commands/_update_catalog/update_pack.py @@ -8,10 +8,9 @@ import logging from django.conf import settings -from django.db import models, transaction +from django.db import models from catalog.models_expressions import Substring - from shopelectro.exception import UpdateCatalogException from shopelectro.models import TagQuerySet, TagGroup @@ -54,17 +53,6 @@ pack.products.all().update(in_pack=max(in_pack, 1)) -def update_prices(packs: TagQuerySet): - """Multiply product prices on in pack quantity.""" - fields_to_update = {} - for price in PRICES: - fields_to_update[price] = models.F(price) * models.F('in_pack') - - with transaction.atomic(): - packs.products().update(**fields_to_update) - - def main(*args, **kwargs): packs = find_pack_group().tags.all().prefetch_related('products') update_in_packs(packs) - update_prices(packs)
{"golden_diff": "diff --git a/shopelectro/management/commands/_update_catalog/update_pack.py b/shopelectro/management/commands/_update_catalog/update_pack.py\n--- a/shopelectro/management/commands/_update_catalog/update_pack.py\n+++ b/shopelectro/management/commands/_update_catalog/update_pack.py\n@@ -8,10 +8,9 @@\n import logging\n \n from django.conf import settings\n-from django.db import models, transaction\n+from django.db import models\n \n from catalog.models_expressions import Substring\n-\n from shopelectro.exception import UpdateCatalogException\n from shopelectro.models import TagQuerySet, TagGroup\n \n@@ -54,17 +53,6 @@\n pack.products.all().update(in_pack=max(in_pack, 1))\n \n \n-def update_prices(packs: TagQuerySet):\n- \"\"\"Multiply product prices on in pack quantity.\"\"\"\n- fields_to_update = {}\n- for price in PRICES:\n- fields_to_update[price] = models.F(price) * models.F('in_pack')\n-\n- with transaction.atomic():\n- packs.products().update(**fields_to_update)\n-\n-\n def main(*args, **kwargs):\n packs = find_pack_group().tags.all().prefetch_related('products')\n update_in_packs(packs)\n- update_prices(packs)\n", "issue": "Get rid of pack prices multiplication\nPO said that 1C stock now is giving already multiplied prices\n", "before_files": [{"content": "\"\"\"\nUpdate Product.in_pack and prices.\n\nThe update_catalog command always resets product prices to per unit format, so:\n1. Parse in pack quantity from Tag.name and save it to Product.in_pack\n2. Multiply product prices by in_pack value and save.\n\"\"\"\nimport logging\n\nfrom django.conf import settings\nfrom django.db import models, transaction\n\nfrom catalog.models_expressions import Substring\n\nfrom shopelectro.exception import UpdateCatalogException\nfrom shopelectro.models import TagQuerySet, TagGroup\n\nlogger = logging.getLogger(__name__)\nPRICES = ['price', 'purchase_price', 'wholesale_small', 'wholesale_medium', 'wholesale_large']\n\n\ndef find_pack_group() -> TagGroup:\n try:\n pack_group = TagGroup.objects.get_pack()\n except TagGroup.DoesNotExist as error:\n raise UpdateCatalogException(\n 'Update the PACK_GROUP_UUID django settings variable to set the new relevant UUID. '\n + str(error)\n )\n if settings.PACK_GROUP_NAME.lower() not in pack_group.name.lower():\n raise UpdateCatalogException(\n 'The pack group name isn\\'t matched with the set name:'\n f' Pack group name: {pack_group.name}\\n'\n f' Set name: {settings.PACK_GROUP_NAME}\\n'\n 'Update the PACK_GROUP_NAME django settings variable to set the new relevant name.'\n )\n\n return pack_group\n\n\ndef update_in_packs(packs: TagQuerySet):\n \"\"\"Parse and save in pack quantity values.\"\"\"\n packs = (\n packs\n .annotate(\n in_pack_str=Substring(\n models.F('name'),\n models.Value('[0-9]+\\+?[0-9]*')))\n .exclude(in_pack_str__exact='')\n )\n\n for pack in packs:\n in_pack = sum(map(int, pack.in_pack_str.split('+')))\n pack.products.all().update(in_pack=max(in_pack, 1))\n\n\ndef update_prices(packs: TagQuerySet):\n \"\"\"Multiply product prices on in pack quantity.\"\"\"\n fields_to_update = {}\n for price in PRICES:\n fields_to_update[price] = models.F(price) * models.F('in_pack')\n\n with transaction.atomic():\n packs.products().update(**fields_to_update)\n\n\ndef main(*args, **kwargs):\n packs = find_pack_group().tags.all().prefetch_related('products')\n update_in_packs(packs)\n update_prices(packs)\n", "path": "shopelectro/management/commands/_update_catalog/update_pack.py"}]}
1226
283
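After the shopelectro patch only the pack-quantity parsing survives; prices are left exactly as 1C delivers them. Below is a plain-`re` re-creation of the quantity parsing that `update_in_packs` expresses through Django's `Substring` annotation; the sample tag strings are invented.

```python
# Plain-re version of the in-pack quantity parser; tag names are made up.
import re

def parse_in_pack(tag_name):
    match = re.search(r"[0-9]+\+?[0-9]*", tag_name)
    if match is None:
        return 1
    return max(sum(map(int, match.group(0).split("+"))), 1)

print(parse_in_pack("AA 2+4"))   # 6
print(parse_in_pack("no pack"))  # 1
```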
gh_patches_debug_6604
rasdani/github-patches
git_diff
OpenNMT__OpenNMT-tf-222
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> assert_state_is_compatible() cannot detect dimension difference between encoder_state and decoder_zero_state when encoder and decoder dimensions are not the same in NMTSmall model I just followed the instructions on the page [http://opennmt.net/OpenNMT-tf/quickstart.html](http://opennmt.net/OpenNMT-tf/quickstart.html) and played around a little bit with the NMTSmall model by setting a different `num_units` value to the `UnidirectionalRNNEncoder`, say `256`, which is different from the `512` for the `AttentionalRNNDecoder`. This line https://github.com/OpenNMT/OpenNMT-tf/blob/ae0ada93a652c02793de3b481e20315716ea4f23/opennmt/layers/bridge.py#L56 in the `CopyBridge` did not throw any error, even though the `encoder_state` and `decoder_zero_state` do not have the same dimensions, `256` vs `512`. It probably natual for someone to think of using the `DenseBridge` when dimensions are set differently. However, the `CopyBridge` should be throwing some errors in such misusage cases here, instead of letting one to figure out that with some error message as follows `ValueError: Dimensions must be equal, but are 1280 and 1536 for 'seq2seq/parallel_0/seq2seq/decoder_1/decoder/while/BasicDecoderStep/decoder/attention_wrapper/attention_wrapper/multi_rnn_cell/cell_0/lstm_cell/MatMul' (op: 'MatMul') with input shapes: [?,1280], [1536,2048].` Can anyone please explain why the https://github.com/OpenNMT/OpenNMT-tf/blob/ae0ada93a652c02793de3b481e20315716ea4f23/opennmt/layers/bridge.py#L28 passed without an issue? Thanks! </issue> <code> [start of opennmt/layers/bridge.py] 1 """Define bridges: logic of passing the encoder state to the decoder.""" 2 3 import abc 4 import six 5 6 import tensorflow as tf 7 8 9 def assert_state_is_compatible(expected_state, state): 10 """Asserts that states are compatible. 11 12 Args: 13 expected_state: The reference state. 14 state: The state that must be compatible with :obj:`expected_state`. 15 16 Raises: 17 ValueError: if the states are incompatible. 18 """ 19 # Check structure compatibility. 20 tf.contrib.framework.nest.assert_same_structure(expected_state, state) 21 22 # Check shape compatibility. 23 expected_state_flat = tf.contrib.framework.nest.flatten(expected_state) 24 state_flat = tf.contrib.framework.nest.flatten(state) 25 26 for x, y in zip(expected_state_flat, state_flat): 27 if tf.contrib.framework.is_tensor(x): 28 tf.contrib.framework.with_same_shape(x, y) 29 30 31 @six.add_metaclass(abc.ABCMeta) 32 class Bridge(object): 33 """Base class for bridges.""" 34 35 def __call__(self, encoder_state, decoder_zero_state): 36 """Returns the initial decoder state. 37 38 Args: 39 encoder_state: The encoder state. 40 decoder_zero_state: The default decoder state. 41 42 Returns: 43 The decoder initial state. 44 """ 45 return self._build(encoder_state, decoder_zero_state) 46 47 @abc.abstractmethod 48 def _build(self, encoder_state, decoder_zero_state): 49 raise NotImplementedError() 50 51 52 class CopyBridge(Bridge): 53 """A bridge that passes the encoder state as is.""" 54 55 def _build(self, encoder_state, decoder_zero_state): 56 assert_state_is_compatible(decoder_zero_state, encoder_state) 57 return encoder_state 58 59 60 class ZeroBridge(Bridge): 61 """A bridge that does not pass information from the encoder.""" 62 63 def _build(self, encoder_state, decoder_zero_state): 64 # Simply return the default decoder state. 
65 return decoder_zero_state 66 67 68 class DenseBridge(Bridge): 69 """A bridge that applies a parameterized linear transformation from the 70 encoder state to the decoder state size. 71 """ 72 73 def __init__(self, activation=None): 74 """Initializes the bridge. 75 76 Args: 77 activation: Activation function (a callable). 78 Set it to ``None`` to maintain a linear activation. 79 """ 80 self.activation = activation 81 82 def _build(self, encoder_state, decoder_zero_state): 83 # Flattened states. 84 encoder_state_flat = tf.contrib.framework.nest.flatten(encoder_state) 85 decoder_state_flat = tf.contrib.framework.nest.flatten(decoder_zero_state) 86 87 # View encoder state as a single tensor. 88 encoder_state_concat = tf.concat(encoder_state_flat, 1) 89 90 # Extract decoder state sizes. 91 decoder_state_size = [] 92 for tensor in decoder_state_flat: 93 decoder_state_size.append(tensor.get_shape().as_list()[-1]) 94 95 decoder_total_size = sum(decoder_state_size) 96 97 # Apply linear transformation. 98 transformed = tf.layers.dense( 99 encoder_state_concat, 100 decoder_total_size, 101 activation=self.activation) 102 103 # Split resulting tensor to match the decoder state size. 104 splitted = tf.split(transformed, decoder_state_size, axis=1) 105 106 # Pack as the origial decoder state. 107 return tf.contrib.framework.nest.pack_sequence_as(decoder_zero_state, splitted) 108 [end of opennmt/layers/bridge.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/opennmt/layers/bridge.py b/opennmt/layers/bridge.py --- a/opennmt/layers/bridge.py +++ b/opennmt/layers/bridge.py @@ -25,7 +25,11 @@ for x, y in zip(expected_state_flat, state_flat): if tf.contrib.framework.is_tensor(x): - tf.contrib.framework.with_same_shape(x, y) + expected_depth = x.get_shape().as_list()[-1] + depth = y.get_shape().as_list()[-1] + if depth != expected_depth: + raise ValueError("Tensor %s in state has shape %s which is incompatible " + "with the target shape %s" % (y.name, y.shape, x.shape)) @six.add_metaclass(abc.ABCMeta)
{"golden_diff": "diff --git a/opennmt/layers/bridge.py b/opennmt/layers/bridge.py\n--- a/opennmt/layers/bridge.py\n+++ b/opennmt/layers/bridge.py\n@@ -25,7 +25,11 @@\n \n for x, y in zip(expected_state_flat, state_flat):\n if tf.contrib.framework.is_tensor(x):\n- tf.contrib.framework.with_same_shape(x, y)\n+ expected_depth = x.get_shape().as_list()[-1]\n+ depth = y.get_shape().as_list()[-1]\n+ if depth != expected_depth:\n+ raise ValueError(\"Tensor %s in state has shape %s which is incompatible \"\n+ \"with the target shape %s\" % (y.name, y.shape, x.shape))\n \n \n @six.add_metaclass(abc.ABCMeta)\n", "issue": "assert_state_is_compatible() cannot detect dimension difference between encoder_state and decoder_zero_state when encoder and decoder dimensions are not the same in NMTSmall model\nI just followed the instructions on the page [http://opennmt.net/OpenNMT-tf/quickstart.html](http://opennmt.net/OpenNMT-tf/quickstart.html) and played around a little bit with the NMTSmall model by setting a different `num_units` value to the `UnidirectionalRNNEncoder`, say `256`, which is different from the `512` for the `AttentionalRNNDecoder`. \r\n\r\nThis line\r\n\r\nhttps://github.com/OpenNMT/OpenNMT-tf/blob/ae0ada93a652c02793de3b481e20315716ea4f23/opennmt/layers/bridge.py#L56\r\n\r\nin the `CopyBridge` did not throw any error, even though the `encoder_state` and `decoder_zero_state` do not have the same dimensions, `256` vs `512`.\r\n\r\nIt probably natual for someone to think of using the `DenseBridge` when dimensions are set differently. However, the `CopyBridge` should be throwing some errors in such misusage cases here, instead of letting one to figure out that with some error message as follows\r\n\r\n`ValueError: Dimensions must be equal, but are 1280 and 1536 for 'seq2seq/parallel_0/seq2seq/decoder_1/decoder/while/BasicDecoderStep/decoder/attention_wrapper/attention_wrapper/multi_rnn_cell/cell_0/lstm_cell/MatMul' (op: 'MatMul') with input shapes: [?,1280], [1536,2048].`\r\n\r\nCan anyone please explain why the \r\nhttps://github.com/OpenNMT/OpenNMT-tf/blob/ae0ada93a652c02793de3b481e20315716ea4f23/opennmt/layers/bridge.py#L28\r\npassed without an issue?\r\n\r\nThanks!\n", "before_files": [{"content": "\"\"\"Define bridges: logic of passing the encoder state to the decoder.\"\"\"\n\nimport abc\nimport six\n\nimport tensorflow as tf\n\n\ndef assert_state_is_compatible(expected_state, state):\n \"\"\"Asserts that states are compatible.\n\n Args:\n expected_state: The reference state.\n state: The state that must be compatible with :obj:`expected_state`.\n\n Raises:\n ValueError: if the states are incompatible.\n \"\"\"\n # Check structure compatibility.\n tf.contrib.framework.nest.assert_same_structure(expected_state, state)\n\n # Check shape compatibility.\n expected_state_flat = tf.contrib.framework.nest.flatten(expected_state)\n state_flat = tf.contrib.framework.nest.flatten(state)\n\n for x, y in zip(expected_state_flat, state_flat):\n if tf.contrib.framework.is_tensor(x):\n tf.contrib.framework.with_same_shape(x, y)\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass Bridge(object):\n \"\"\"Base class for bridges.\"\"\"\n\n def __call__(self, encoder_state, decoder_zero_state):\n \"\"\"Returns the initial decoder state.\n\n Args:\n encoder_state: The encoder state.\n decoder_zero_state: The default decoder state.\n\n Returns:\n The decoder initial state.\n \"\"\"\n return self._build(encoder_state, decoder_zero_state)\n\n @abc.abstractmethod\n def _build(self, encoder_state, 
decoder_zero_state):\n raise NotImplementedError()\n\n\nclass CopyBridge(Bridge):\n \"\"\"A bridge that passes the encoder state as is.\"\"\"\n\n def _build(self, encoder_state, decoder_zero_state):\n assert_state_is_compatible(decoder_zero_state, encoder_state)\n return encoder_state\n\n\nclass ZeroBridge(Bridge):\n \"\"\"A bridge that does not pass information from the encoder.\"\"\"\n\n def _build(self, encoder_state, decoder_zero_state):\n # Simply return the default decoder state.\n return decoder_zero_state\n\n\nclass DenseBridge(Bridge):\n \"\"\"A bridge that applies a parameterized linear transformation from the\n encoder state to the decoder state size.\n \"\"\"\n\n def __init__(self, activation=None):\n \"\"\"Initializes the bridge.\n\n Args:\n activation: Activation function (a callable).\n Set it to ``None`` to maintain a linear activation.\n \"\"\"\n self.activation = activation\n\n def _build(self, encoder_state, decoder_zero_state):\n # Flattened states.\n encoder_state_flat = tf.contrib.framework.nest.flatten(encoder_state)\n decoder_state_flat = tf.contrib.framework.nest.flatten(decoder_zero_state)\n\n # View encoder state as a single tensor.\n encoder_state_concat = tf.concat(encoder_state_flat, 1)\n\n # Extract decoder state sizes.\n decoder_state_size = []\n for tensor in decoder_state_flat:\n decoder_state_size.append(tensor.get_shape().as_list()[-1])\n\n decoder_total_size = sum(decoder_state_size)\n\n # Apply linear transformation.\n transformed = tf.layers.dense(\n encoder_state_concat,\n decoder_total_size,\n activation=self.activation)\n\n # Split resulting tensor to match the decoder state size.\n splitted = tf.split(transformed, decoder_state_size, axis=1)\n\n # Pack as the origial decoder state.\n return tf.contrib.framework.nest.pack_sequence_as(decoder_zero_state, splitted)\n", "path": "opennmt/layers/bridge.py"}]}
1,927
182
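The record above turns on a subtle TensorFlow 1.x pitfall: `tf.contrib.framework.with_same_shape(x, y)` only *builds* an assertion op and returns it, and since the return value was discarded, a 256-unit encoder state flowed into a 512-unit decoder unchecked. The golden diff compares static depths eagerly instead. A minimal, dependency-free sketch of that pattern follows; the `StubTensor` class and tensor names are illustrative only, not part of OpenNMT-tf:

```python
class StubTensor:
    """Stand-in for a framework tensor exposing a name and a static shape."""
    def __init__(self, name, shape):
        self.name = name
        self.shape = shape  # e.g. [None, 512]: batch dim unknown, depth known

def assert_same_depth(expected, actual):
    # Mirrors the CopyBridge fix: compare the last (depth) dimension directly
    # instead of building an assertion op that nothing ever runs.
    expected_depth = expected.shape[-1]
    depth = actual.shape[-1]
    if depth != expected_depth:
        raise ValueError(
            "Tensor %s in state has shape %s which is incompatible "
            "with the target shape %s" % (actual.name, actual.shape, expected.shape))

decoder_zero_state = StubTensor("decoder/h", [None, 512])
encoder_state = StubTensor("encoder/h", [None, 256])

try:
    assert_same_depth(decoder_zero_state, encoder_state)
except ValueError as err:
    print(err)  # the 256-vs-512 mismatch is now caught at graph-build time
```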
gh_patches_debug_17593
rasdani/github-patches
git_diff
OpenMined__PySyft-5122
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add DCGAN example Duet Notebooks ## Description Add two notebook's which reflect the DCGAN example split into DO (Data Owner) and DS (Data Scientist): https://github.com/pytorch/examples/blob/master/dcgan/ ## Definition of Done The partially runnable DCGAN example notebooks should be in the examples/duet/dcgan folder and a README.md should be added in the parent examples/duet directory with a link to the original example and our notebook. Add Super Resolution Example Duet Notebooks ## Description Add two notebook's which reflect the Super Resolution example split into DO (Data Owner) and DS (Data Scientist): https://github.com/pytorch/examples/blob/master/super_resolution/ ## Definition of Done The partially runnable Super Resolution example notebooks should be in the examples/duet/super_resolution folder and a README.md should be added in the parent examples/duet directory with a link to the original example and our notebook. Add DCGAN example Duet Notebooks ## Description Add two notebook's which reflect the DCGAN example split into DO (Data Owner) and DS (Data Scientist): https://github.com/pytorch/examples/blob/master/dcgan/ ## Definition of Done The partially runnable DCGAN example notebooks should be in the examples/duet/dcgan folder and a README.md should be added in the parent examples/duet directory with a link to the original example and our notebook. </issue> <code> [start of src/syft/lib/torch/__init__.py] 1 # stdlib 2 from typing import Any 3 from typing import Dict 4 from typing import Union 5 6 # third party 7 from packaging import version 8 import torch 9 10 # syft relative 11 from . import device # noqa: 401 12 from . import parameter # noqa: 401 13 from . 
import uppercase_tensor # noqa: 401 14 from ...ast.globals import Globals 15 from ...logger import critical 16 from .allowlist import allowlist 17 18 TORCH_VERSION = version.parse(torch.__version__.split("+")[0]) 19 20 21 def get_return_type(support_dict: Union[str, Dict[str, str]]) -> str: 22 if isinstance(support_dict, str): 23 return support_dict 24 else: 25 return support_dict["return_type"] 26 27 28 def version_supported(support_dict: Union[str, Dict[str, str]]) -> bool: 29 if isinstance(support_dict, str): 30 return True 31 else: 32 # if we are on either side of the min or max versions we don't support this op 33 if "min_version" in support_dict and TORCH_VERSION < version.parse( 34 support_dict["min_version"] 35 ): 36 return False 37 if "max_version" in support_dict and TORCH_VERSION > version.parse( 38 support_dict["max_version"] 39 ): 40 return False 41 return True 42 43 44 def create_torch_ast(client: Any = None) -> Globals: 45 ast = Globals(client) 46 47 # most methods work in all versions and have a single return type 48 # for the more complicated ones we pass a dict with keys like return_type and 49 # min_version 50 for method, return_type_name_or_dict in allowlist.items(): 51 if version_supported(support_dict=return_type_name_or_dict): 52 return_type = get_return_type(support_dict=return_type_name_or_dict) 53 if return_type == "unknown": 54 # this allows us to import them for testing 55 continue 56 ast.add_path( 57 path=method, framework_reference=torch, return_type_name=return_type 58 ) 59 # add all the torch.nn.Parameter hooks 60 if method.startswith("torch.Tensor."): 61 method = method.replace("torch.Tensor.", "torch.nn.Parameter.") 62 return_type = return_type.replace("torch.Tensor", "torch.nn.Parameter") 63 ast.add_path( 64 path=method, framework_reference=torch, return_type_name=return_type 65 ) 66 else: 67 critical(f"Skipping {method} not supported in {TORCH_VERSION}") 68 69 for klass in ast.classes: 70 klass.create_pointer_class() 71 klass.create_send_method() 72 klass.create_storable_object_attr_convenience_methods() 73 return ast 74 [end of src/syft/lib/torch/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/syft/lib/torch/__init__.py b/src/syft/lib/torch/__init__.py --- a/src/syft/lib/torch/__init__.py +++ b/src/syft/lib/torch/__init__.py @@ -12,7 +12,7 @@ from . import parameter # noqa: 401 from . import uppercase_tensor # noqa: 401 from ...ast.globals import Globals -from ...logger import critical +from ...logger import info from .allowlist import allowlist TORCH_VERSION = version.parse(torch.__version__.split("+")[0]) @@ -64,7 +64,7 @@ path=method, framework_reference=torch, return_type_name=return_type ) else: - critical(f"Skipping {method} not supported in {TORCH_VERSION}") + info(f"Skipping {method} not supported in {TORCH_VERSION}") for klass in ast.classes: klass.create_pointer_class()
{"golden_diff": "diff --git a/src/syft/lib/torch/__init__.py b/src/syft/lib/torch/__init__.py\n--- a/src/syft/lib/torch/__init__.py\n+++ b/src/syft/lib/torch/__init__.py\n@@ -12,7 +12,7 @@\n from . import parameter # noqa: 401\n from . import uppercase_tensor # noqa: 401\n from ...ast.globals import Globals\n-from ...logger import critical\n+from ...logger import info\n from .allowlist import allowlist\n \n TORCH_VERSION = version.parse(torch.__version__.split(\"+\")[0])\n@@ -64,7 +64,7 @@\n path=method, framework_reference=torch, return_type_name=return_type\n )\n else:\n- critical(f\"Skipping {method} not supported in {TORCH_VERSION}\")\n+ info(f\"Skipping {method} not supported in {TORCH_VERSION}\")\n \n for klass in ast.classes:\n klass.create_pointer_class()\n", "issue": "Add DCGAN example Duet Notebooks\n## Description\r\nAdd two notebook's which reflect the DCGAN example split into DO (Data Owner) and DS (Data Scientist):\r\nhttps://github.com/pytorch/examples/blob/master/dcgan/\r\n\r\n## Definition of Done\r\nThe partially runnable DCGAN example notebooks should be in the examples/duet/dcgan folder and a README.md should be added in the parent examples/duet directory with a link to the original example and our notebook.\nAdd Super Resolution Example Duet Notebooks\n## Description\r\nAdd two notebook's which reflect the Super Resolution example split into DO (Data Owner) and DS (Data Scientist):\r\nhttps://github.com/pytorch/examples/blob/master/super_resolution/\r\n\r\n## Definition of Done\r\nThe partially runnable Super Resolution example notebooks should be in the examples/duet/super_resolution folder and a README.md should be added in the parent examples/duet directory with a link to the original example and our notebook.\nAdd DCGAN example Duet Notebooks\n## Description\r\nAdd two notebook's which reflect the DCGAN example split into DO (Data Owner) and DS (Data Scientist):\r\nhttps://github.com/pytorch/examples/blob/master/dcgan/\r\n\r\n## Definition of Done\r\nThe partially runnable DCGAN example notebooks should be in the examples/duet/dcgan folder and a README.md should be added in the parent examples/duet directory with a link to the original example and our notebook.\n", "before_files": [{"content": "# stdlib\nfrom typing import Any\nfrom typing import Dict\nfrom typing import Union\n\n# third party\nfrom packaging import version\nimport torch\n\n# syft relative\nfrom . import device # noqa: 401\nfrom . import parameter # noqa: 401\nfrom . 
import uppercase_tensor # noqa: 401\nfrom ...ast.globals import Globals\nfrom ...logger import critical\nfrom .allowlist import allowlist\n\nTORCH_VERSION = version.parse(torch.__version__.split(\"+\")[0])\n\n\ndef get_return_type(support_dict: Union[str, Dict[str, str]]) -> str:\n if isinstance(support_dict, str):\n return support_dict\n else:\n return support_dict[\"return_type\"]\n\n\ndef version_supported(support_dict: Union[str, Dict[str, str]]) -> bool:\n if isinstance(support_dict, str):\n return True\n else:\n # if we are on either side of the min or max versions we don't support this op\n if \"min_version\" in support_dict and TORCH_VERSION < version.parse(\n support_dict[\"min_version\"]\n ):\n return False\n if \"max_version\" in support_dict and TORCH_VERSION > version.parse(\n support_dict[\"max_version\"]\n ):\n return False\n return True\n\n\ndef create_torch_ast(client: Any = None) -> Globals:\n ast = Globals(client)\n\n # most methods work in all versions and have a single return type\n # for the more complicated ones we pass a dict with keys like return_type and\n # min_version\n for method, return_type_name_or_dict in allowlist.items():\n if version_supported(support_dict=return_type_name_or_dict):\n return_type = get_return_type(support_dict=return_type_name_or_dict)\n if return_type == \"unknown\":\n # this allows us to import them for testing\n continue\n ast.add_path(\n path=method, framework_reference=torch, return_type_name=return_type\n )\n # add all the torch.nn.Parameter hooks\n if method.startswith(\"torch.Tensor.\"):\n method = method.replace(\"torch.Tensor.\", \"torch.nn.Parameter.\")\n return_type = return_type.replace(\"torch.Tensor\", \"torch.nn.Parameter\")\n ast.add_path(\n path=method, framework_reference=torch, return_type_name=return_type\n )\n else:\n critical(f\"Skipping {method} not supported in {TORCH_VERSION}\")\n\n for klass in ast.classes:\n klass.create_pointer_class()\n klass.create_send_method()\n klass.create_storable_object_attr_convenience_methods()\n return ast\n", "path": "src/syft/lib/torch/__init__.py"}]}
1,547
221
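Note that the issue text (DCGAN example notebooks) and the golden diff diverge in this record; the diff itself is a one-word logging change. Skipping a torch method that a given torch version does not support is expected bookkeeping, so it is demoted from `critical` to `info`. A stdlib-only sketch of the same decision, where the logger name and `register_method` helper are assumptions rather than syft's actual wrappers:

```python
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("syft.lib.torch")

def register_method(method, supported, torch_version="1.8.0"):
    if supported:
        logger.debug("registering %s", method)
    else:
        # A per-version skip is routine, not a failure: logging it at INFO
        # keeps CRITICAL free for problems that actually need attention.
        logger.info("Skipping %s not supported in %s", method, torch_version)

register_method("torch.Tensor.solve", supported=False)
```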
gh_patches_debug_11307
rasdani/github-patches
git_diff
ivy-llc__ivy-15802
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> asin </issue> <code> [start of ivy/functional/frontends/paddle/tensor/math.py] 1 # global 2 import ivy 3 from ivy.func_wrapper import with_unsupported_dtypes 4 from ivy.functional.frontends.paddle.func_wrapper import ( 5 to_ivy_arrays_and_back, 6 ) 7 8 9 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle") 10 @to_ivy_arrays_and_back 11 def sin(x, name=None): 12 return ivy.sin(x) 13 14 15 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle") 16 @to_ivy_arrays_and_back 17 def cos(x, name=None): 18 return ivy.cos(x) 19 20 21 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle") 22 @to_ivy_arrays_and_back 23 def acos(x, name=None): 24 return ivy.acos(x) 25 26 27 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle") 28 @to_ivy_arrays_and_back 29 def cosh(x, name=None): 30 return ivy.cosh(x) 31 32 33 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle") 34 @to_ivy_arrays_and_back 35 def tanh(x, name=None): 36 return ivy.tanh(x) 37 38 39 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle") 40 @to_ivy_arrays_and_back 41 def acosh(x, name=None): 42 return ivy.acosh(x) 43 [end of ivy/functional/frontends/paddle/tensor/math.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ivy/functional/frontends/paddle/tensor/math.py b/ivy/functional/frontends/paddle/tensor/math.py --- a/ivy/functional/frontends/paddle/tensor/math.py +++ b/ivy/functional/frontends/paddle/tensor/math.py @@ -1,6 +1,6 @@ # global import ivy -from ivy.func_wrapper import with_unsupported_dtypes +from ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes from ivy.functional.frontends.paddle.func_wrapper import ( to_ivy_arrays_and_back, ) @@ -40,3 +40,9 @@ @to_ivy_arrays_and_back def acosh(x, name=None): return ivy.acosh(x) + + +@with_supported_dtypes({"2.4.2 and below": ("float32", "float64")}, "paddle") +@to_ivy_arrays_and_back +def asin(x, name=None): + return ivy.asin(x)
{"golden_diff": "diff --git a/ivy/functional/frontends/paddle/tensor/math.py b/ivy/functional/frontends/paddle/tensor/math.py\n--- a/ivy/functional/frontends/paddle/tensor/math.py\n+++ b/ivy/functional/frontends/paddle/tensor/math.py\n@@ -1,6 +1,6 @@\n # global\n import ivy\n-from ivy.func_wrapper import with_unsupported_dtypes\n+from ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes\n from ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n )\n@@ -40,3 +40,9 @@\n @to_ivy_arrays_and_back\n def acosh(x, name=None):\n return ivy.acosh(x)\n+\n+\n+@with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n+@to_ivy_arrays_and_back\n+def asin(x, name=None):\n+ return ivy.asin(x)\n", "issue": "asin\n\n", "before_files": [{"content": "# global\nimport ivy\nfrom ivy.func_wrapper import with_unsupported_dtypes\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef sin(x, name=None):\n return ivy.sin(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef cos(x, name=None):\n return ivy.cos(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef acos(x, name=None):\n return ivy.acos(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef cosh(x, name=None):\n return ivy.cosh(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef tanh(x, name=None):\n return ivy.tanh(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef acosh(x, name=None):\n return ivy.acosh(x)\n", "path": "ivy/functional/frontends/paddle/tensor/math.py"}]}
1,006
224
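The `asin` fix above leans on ivy's `with_supported_dtypes` decorator. The real decorator consults per-version dtype tables; the stand-in below only sketches the wrap-and-check idea with a flat tuple of dtypes, so everything beyond the `asin` signature is a simplification, not ivy's API:

```python
import functools
import math

def with_supported_dtypes(supported, _framework):
    def decorator(fn):
        @functools.wraps(fn)
        def wrapper(x, dtype="float32", **kwargs):
            if dtype not in supported:
                raise TypeError(f"{fn.__name__} does not support dtype {dtype!r}")
            return fn(x, **kwargs)
        return wrapper
    return decorator

@with_supported_dtypes(("float32", "float64"), "paddle")
def asin(x, name=None):
    return math.asin(x)

print(asin(0.5))               # 0.5235987755982989
# asin(0.5, dtype="float16")  # TypeError: asin does not support dtype 'float16'
```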
gh_patches_debug_26207
rasdani/github-patches
git_diff
liqd__a4-opin-663
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Refactor idea to use a4 comments and ratings queryset https://github.com/liqd/adhocracy4/pull/20 </issue> <code> [start of euth/ideas/models.py] 1 from autoslug import AutoSlugField 2 from ckeditor.fields import RichTextField 3 from django.contrib.contenttypes.fields import GenericRelation 4 from django.db import models 5 6 from adhocracy4 import transforms 7 from adhocracy4.comments import models as comment_models 8 from adhocracy4.images import fields 9 from adhocracy4.modules import models as module_models 10 from adhocracy4.ratings import models as rating_models 11 12 13 class IdeaQuerySet(models.QuerySet): 14 15 def _rate_value_condition(self, value): 16 return models.Case( 17 models.When(ratings__value=value, then=models.F('ratings__id')), 18 output_field=models.IntegerField() 19 ) 20 21 def annotate_positive_rating_count(self): 22 return self.annotate( 23 positive_rating_count=models.Count( 24 self._rate_value_condition(1), 25 distinct=True # needed to combine with other count annotations 26 ) 27 ) 28 29 def annotate_negative_rating_count(self): 30 return self.annotate( 31 negative_rating_count=models.Count( 32 self._rate_value_condition(-1), 33 distinct=True # needed to combine with other count annotations 34 ) 35 ) 36 37 def annotate_comment_count(self): 38 return self.annotate( 39 comment_count=models.Count( 40 'comments', 41 distinct=True # needed to combine with other count annotations 42 ) 43 ) 44 45 46 class Idea(module_models.Item): 47 slug = AutoSlugField(populate_from='name', unique=True) 48 name = models.CharField(max_length=120) 49 description = RichTextField() 50 image = fields.ConfiguredImageField( 51 'idea_image', 52 upload_to='ideas/images', 53 blank=True, 54 ) 55 ratings = GenericRelation(rating_models.Rating, 56 related_query_name='idea', 57 object_id_field='object_pk') 58 comments = GenericRelation(comment_models.Comment, 59 related_query_name='idea', 60 object_id_field='object_pk') 61 62 objects = IdeaQuerySet.as_manager() 63 64 def __str__(self): 65 return self.name 66 67 def save(self, *args, **kwargs): 68 self.description = transforms.clean_html_field( 69 self.description) 70 super(Idea, self).save(*args, **kwargs) 71 72 def get_absolute_url(self): 73 from django.core.urlresolvers import reverse 74 return reverse('idea-detail', args=[str(self.slug)]) 75 [end of euth/ideas/models.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/euth/ideas/models.py b/euth/ideas/models.py --- a/euth/ideas/models.py +++ b/euth/ideas/models.py @@ -6,41 +6,13 @@ from adhocracy4 import transforms from adhocracy4.comments import models as comment_models from adhocracy4.images import fields +from adhocracy4.models import query from adhocracy4.modules import models as module_models from adhocracy4.ratings import models as rating_models -class IdeaQuerySet(models.QuerySet): - - def _rate_value_condition(self, value): - return models.Case( - models.When(ratings__value=value, then=models.F('ratings__id')), - output_field=models.IntegerField() - ) - - def annotate_positive_rating_count(self): - return self.annotate( - positive_rating_count=models.Count( - self._rate_value_condition(1), - distinct=True # needed to combine with other count annotations - ) - ) - - def annotate_negative_rating_count(self): - return self.annotate( - negative_rating_count=models.Count( - self._rate_value_condition(-1), - distinct=True # needed to combine with other count annotations - ) - ) - - def annotate_comment_count(self): - return self.annotate( - comment_count=models.Count( - 'comments', - distinct=True # needed to combine with other count annotations - ) - ) +class IdeaQuerySet(query.RateableQuerySet, query.CommentableQuerySet): + pass class Idea(module_models.Item):
{"golden_diff": "diff --git a/euth/ideas/models.py b/euth/ideas/models.py\n--- a/euth/ideas/models.py\n+++ b/euth/ideas/models.py\n@@ -6,41 +6,13 @@\n from adhocracy4 import transforms\n from adhocracy4.comments import models as comment_models\n from adhocracy4.images import fields\n+from adhocracy4.models import query\n from adhocracy4.modules import models as module_models\n from adhocracy4.ratings import models as rating_models\n \n \n-class IdeaQuerySet(models.QuerySet):\n-\n- def _rate_value_condition(self, value):\n- return models.Case(\n- models.When(ratings__value=value, then=models.F('ratings__id')),\n- output_field=models.IntegerField()\n- )\n-\n- def annotate_positive_rating_count(self):\n- return self.annotate(\n- positive_rating_count=models.Count(\n- self._rate_value_condition(1),\n- distinct=True # needed to combine with other count annotations\n- )\n- )\n-\n- def annotate_negative_rating_count(self):\n- return self.annotate(\n- negative_rating_count=models.Count(\n- self._rate_value_condition(-1),\n- distinct=True # needed to combine with other count annotations\n- )\n- )\n-\n- def annotate_comment_count(self):\n- return self.annotate(\n- comment_count=models.Count(\n- 'comments',\n- distinct=True # needed to combine with other count annotations\n- )\n- )\n+class IdeaQuerySet(query.RateableQuerySet, query.CommentableQuerySet):\n+ pass\n \n \n class Idea(module_models.Item):\n", "issue": "Refactor idea to use a4 comments and ratings queryset\nhttps://github.com/liqd/adhocracy4/pull/20\n", "before_files": [{"content": "from autoslug import AutoSlugField\nfrom ckeditor.fields import RichTextField\nfrom django.contrib.contenttypes.fields import GenericRelation\nfrom django.db import models\n\nfrom adhocracy4 import transforms\nfrom adhocracy4.comments import models as comment_models\nfrom adhocracy4.images import fields\nfrom adhocracy4.modules import models as module_models\nfrom adhocracy4.ratings import models as rating_models\n\n\nclass IdeaQuerySet(models.QuerySet):\n\n def _rate_value_condition(self, value):\n return models.Case(\n models.When(ratings__value=value, then=models.F('ratings__id')),\n output_field=models.IntegerField()\n )\n\n def annotate_positive_rating_count(self):\n return self.annotate(\n positive_rating_count=models.Count(\n self._rate_value_condition(1),\n distinct=True # needed to combine with other count annotations\n )\n )\n\n def annotate_negative_rating_count(self):\n return self.annotate(\n negative_rating_count=models.Count(\n self._rate_value_condition(-1),\n distinct=True # needed to combine with other count annotations\n )\n )\n\n def annotate_comment_count(self):\n return self.annotate(\n comment_count=models.Count(\n 'comments',\n distinct=True # needed to combine with other count annotations\n )\n )\n\n\nclass Idea(module_models.Item):\n slug = AutoSlugField(populate_from='name', unique=True)\n name = models.CharField(max_length=120)\n description = RichTextField()\n image = fields.ConfiguredImageField(\n 'idea_image',\n upload_to='ideas/images',\n blank=True,\n )\n ratings = GenericRelation(rating_models.Rating,\n related_query_name='idea',\n object_id_field='object_pk')\n comments = GenericRelation(comment_models.Comment,\n related_query_name='idea',\n object_id_field='object_pk')\n\n objects = IdeaQuerySet.as_manager()\n\n def __str__(self):\n return self.name\n\n def save(self, *args, **kwargs):\n self.description = transforms.clean_html_field(\n self.description)\n super(Idea, self).save(*args, **kwargs)\n\n def 
get_absolute_url(self):\n from django.core.urlresolvers import reverse\n return reverse('idea-detail', args=[str(self.slug)])\n", "path": "euth/ideas/models.py"}]}
1,189
353
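The euth fix above is pure de-duplication: the hand-rolled `annotate_*` methods move into shared adhocracy4 queryset mixins, and `IdeaQuerySet` becomes an empty composition. In the sketch below, plain classes stand in for Django querysets so the multiple-inheritance pattern runs without a Django project; the method bodies are deliberately reduced to prints:

```python
class CommentableQuerySet:
    def annotate_comment_count(self):
        print("annotating comment_count")
        return self

class RateableQuerySet:
    def annotate_positive_rating_count(self):
        print("annotating positive_rating_count")
        return self

class IdeaQuerySet(RateableQuerySet, CommentableQuerySet):
    # Empty on purpose: both behaviours now come from the shared mixins,
    # which is exactly what lets the duplicated implementations be deleted.
    pass

qs = IdeaQuerySet()
qs.annotate_positive_rating_count().annotate_comment_count()
```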
gh_patches_debug_13000
rasdani/github-patches
git_diff
pyqtgraph__pyqtgraph-1432
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> ExampleApp Syntax Highlighter broken in PyQt5 ### Short description Since merge of #1302, the syntax highlighter in the ExampleApp is broken (CC @j9ac9k ). ### Code to reproduce ```bash cd pyqtgraph\examples python3 . ``` ### Expected behavior Syntax highlighter should work ### Real behavior Following error: ``` Traceback (most recent call last): File "/home/b2/Documents/synced/pyqtgraph/examples/ExampleApp.py", line 298, in highlightBlock format = self.styles[format] AttributeError: 'PythonHighlighter' object has no attribute 'styles' ``` ### Tested environment(s) * PyQtGraph version: 7e57e07068fc8a62e9ae3b68e5556b11b6ef1841 and newer * Qt Python binding: yQt5 5.15.1 Qt 5.15.1 * Python version: Python 3.8.5 * NumPy version: 1.19.2 * Operating system: Linux 5.8.16-2-MANJARO * Installation method: Run in folder (see above) </issue> <code> [start of examples/__main__.py] 1 import sys, os 2 import pyqtgraph as pg 3 from pyqtgraph.Qt import QtCore, QtGui 4 5 6 7 if __name__ == '__main__': 8 if __package__ is None or __package__ == "": 9 parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) 10 sys.path.insert(0, parent_dir) 11 import examples 12 __package__ = "examples" 13 14 from .ExampleApp import main as run 15 run() 16 [end of examples/__main__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/examples/__main__.py b/examples/__main__.py --- a/examples/__main__.py +++ b/examples/__main__.py @@ -1,15 +1,16 @@ import sys, os + +# Set up path to contain pyqtgraph module when run without installation +if __name__ == "__main__" and (__package__ is None or __package__==''): + parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + sys.path.insert(0, parent_dir) + import examples + __package__ = "examples" + import pyqtgraph as pg from pyqtgraph.Qt import QtCore, QtGui - if __name__ == '__main__': - if __package__ is None or __package__ == "": - parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) - sys.path.insert(0, parent_dir) - import examples - __package__ = "examples" - from .ExampleApp import main as run run()
{"golden_diff": "diff --git a/examples/__main__.py b/examples/__main__.py\n--- a/examples/__main__.py\n+++ b/examples/__main__.py\n@@ -1,15 +1,16 @@\n import sys, os\n+\n+# Set up path to contain pyqtgraph module when run without installation\n+if __name__ == \"__main__\" and (__package__ is None or __package__==''):\n+ parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n+ sys.path.insert(0, parent_dir)\n+ import examples\n+ __package__ = \"examples\"\n+\n import pyqtgraph as pg\n from pyqtgraph.Qt import QtCore, QtGui\n \n \n-\n if __name__ == '__main__':\n- if __package__ is None or __package__ == \"\":\n- parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n- sys.path.insert(0, parent_dir)\n- import examples\n- __package__ = \"examples\"\n-\n from .ExampleApp import main as run\n run()\n", "issue": "ExampleApp Syntax Highlighter broken in PyQt5\n### Short description\r\nSince merge of #1302, the syntax highlighter in the ExampleApp is broken (CC @j9ac9k ).\r\n\r\n### Code to reproduce\r\n```bash\r\ncd pyqtgraph\\examples\r\npython3 .\r\n```\r\n\r\n### Expected behavior\r\nSyntax highlighter should work\r\n\r\n### Real behavior\r\nFollowing error:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/b2/Documents/synced/pyqtgraph/examples/ExampleApp.py\", line 298, in highlightBlock\r\n format = self.styles[format]\r\nAttributeError: 'PythonHighlighter' object has no attribute 'styles'\r\n```\r\n\r\n### Tested environment(s)\r\n\r\n * PyQtGraph version: 7e57e07068fc8a62e9ae3b68e5556b11b6ef1841 and newer\r\n * Qt Python binding: yQt5 5.15.1 Qt 5.15.1\r\n * Python version: Python 3.8.5\r\n * NumPy version: 1.19.2\r\n * Operating system: Linux 5.8.16-2-MANJARO\r\n * Installation method: Run in folder (see above)\n", "before_files": [{"content": "import sys, os\nimport pyqtgraph as pg\nfrom pyqtgraph.Qt import QtCore, QtGui\n\n\n\nif __name__ == '__main__':\n if __package__ is None or __package__ == \"\":\n parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n sys.path.insert(0, parent_dir)\n import examples\n __package__ = \"examples\"\n\n from .ExampleApp import main as run\n run()\n", "path": "examples/__main__.py"}]}
934
230
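The pyqtgraph fix above encodes an import-ordering rule: `sys.path` must be extended *before* `import pyqtgraph`, otherwise the interpreter can resolve a different copy of the package than the in-tree one and module-level state (such as the example app's highlighter classes) ends up inconsistent. The skeleton below, meant to be saved and run as a script inside a checkout, shows the order; the directory layout is an assumption:

```python
import os
import sys

# Make the in-tree package importable *before* anything imports it.
if __name__ == "__main__" and (__package__ is None or __package__ == ""):
    parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    sys.path.insert(0, parent_dir)
    __package__ = "examples"

import pyqtgraph as pg  # now resolves against parent_dir first

print(pg.__file__)  # confirms which copy of the package actually loaded
```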
gh_patches_debug_25299
rasdani/github-patches
git_diff
facebookresearch__ParlAI-2674
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Problem trying to install Use this to open other questions or issues, and provide context here. Hi, I'm kinda new to this, when I was trying to install it, I got the following error: File "setup.py", line 30 version=f'0.1.{DATE}{BUILD}', ^ SyntaxError: invalid syntax I used the command: git clone https://github.com/facebookresearch/ParlAI.git cd ParlAI; python setup.py develop Any Idea why? Thank you. </issue> <code> [start of setup.py] 1 #!/usr/bin/env python3 2 3 # Copyright (c) Facebook, Inc. and its affiliates. 4 # This source code is licensed under the MIT license found in the 5 # LICENSE file in the root directory of this source tree. 6 7 8 import datetime 9 import sys 10 11 from setuptools import setup, find_packages 12 13 BUILD = '' # if multiple in one day, use "dev0", "dev1", ... 14 DATE = datetime.date.today().isoformat().replace('-', '') 15 16 if sys.version_info < (3, 6): 17 sys.exit('Sorry, Python >=3.6 is required for ParlAI.') 18 19 with open('README.md', encoding="utf8") as f: 20 # strip the header and badges etc 21 readme = f.read().split('--------------------')[-1] 22 23 with open('requirements.txt') as f: 24 reqs = f.read() 25 26 27 if __name__ == '__main__': 28 setup( 29 name='parlai', 30 version=f'0.1.{DATE}{BUILD}', 31 description='Unified platform for dialogue research.', 32 long_description=readme, 33 long_description_content_type='text/markdown', 34 url='http://parl.ai/', 35 python_requires='>=3.6', 36 scripts=['bin/parlai'], 37 packages=find_packages( 38 exclude=('data', 'docs', 'examples', 'tests', 'parlai_internal',) 39 ), 40 install_requires=reqs.strip().split('\n'), 41 include_package_data=True, 42 entry_points={"flake8.extension": ["PAI = parlai.utils.flake8:ParlAIChecker"]}, 43 classifiers=[ 44 "Programming Language :: Python :: 3", 45 "License :: OSI Approved :: MIT License", 46 "Topic :: Scientific/Engineering :: Artificial Intelligence", 47 "Natural Language :: English", 48 ], 49 ) 50 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -10,7 +10,7 @@ from setuptools import setup, find_packages -BUILD = '' # if multiple in one day, use "dev0", "dev1", ... +BUILD = '' # test by setting to ".dev0" if multiple in one day, use ".dev1", ... DATE = datetime.date.today().isoformat().replace('-', '') if sys.version_info < (3, 6): @@ -27,7 +27,7 @@ if __name__ == '__main__': setup( name='parlai', - version=f'0.1.{DATE}{BUILD}', + version='0.1.{DATE}{BUILD}'.format(DATE=DATE, BUILD=BUILD), description='Unified platform for dialogue research.', long_description=readme, long_description_content_type='text/markdown', @@ -39,6 +39,7 @@ ), install_requires=reqs.strip().split('\n'), include_package_data=True, + package_data={'': ['*.txt', '*.md']}, entry_points={"flake8.extension": ["PAI = parlai.utils.flake8:ParlAIChecker"]}, classifiers=[ "Programming Language :: Python :: 3",
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -10,7 +10,7 @@\n \n from setuptools import setup, find_packages\n \n-BUILD = '' # if multiple in one day, use \"dev0\", \"dev1\", ...\n+BUILD = '' # test by setting to \".dev0\" if multiple in one day, use \".dev1\", ...\n DATE = datetime.date.today().isoformat().replace('-', '')\n \n if sys.version_info < (3, 6):\n@@ -27,7 +27,7 @@\n if __name__ == '__main__':\n setup(\n name='parlai',\n- version=f'0.1.{DATE}{BUILD}',\n+ version='0.1.{DATE}{BUILD}'.format(DATE=DATE, BUILD=BUILD),\n description='Unified platform for dialogue research.',\n long_description=readme,\n long_description_content_type='text/markdown',\n@@ -39,6 +39,7 @@\n ),\n install_requires=reqs.strip().split('\\n'),\n include_package_data=True,\n+ package_data={'': ['*.txt', '*.md']},\n entry_points={\"flake8.extension\": [\"PAI = parlai.utils.flake8:ParlAIChecker\"]},\n classifiers=[\n \"Programming Language :: Python :: 3\",\n", "issue": "Problem trying to install \nUse this to open other questions or issues, and provide context here.\r\nHi, I'm kinda new to this, when I was trying to install it, I got the following error: \r\n\r\n File \"setup.py\", line 30\r\n version=f'0.1.{DATE}{BUILD}',\r\n ^\r\nSyntaxError: invalid syntax\r\n\r\nI used the command: \r\n\r\ngit clone https://github.com/facebookresearch/ParlAI.git\r\ncd ParlAI; python setup.py develop\r\n\r\nAny Idea why?\r\n\r\nThank you. \n", "before_files": [{"content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n\nimport datetime\nimport sys\n\nfrom setuptools import setup, find_packages\n\nBUILD = '' # if multiple in one day, use \"dev0\", \"dev1\", ...\nDATE = datetime.date.today().isoformat().replace('-', '')\n\nif sys.version_info < (3, 6):\n sys.exit('Sorry, Python >=3.6 is required for ParlAI.')\n\nwith open('README.md', encoding=\"utf8\") as f:\n # strip the header and badges etc\n readme = f.read().split('--------------------')[-1]\n\nwith open('requirements.txt') as f:\n reqs = f.read()\n\n\nif __name__ == '__main__':\n setup(\n name='parlai',\n version=f'0.1.{DATE}{BUILD}',\n description='Unified platform for dialogue research.',\n long_description=readme,\n long_description_content_type='text/markdown',\n url='http://parl.ai/',\n python_requires='>=3.6',\n scripts=['bin/parlai'],\n packages=find_packages(\n exclude=('data', 'docs', 'examples', 'tests', 'parlai_internal',)\n ),\n install_requires=reqs.strip().split('\\n'),\n include_package_data=True,\n entry_points={\"flake8.extension\": [\"PAI = parlai.utils.flake8:ParlAIChecker\"]},\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Natural Language :: English\",\n ],\n )\n", "path": "setup.py"}]}
1,116
288
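The ParlAI traceback is worth dwelling on: an f-string is a Python 3.6+ *syntax* feature, so on older interpreters `setup.py` fails at parse time, before the friendly `sys.exit()` version check ever executes. Swapping the f-string for `str.format` lets the whole file parse everywhere, so the check can fire first. A condensed, runnable version of that ordering:

```python
import datetime
import sys

# This guard can only help if the file parses on old interpreters, which
# is exactly why the version string below avoids f-string syntax.
if sys.version_info < (3, 6):
    sys.exit('Sorry, Python >=3.6 is required for ParlAI.')

BUILD = ''
DATE = datetime.date.today().isoformat().replace('-', '')

version = '0.1.{DATE}{BUILD}'.format(DATE=DATE, BUILD=BUILD)
print(version)  # e.g. 0.1.20200518
```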
gh_patches_debug_61
rasdani/github-patches
git_diff
holoviz__panel-5919
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Plotting styling guides are gone As discussed on Discourse the styling gallery notebooks for Plotly, Altair, Seaborn and Matplotlib have gone missing with Panel 1.0 release. Please re-add. Thx. </issue> <code> [start of panel/models/echarts.py] 1 """ 2 Defines custom bokeh model to render ECharts plots. 3 """ 4 from bokeh.core.properties import ( 5 Any, Dict, Enum, List, Nullable, String, 6 ) 7 from bokeh.events import ModelEvent 8 from bokeh.models import LayoutDOM 9 10 from ..config import config 11 from ..io.resources import bundled_files 12 from ..util import classproperty 13 14 15 class EChartsEvent(ModelEvent): 16 17 event_name = 'echarts_event' 18 19 def __init__(self, model, type=None, data=None, query=None): 20 self.type = type 21 self.data = data 22 self.query = query 23 super().__init__(model=model) 24 25 26 class ECharts(LayoutDOM): 27 """ 28 A Bokeh model that wraps around an ECharts plot and renders it 29 inside a Bokeh. 30 """ 31 32 __javascript_raw__ = [ 33 f"{config.npm_cdn}/[email protected]/dist/echarts.min.js", 34 f"{config.npm_cdn}/[email protected]/dist/echarts-gl.min.js" 35 ] 36 37 @classproperty 38 def __javascript__(cls): 39 return bundled_files(cls) 40 41 @classproperty 42 def __js_skip__(cls): 43 return { 44 'echarts': cls.__javascript__[:1] 45 } 46 47 __js_require__ = { 48 'paths': { 49 "echarts": f"{config.npm_cdn}/[email protected]/dist/echarts.min", 50 "echarts-gl": f"{config.npm_cdn}/[email protected]/dist/echarts-gl.min" 51 }, 52 'exports': {} 53 } 54 55 data = Nullable(Dict(String, Any)) 56 57 options = Nullable(Dict(String, Any)) 58 59 event_config = Dict(String, Any) 60 61 js_events = Dict(String, List(Any)) 62 63 renderer = Enum("canvas", "svg") 64 65 theme = Enum("default", "light", "dark") 66 [end of panel/models/echarts.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/panel/models/echarts.py b/panel/models/echarts.py --- a/panel/models/echarts.py +++ b/panel/models/echarts.py @@ -62,4 +62,4 @@ renderer = Enum("canvas", "svg") - theme = Enum("default", "light", "dark") + theme = String("default")
{"golden_diff": "diff --git a/panel/models/echarts.py b/panel/models/echarts.py\n--- a/panel/models/echarts.py\n+++ b/panel/models/echarts.py\n@@ -62,4 +62,4 @@\n \n renderer = Enum(\"canvas\", \"svg\")\n \n- theme = Enum(\"default\", \"light\", \"dark\")\n+ theme = String(\"default\")\n", "issue": "Plotting styling guides are gone\nAs discussed on Discourse the styling gallery notebooks for Plotly, Altair, Seaborn and Matplotlib have gone missing with Panel 1.0 release.\r\n\r\nPlease re-add. Thx.\n", "before_files": [{"content": "\"\"\"\nDefines custom bokeh model to render ECharts plots.\n\"\"\"\nfrom bokeh.core.properties import (\n Any, Dict, Enum, List, Nullable, String,\n)\nfrom bokeh.events import ModelEvent\nfrom bokeh.models import LayoutDOM\n\nfrom ..config import config\nfrom ..io.resources import bundled_files\nfrom ..util import classproperty\n\n\nclass EChartsEvent(ModelEvent):\n\n event_name = 'echarts_event'\n\n def __init__(self, model, type=None, data=None, query=None):\n self.type = type\n self.data = data\n self.query = query\n super().__init__(model=model)\n\n\nclass ECharts(LayoutDOM):\n \"\"\"\n A Bokeh model that wraps around an ECharts plot and renders it\n inside a Bokeh.\n \"\"\"\n\n __javascript_raw__ = [\n f\"{config.npm_cdn}/[email protected]/dist/echarts.min.js\",\n f\"{config.npm_cdn}/[email protected]/dist/echarts-gl.min.js\"\n ]\n\n @classproperty\n def __javascript__(cls):\n return bundled_files(cls)\n\n @classproperty\n def __js_skip__(cls):\n return {\n 'echarts': cls.__javascript__[:1]\n }\n\n __js_require__ = {\n 'paths': {\n \"echarts\": f\"{config.npm_cdn}/[email protected]/dist/echarts.min\",\n \"echarts-gl\": f\"{config.npm_cdn}/[email protected]/dist/echarts-gl.min\"\n },\n 'exports': {}\n }\n\n data = Nullable(Dict(String, Any))\n\n options = Nullable(Dict(String, Any))\n\n event_config = Dict(String, Any)\n\n js_events = Dict(String, List(Any))\n\n renderer = Enum(\"canvas\", \"svg\")\n\n theme = Enum(\"default\", \"light\", \"dark\")\n", "path": "panel/models/echarts.py"}]}
1,137
87
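Here again the issue text (missing styling guides) and the golden diff diverge; the diff itself widens the `theme` property from a closed `Enum("default", "light", "dark")` to a free `String`, so any theme name registered with ECharts can pass validation. The toy descriptors below imitate that distinction without depending on Bokeh; the class names are stand-ins for Bokeh's property types, not its implementation:

```python
class Enum:
    def __init__(self, *allowed):
        self.allowed = allowed
    def validate(self, value):
        if value not in self.allowed:
            raise ValueError(f"{value!r} is not one of {self.allowed}")
        return value

class String:
    def __init__(self, default):
        self.default = default
    def validate(self, value):
        return str(value)  # any registered ECharts theme name is acceptable

print(Enum("default", "light", "dark").validate("dark"))  # ok
print(String("default").validate("vintage"))              # ok: custom theme
# Enum("default", "light", "dark").validate("vintage")    # would raise
```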
gh_patches_debug_600
rasdani/github-patches
git_diff
pex-tool__pex-1319
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Release 2.1.39 On the docket: + [x] Running opvault 0.4.9 pex leads to infinite recursion in setup tools #1316 </issue> <code> [start of pex/version.py] 1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). 2 # Licensed under the Apache License, Version 2.0 (see LICENSE). 3 4 __version__ = "2.1.38" 5 [end of pex/version.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pex/version.py b/pex/version.py --- a/pex/version.py +++ b/pex/version.py @@ -1,4 +1,4 @@ # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). -__version__ = "2.1.38" +__version__ = "2.1.39"
{"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.38\"\n+__version__ = \"2.1.39\"\n", "issue": "Release 2.1.39\nOn the docket:\r\n+ [x] Running opvault 0.4.9 pex leads to infinite recursion in setup tools #1316\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.38\"\n", "path": "pex/version.py"}]}
625
97
gh_patches_debug_21533
rasdani/github-patches
git_diff
e-valuation__EvaP-464
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> there is duplicated code in EvaP while browsing the code, i ran across a lot of structurally very similar blocks of code. this is a reminder for me to search for these code parts, collect a few, and find the cause and solutions for them. examples: - [x] https://github.com/fsr-itse/EvaP/commit/ca4b28a6054944859ec4447fff95fc36095a2ffb. - [x] the contributor course view/template vs the fsr course view/template. - [x] student_index and contributor_index (html and view) - [ ] fsr_import and user_import - [x] the multiselect initialization inside the script blocks exists multiple times in different files. one can find it by searching for select[multiple='multiple'] - [x] https://github.com/janno42/EvaP/blob/reward_points/evap/fsr/templates/fsr_reward_point_redemption_event_delete.html - [x] search for $(".twipsify") and $("input[data-datepicker='datepicker']:not([readonly='True'])") - [x] sortable and apply_ordering functions - [x] if ($.fn.button.noConflict != undefined) - [x] $('#id_degree').combobox() and similar - [x] fsr_course_preview.html, fsr_questionnaire_view.html, contributor_course_preview.html - [x] javascript-code of the sortable forms: [contributor/staff]_course_form.html (or rather, evap_course_edit_js.html), staff_faq_index.html and staff_faq_section.html - [x] all the leave.js occurrences </issue> <code> [start of evap/evaluation/management/commands/merge_users.py] 1 from django.core.management.base import BaseCommand 2 3 from evap.evaluation.models import UserProfile 4 from evap.evaluation.merge import merge_model_objects 5 6 7 class Command(BaseCommand): 8 args = '<user ID 1> <user ID 2>' 9 help = 'Merge two users' 10 11 def handle(self, *args, **options): 12 try: 13 user1 = UserProfile.objects.get(pk=int(args[0])) 14 user2 = UserProfile.objects.get(pk=int(args[1])) 15 16 print("Merging user '{1}' into user '{0}'".format(user1, user2)) 17 merge_model_objects(user1, user2) 18 except Exception: 19 import traceback 20 traceback.print_exc() 21 [end of evap/evaluation/management/commands/merge_users.py] [start of evap/staff/templatetags/staff_templatetags.py] 1 from django.template import Library 2 3 from evap.evaluation.models import Semester 4 5 register = Library() 6 7 8 @register.inclusion_tag("staff_semester_menu.html") 9 def include_staff_semester_menu(): 10 return dict(semesters=Semester.objects.all()[:5]) 11 12 @register.inclusion_tag('staff_course_selection_list.html') 13 def include_staff_course_selection_list(course_forms, btn_label, empty_msg): 14 return { 15 'course_forms': course_forms, 16 'btn_label': btn_label, 17 'empty_msg': empty_msg 18 } 19 20 @register.inclusion_tag('staff_user_labels.html') 21 def include_staff_user_labels(user): 22 return dict(user=user) 23 [end of evap/staff/templatetags/staff_templatetags.py] [start of evap/evaluation/templatetags/evaluation_templatetags.py] 1 from django.template import Library 2 from evap.evaluation.models import Semester 3 4 register = Library() 5 6 7 @register.inclusion_tag("user_list_with_links.html") 8 def include_user_list_with_links(users): 9 return dict(users=users) 10 [end of evap/evaluation/templatetags/evaluation_templatetags.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/evap/evaluation/management/commands/merge_users.py b/evap/evaluation/management/commands/merge_users.py --- a/evap/evaluation/management/commands/merge_users.py +++ b/evap/evaluation/management/commands/merge_users.py @@ -5,8 +5,8 @@ class Command(BaseCommand): - args = '<user ID 1> <user ID 2>' - help = 'Merge two users' + args = '<primary user ID> <duplicate user ID>' + help = 'Merges duplicate user into primary user' def handle(self, *args, **options): try: diff --git a/evap/evaluation/templatetags/evaluation_templatetags.py b/evap/evaluation/templatetags/evaluation_templatetags.py --- a/evap/evaluation/templatetags/evaluation_templatetags.py +++ b/evap/evaluation/templatetags/evaluation_templatetags.py @@ -7,3 +7,9 @@ @register.inclusion_tag("user_list_with_links.html") def include_user_list_with_links(users): return dict(users=users) + + [email protected]_tag("sortable_form_js.html") +def include_sortable_form_js(): + return dict() + diff --git a/evap/staff/templatetags/staff_templatetags.py b/evap/staff/templatetags/staff_templatetags.py --- a/evap/staff/templatetags/staff_templatetags.py +++ b/evap/staff/templatetags/staff_templatetags.py @@ -20,3 +20,7 @@ @register.inclusion_tag('staff_user_labels.html') def include_staff_user_labels(user): return dict(user=user) + [email protected]_tag("staff_questionnaire_index_list.html") +def include_staff_questionnaire_index_list(questionnaires, headline): + return dict(questionnaires=questionnaires, headline=headline)
{"golden_diff": "diff --git a/evap/evaluation/management/commands/merge_users.py b/evap/evaluation/management/commands/merge_users.py\n--- a/evap/evaluation/management/commands/merge_users.py\n+++ b/evap/evaluation/management/commands/merge_users.py\n@@ -5,8 +5,8 @@\n \n \n class Command(BaseCommand):\n- args = '<user ID 1> <user ID 2>'\n- help = 'Merge two users'\n+ args = '<primary user ID> <duplicate user ID>'\n+ help = 'Merges duplicate user into primary user'\n \n def handle(self, *args, **options):\n try:\ndiff --git a/evap/evaluation/templatetags/evaluation_templatetags.py b/evap/evaluation/templatetags/evaluation_templatetags.py\n--- a/evap/evaluation/templatetags/evaluation_templatetags.py\n+++ b/evap/evaluation/templatetags/evaluation_templatetags.py\n@@ -7,3 +7,9 @@\n @register.inclusion_tag(\"user_list_with_links.html\")\n def include_user_list_with_links(users):\n return dict(users=users)\n+\n+\[email protected]_tag(\"sortable_form_js.html\")\n+def include_sortable_form_js():\n+ return dict()\n+\ndiff --git a/evap/staff/templatetags/staff_templatetags.py b/evap/staff/templatetags/staff_templatetags.py\n--- a/evap/staff/templatetags/staff_templatetags.py\n+++ b/evap/staff/templatetags/staff_templatetags.py\n@@ -20,3 +20,7 @@\n @register.inclusion_tag('staff_user_labels.html')\n def include_staff_user_labels(user):\n return dict(user=user)\n+\[email protected]_tag(\"staff_questionnaire_index_list.html\")\n+def include_staff_questionnaire_index_list(questionnaires, headline):\n+ return dict(questionnaires=questionnaires, headline=headline)\n", "issue": "there is duplicated code in EvaP\nwhile browsing the code, i ran across a lot of structurally very similar blocks of code. \n\nthis is a reminder for me to search for these code parts, collect a few, and find the cause and solutions for them.\n\nexamples: \n- [x] https://github.com/fsr-itse/EvaP/commit/ca4b28a6054944859ec4447fff95fc36095a2ffb.\n- [x] the contributor course view/template vs the fsr course view/template.\n- [x] student_index and contributor_index (html and view)\n- [ ] fsr_import and user_import\n- [x] the multiselect initialization inside the script blocks exists multiple times in different files. 
one can find it by searching for select[multiple='multiple']\n- [x] https://github.com/janno42/EvaP/blob/reward_points/evap/fsr/templates/fsr_reward_point_redemption_event_delete.html\n- [x] search for $(\".twipsify\") and $(\"input[data-datepicker='datepicker']:not([readonly='True'])\")\n- [x] sortable and apply_ordering functions\n- [x] if ($.fn.button.noConflict != undefined)\n- [x] $('#id_degree').combobox() and similar\n- [x] fsr_course_preview.html, fsr_questionnaire_view.html, contributor_course_preview.html\n- [x] javascript-code of the sortable forms: [contributor/staff]_course_form.html (or rather, evap_course_edit_js.html), staff_faq_index.html and staff_faq_section.html\n- [x] all the leave.js occurrences\n\n", "before_files": [{"content": "from django.core.management.base import BaseCommand\n\nfrom evap.evaluation.models import UserProfile\nfrom evap.evaluation.merge import merge_model_objects\n\n\nclass Command(BaseCommand):\n args = '<user ID 1> <user ID 2>'\n help = 'Merge two users'\n\n def handle(self, *args, **options):\n try:\n user1 = UserProfile.objects.get(pk=int(args[0]))\n user2 = UserProfile.objects.get(pk=int(args[1]))\n\n print(\"Merging user '{1}' into user '{0}'\".format(user1, user2))\n merge_model_objects(user1, user2)\n except Exception:\n import traceback\n traceback.print_exc()\n", "path": "evap/evaluation/management/commands/merge_users.py"}, {"content": "from django.template import Library\n\nfrom evap.evaluation.models import Semester\n\nregister = Library()\n\n\[email protected]_tag(\"staff_semester_menu.html\")\ndef include_staff_semester_menu():\n return dict(semesters=Semester.objects.all()[:5])\n\[email protected]_tag('staff_course_selection_list.html')\ndef include_staff_course_selection_list(course_forms, btn_label, empty_msg):\n return {\n 'course_forms': course_forms,\n 'btn_label': btn_label,\n 'empty_msg': empty_msg\n }\n\[email protected]_tag('staff_user_labels.html')\ndef include_staff_user_labels(user):\n return dict(user=user)\n", "path": "evap/staff/templatetags/staff_templatetags.py"}, {"content": "from django.template import Library\nfrom evap.evaluation.models import Semester\n\nregister = Library()\n\n\[email protected]_tag(\"user_list_with_links.html\")\ndef include_user_list_with_links(users):\n return dict(users=users)\n", "path": "evap/evaluation/templatetags/evaluation_templatetags.py"}]}
1,405
460
gh_patches_debug_23967
rasdani/github-patches
git_diff
pwr-Solaar__Solaar-1856
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add git information in about and console **Information** Installed version of solaar:: 1.1.7+git1897-9c361c4~ubuntu22.10.1 Distribution: Ubuntu 22.10 + https://launchpad.net/~solaar-unifying/+archive/ubuntu/ppa (git) Kernel version: Linux 6.1.0-060100rc5-generic x86_64 GNU/Linux Example: a) ![image](https://user-images.githubusercontent.com/48624942/202720677-1d1257ab-3113-429b-b29c-78765161e1c4.png) b) $ solaar show Solaar version 1.1.7-9c361c4 [...] </issue> <code> [start of setup.py] 1 #!/usr/bin/env python3 2 3 from glob import glob as _glob 4 5 try: 6 from setuptools import setup 7 except ImportError: 8 from distutils.core import setup 9 10 main_ns = {} 11 with open('lib/solaar/__init__.py') as ver_file: 12 exec(ver_file.read(), main_ns) 13 NAME = main_ns['NAME'] 14 __version__ = main_ns['__version__'] 15 16 17 def _data_files(): 18 from os.path import dirname as _dirname 19 20 yield 'share/solaar/icons', _glob('share/solaar/icons/solaar*.svg') 21 yield 'share/solaar/icons', _glob('share/solaar/icons/light_*.png') 22 yield 'share/icons/hicolor/scalable/apps', ['share/solaar/icons/solaar.svg'] 23 24 for mo in _glob('share/locale/*/LC_MESSAGES/solaar.mo'): 25 yield _dirname(mo), [mo] 26 27 yield 'share/applications', ['share/applications/solaar.desktop'] 28 yield 'share/solaar/udev-rules.d', ['rules.d/42-logitech-unify-permissions.rules'] 29 yield 'share/metainfo', ['share/solaar/io.github.pwr_solaar.solaar.metainfo.xml'] 30 31 del _dirname 32 33 34 setup( 35 name=NAME.lower(), 36 version=__version__, 37 description='Linux device manager for Logitech receivers, keyboards, mice, and tablets.', 38 long_description=''' 39 Solaar is a Linux device manager for many Logitech peripherals that connect through 40 Unifying and other receivers or via USB or Bluetooth. 41 Solaar is able to pair/unpair devices with receivers and show and modify some of the 42 modifiable features of devices. 
43 For instructions on installing Solaar see https://pwr-solaar.github.io/Solaar/installation'''.strip(), 44 author='Daniel Pavel', 45 license='GPLv2', 46 url='http://pwr-solaar.github.io/Solaar/', 47 classifiers=[ 48 'Development Status :: 4 - Beta', 49 'Environment :: X11 Applications :: GTK', 50 'Environment :: Console', 51 'Intended Audience :: End Users/Desktop', 52 'License :: DFSG approved', 53 'License :: OSI Approved :: GNU General Public License v2 (GPLv2)', 54 'Natural Language :: English', 55 'Programming Language :: Python :: 3 :: Only', 56 'Operating System :: POSIX :: Linux', 57 'Topic :: Utilities', 58 ], 59 platforms=['linux'], 60 61 # sudo apt install python-gi python3-gi \ 62 # gir1.2-gtk-3.0 gir1.2-notify-0.7 gir1.2-ayatanaappindicator3-0.1 63 # os_requires=['gi.repository.GObject (>= 2.0)', 'gi.repository.Gtk (>= 3.0)'], 64 python_requires='>=3.7', 65 install_requires=[ 66 'evdev (>= 1.1.2)', 67 'pyudev (>= 0.13)', 68 'PyYAML (>= 3.12)', 69 'python-xlib (>= 0.27)', 70 'psutil (>= 5.4.3)', 71 ], 72 extras_require={ 73 'report-descriptor': ['hid-parser'], 74 'desktop-notifications': ['Notify (>= 0.7)'], 75 }, 76 package_dir={'': 'lib'}, 77 packages=['keysyms', 'hidapi', 'logitech_receiver', 'solaar', 'solaar.ui', 'solaar.cli'], 78 data_files=list(_data_files()), 79 scripts=_glob('bin/*'), 80 ) 81 [end of setup.py] [start of lib/solaar/__init__.py] 1 # -*- python-mode -*- 2 3 ## Copyright (C) 2012-2013 Daniel Pavel 4 ## 5 ## This program is free software; you can redistribute it and/or modify 6 ## it under the terms of the GNU General Public License as published by 7 ## the Free Software Foundation; either version 2 of the License, or 8 ## (at your option) any later version. 9 ## 10 ## This program is distributed in the hope that it will be useful, 11 ## but WITHOUT ANY WARRANTY; without even the implied warranty of 12 ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 ## GNU General Public License for more details. 14 ## 15 ## You should have received a copy of the GNU General Public License along 16 ## with this program; if not, write to the Free Software Foundation, Inc., 17 ## 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 18 19 __version__ = '1.1.7' 20 NAME = 'Solaar' 21 [end of lib/solaar/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/lib/solaar/__init__.py b/lib/solaar/__init__.py --- a/lib/solaar/__init__.py +++ b/lib/solaar/__init__.py @@ -16,5 +16,11 @@ ## with this program; if not, write to the Free Software Foundation, Inc., ## 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -__version__ = '1.1.7' +__version_short__ = '1.1.7' NAME = 'Solaar' + +try: + import gitinfo + __version__ = __version_short__ + '-' + gitinfo.get_git_info()['commit'][:8] +except Exception: + __version__ = __version_short__ diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -11,7 +11,7 @@ with open('lib/solaar/__init__.py') as ver_file: exec(ver_file.read(), main_ns) NAME = main_ns['NAME'] -__version__ = main_ns['__version__'] +__version__ = main_ns['__version_short__'] def _data_files(): @@ -72,6 +72,7 @@ extras_require={ 'report-descriptor': ['hid-parser'], 'desktop-notifications': ['Notify (>= 0.7)'], + 'git-commit': ['python-git-info'], }, package_dir={'': 'lib'}, packages=['keysyms', 'hidapi', 'logitech_receiver', 'solaar', 'solaar.ui', 'solaar.cli'],
{"golden_diff": "diff --git a/lib/solaar/__init__.py b/lib/solaar/__init__.py\n--- a/lib/solaar/__init__.py\n+++ b/lib/solaar/__init__.py\n@@ -16,5 +16,11 @@\n ## with this program; if not, write to the Free Software Foundation, Inc.,\n ## 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n \n-__version__ = '1.1.7'\n+__version_short__ = '1.1.7'\n NAME = 'Solaar'\n+\n+try:\n+ import gitinfo\n+ __version__ = __version_short__ + '-' + gitinfo.get_git_info()['commit'][:8]\n+except Exception:\n+ __version__ = __version_short__\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -11,7 +11,7 @@\n with open('lib/solaar/__init__.py') as ver_file:\n exec(ver_file.read(), main_ns)\n NAME = main_ns['NAME']\n-__version__ = main_ns['__version__']\n+__version__ = main_ns['__version_short__']\n \n \n def _data_files():\n@@ -72,6 +72,7 @@\n extras_require={\n 'report-descriptor': ['hid-parser'],\n 'desktop-notifications': ['Notify (>= 0.7)'],\n+ 'git-commit': ['python-git-info'],\n },\n package_dir={'': 'lib'},\n packages=['keysyms', 'hidapi', 'logitech_receiver', 'solaar', 'solaar.ui', 'solaar.cli'],\n", "issue": "Add git information in about and console\n**Information**\r\nInstalled version of solaar:: 1.1.7+git1897-9c361c4~ubuntu22.10.1\r\nDistribution: Ubuntu 22.10 + https://launchpad.net/~solaar-unifying/+archive/ubuntu/ppa (git)\r\nKernel version: Linux 6.1.0-060100rc5-generic x86_64 GNU/Linux\r\n\r\nExample:\r\na)\r\n![image](https://user-images.githubusercontent.com/48624942/202720677-1d1257ab-3113-429b-b29c-78765161e1c4.png)\r\n\r\nb)\r\n$ solaar show\r\nSolaar version 1.1.7-9c361c4\r\n\r\n[...]\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nfrom glob import glob as _glob\n\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\nmain_ns = {}\nwith open('lib/solaar/__init__.py') as ver_file:\n exec(ver_file.read(), main_ns)\nNAME = main_ns['NAME']\n__version__ = main_ns['__version__']\n\n\ndef _data_files():\n from os.path import dirname as _dirname\n\n yield 'share/solaar/icons', _glob('share/solaar/icons/solaar*.svg')\n yield 'share/solaar/icons', _glob('share/solaar/icons/light_*.png')\n yield 'share/icons/hicolor/scalable/apps', ['share/solaar/icons/solaar.svg']\n\n for mo in _glob('share/locale/*/LC_MESSAGES/solaar.mo'):\n yield _dirname(mo), [mo]\n\n yield 'share/applications', ['share/applications/solaar.desktop']\n yield 'share/solaar/udev-rules.d', ['rules.d/42-logitech-unify-permissions.rules']\n yield 'share/metainfo', ['share/solaar/io.github.pwr_solaar.solaar.metainfo.xml']\n\n del _dirname\n\n\nsetup(\n name=NAME.lower(),\n version=__version__,\n description='Linux device manager for Logitech receivers, keyboards, mice, and tablets.',\n long_description='''\nSolaar is a Linux device manager for many Logitech peripherals that connect through\nUnifying and other receivers or via USB or Bluetooth.\nSolaar is able to pair/unpair devices with receivers and show and modify some of the\nmodifiable features of devices.\nFor instructions on installing Solaar see https://pwr-solaar.github.io/Solaar/installation'''.strip(),\n author='Daniel Pavel',\n license='GPLv2',\n url='http://pwr-solaar.github.io/Solaar/',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: X11 Applications :: GTK',\n 'Environment :: Console',\n 'Intended Audience :: End Users/Desktop',\n 'License :: DFSG approved',\n 'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',\n 'Natural Language :: English',\n 
'Programming Language :: Python :: 3 :: Only',\n 'Operating System :: POSIX :: Linux',\n 'Topic :: Utilities',\n ],\n platforms=['linux'],\n\n # sudo apt install python-gi python3-gi \\\n # gir1.2-gtk-3.0 gir1.2-notify-0.7 gir1.2-ayatanaappindicator3-0.1\n # os_requires=['gi.repository.GObject (>= 2.0)', 'gi.repository.Gtk (>= 3.0)'],\n python_requires='>=3.7',\n install_requires=[\n 'evdev (>= 1.1.2)',\n 'pyudev (>= 0.13)',\n 'PyYAML (>= 3.12)',\n 'python-xlib (>= 0.27)',\n 'psutil (>= 5.4.3)',\n ],\n extras_require={\n 'report-descriptor': ['hid-parser'],\n 'desktop-notifications': ['Notify (>= 0.7)'],\n },\n package_dir={'': 'lib'},\n packages=['keysyms', 'hidapi', 'logitech_receiver', 'solaar', 'solaar.ui', 'solaar.cli'],\n data_files=list(_data_files()),\n scripts=_glob('bin/*'),\n)\n", "path": "setup.py"}, {"content": "# -*- python-mode -*-\n\n## Copyright (C) 2012-2013 Daniel Pavel\n##\n## This program is free software; you can redistribute it and/or modify\n## it under the terms of the GNU General Public License as published by\n## the Free Software Foundation; either version 2 of the License, or\n## (at your option) any later version.\n##\n## This program is distributed in the hope that it will be useful,\n## but WITHOUT ANY WARRANTY; without even the implied warranty of\n## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n## GNU General Public License for more details.\n##\n## You should have received a copy of the GNU General Public License along\n## with this program; if not, write to the Free Software Foundation, Inc.,\n## 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n\n__version__ = '1.1.7'\nNAME = 'Solaar'\n", "path": "lib/solaar/__init__.py"}]}
1,930
369
gh_patches_debug_2662
rasdani/github-patches
git_diff
scikit-image__scikit-image-6343
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> imageIO warnings due to v2 -> v3 migration ## Description As of imageIO 2.16.0 (Feb22) there are now a v2 and v3 namespaces in addition to the top-level namespace. As of 2.16.2 (released Apr22) directly using the top-level namespace results in warnings to either explicitly opt-into the v3 API or opt-out and import the v2. This in turn causes warnings when using `skimage.io.imread`. I suggest that this is a good first issue as there is no API design choices here (at least to start) and only needs the ```python try: import newway except ImportError: import old way ``` dance. The warnings look like (lifted from a test suite): ``` ____________________________________________________________________________ ReaderSequence.test_slice_of_slice ____________________________________________________________________________ pims/tests/test_imseq.py:256: in setUp self.v = self.klass(self.filename, **self.kwargs) pims/image_sequence.py:217: in __init__ with self.reader_cls(self._filepaths[0], **self.kwargs) as reader: pims/image_reader.py:60: in __init__ self._data = Frame(imread(filename, **kwargs), frame_no=0) ../../../../.pybuild/bleeding/lib/python3.11/contextlib.py:155: in __exit__ self.gen.throw(typ, value, traceback) ../../../../.virtualenvs/bleeding/lib/python3.11/site-packages/skimage/io/util.py:43: in file_or_url_context yield resource_name ../../../../.virtualenvs/bleeding/lib/python3.11/site-packages/skimage/io/_io.py:53: in imread img = call_plugin('imread', fname, plugin=plugin, **plugin_args) ../../../../.virtualenvs/bleeding/lib/python3.11/site-packages/skimage/io/manage_plugins.py:207: in call_plugin return func(*args, **kwargs) ../../../../.virtualenvs/bleeding/lib/python3.11/site-packages/skimage/io/_plugins/imageio_plugin.py:10: in imread return np.asarray(imageio_imread(*args, **kwargs)) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ uri = '/home/tcaswell/source/bnl/soft-matter/pims/pims/tests/data/image_sequence3d/file001.png', format = None, kwargs = {} def imread(uri, format=None, **kwargs): """imread(uri, format=None, **kwargs) Reads an image from the specified file. Returns a numpy array, which comes with a dict of meta data at its 'meta' attribute. Note that the image data is returned as-is, and may not always have a dtype of uint8 (and thus may differ from what e.g. PIL returns). Parameters ---------- uri : {str, pathlib.Path, bytes, file} The resource to load the image from, e.g. a filename, pathlib.Path, http address or file object, see the docs for more info. format : str The format to use to read the file. By default imageio selects the appropriate for you based on the filename and its contents. kwargs : ... Further keyword arguments are passed to the reader. See :func:`.help` to see what arguments are available for a particular format. """ > warnings.warn( "Starting with ImageIO v3 the behavior of this function will switch to that of" " iio.v3.imread. To keep the current behavior (and make this warning dissapear)" " use `import imageio.v2 as imageio` or call `imageio.v2.imread` directly.", DeprecationWarning, ) E DeprecationWarning: Starting with ImageIO v3 the behavior of this function will switch to that of iio.v3.imread. 
To keep the current behavior (and make this warning dissapear) use `import imageio.v2 as imageio` or call `imageio.v2.imread` directly. ../../../../.virtualenvs/bleeding/lib/python3.11/site-packages/imageio/__init__.py:89: DeprecationWarning ``` </issue> <code> [start of skimage/io/_plugins/imageio_plugin.py] 1 __all__ = ['imread', 'imsave'] 2 3 from functools import wraps 4 import numpy as np 5 from imageio import imread as imageio_imread, imsave 6 7 8 @wraps(imageio_imread) 9 def imread(*args, **kwargs): 10 return np.asarray(imageio_imread(*args, **kwargs)) 11 [end of skimage/io/_plugins/imageio_plugin.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/skimage/io/_plugins/imageio_plugin.py b/skimage/io/_plugins/imageio_plugin.py --- a/skimage/io/_plugins/imageio_plugin.py +++ b/skimage/io/_plugins/imageio_plugin.py @@ -2,7 +2,12 @@ from functools import wraps import numpy as np -from imageio import imread as imageio_imread, imsave + +try: + # Try using the v2 API directly to avoid a warning from imageio >= 2.16.2 + from imageio.v2 import imread as imageio_imread, imsave +except ImportError: + from imageio import imread as imageio_imread, imsave @wraps(imageio_imread)
{"golden_diff": "diff --git a/skimage/io/_plugins/imageio_plugin.py b/skimage/io/_plugins/imageio_plugin.py\n--- a/skimage/io/_plugins/imageio_plugin.py\n+++ b/skimage/io/_plugins/imageio_plugin.py\n@@ -2,7 +2,12 @@\n \n from functools import wraps\n import numpy as np\n-from imageio import imread as imageio_imread, imsave\n+\n+try:\n+ # Try using the v2 API directly to avoid a warning from imageio >= 2.16.2\n+ from imageio.v2 import imread as imageio_imread, imsave\n+except ImportError:\n+ from imageio import imread as imageio_imread, imsave\n \n \n @wraps(imageio_imread)\n", "issue": "imageIO warnings due to v2 -> v3 migration\n## Description\r\n\r\nAs of imageIO 2.16.0 (Feb22) there are now a v2 and v3 namespaces in addition to the top-level namespace. As of 2.16.2 (released Apr22) directly using the top-level namespace results in warnings to either explicitly opt-into the v3 API or opt-out and import the v2. \r\n\r\nThis in turn causes warnings when using `skimage.io.imread`. \r\n\r\nI suggest that this is a good first issue as there is no API design choices here (at least to start) and only needs the \r\n```python\r\ntry: \r\n import newway\r\nexcept ImportError: \r\n import old way\r\n```\r\ndance.\r\n\r\nThe warnings look like (lifted from a test suite):\r\n\r\n```\r\n____________________________________________________________________________ ReaderSequence.test_slice_of_slice ____________________________________________________________________________\r\npims/tests/test_imseq.py:256: in setUp\r\n self.v = self.klass(self.filename, **self.kwargs)\r\npims/image_sequence.py:217: in __init__\r\n with self.reader_cls(self._filepaths[0], **self.kwargs) as reader:\r\npims/image_reader.py:60: in __init__\r\n self._data = Frame(imread(filename, **kwargs), frame_no=0)\r\n../../../../.pybuild/bleeding/lib/python3.11/contextlib.py:155: in __exit__\r\n self.gen.throw(typ, value, traceback)\r\n../../../../.virtualenvs/bleeding/lib/python3.11/site-packages/skimage/io/util.py:43: in file_or_url_context\r\n yield resource_name\r\n../../../../.virtualenvs/bleeding/lib/python3.11/site-packages/skimage/io/_io.py:53: in imread\r\n img = call_plugin('imread', fname, plugin=plugin, **plugin_args)\r\n../../../../.virtualenvs/bleeding/lib/python3.11/site-packages/skimage/io/manage_plugins.py:207: in call_plugin\r\n return func(*args, **kwargs)\r\n../../../../.virtualenvs/bleeding/lib/python3.11/site-packages/skimage/io/_plugins/imageio_plugin.py:10: in imread\r\n return np.asarray(imageio_imread(*args, **kwargs))\r\n_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ \r\n\r\nuri = '/home/tcaswell/source/bnl/soft-matter/pims/pims/tests/data/image_sequence3d/file001.png', format = None, kwargs = {}\r\n\r\n def imread(uri, format=None, **kwargs):\r\n \"\"\"imread(uri, format=None, **kwargs)\r\n \r\n Reads an image from the specified file. Returns a numpy array, which\r\n comes with a dict of meta data at its 'meta' attribute.\r\n \r\n Note that the image data is returned as-is, and may not always have\r\n a dtype of uint8 (and thus may differ from what e.g. PIL returns).\r\n \r\n Parameters\r\n ----------\r\n uri : {str, pathlib.Path, bytes, file}\r\n The resource to load the image from, e.g. a filename, pathlib.Path,\r\n http address or file object, see the docs for more info.\r\n format : str\r\n The format to use to read the file. 
By default imageio selects\r\n the appropriate for you based on the filename and its contents.\r\n kwargs : ...\r\n Further keyword arguments are passed to the reader. See :func:`.help`\r\n to see what arguments are available for a particular format.\r\n \"\"\"\r\n \r\n> warnings.warn(\r\n \"Starting with ImageIO v3 the behavior of this function will switch to that of\"\r\n \" iio.v3.imread. To keep the current behavior (and make this warning dissapear)\"\r\n \" use `import imageio.v2 as imageio` or call `imageio.v2.imread` directly.\",\r\n DeprecationWarning,\r\n )\r\nE DeprecationWarning: Starting with ImageIO v3 the behavior of this function will switch to that of iio.v3.imread. To keep the current behavior (and make this warning dissapear) use `import imageio.v2 as imageio` or call `imageio.v2.imread` directly.\r\n\r\n../../../../.virtualenvs/bleeding/lib/python3.11/site-packages/imageio/__init__.py:89: DeprecationWarning\r\n\r\n```\n", "before_files": [{"content": "__all__ = ['imread', 'imsave']\n\nfrom functools import wraps\nimport numpy as np\nfrom imageio import imread as imageio_imread, imsave\n\n\n@wraps(imageio_imread)\ndef imread(*args, **kwargs):\n return np.asarray(imageio_imread(*args, **kwargs))\n", "path": "skimage/io/_plugins/imageio_plugin.py"}]}
1,635
166
gh_patches_debug_16306
rasdani/github-patches
git_diff
ARM-DOE__ACT-653
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Example Incorrect ### Description This example says it's for plotting sounding data but the example uses MET data. We should update this to be what it was originally intended. https://arm-doe.github.io/ACT/source/auto_examples/plotting/plot_sonde.html </issue> <code> [start of examples/plotting/plot_sonde.py] 1 """ 2 Plot a timeseries of sounding data 3 ---------------------------------------------------- 4 5 This is a simple example for how to plot a timeseries of sounding 6 data from the ARM SGP site. 7 8 Author: Robert Jackson 9 """ 10 11 from matplotlib import pyplot as plt 12 13 import act 14 15 files = act.tests.sample_files.EXAMPLE_MET_WILDCARD 16 met_ds = act.io.armfiles.read_netcdf(files) 17 print(met_ds) 18 met_temp = met_ds.temp_mean 19 met_rh = met_ds.rh_mean 20 met_lcl = (20.0 + met_temp / 5.0) * (100.0 - met_rh) / 1000.0 21 met_ds['met_lcl'] = met_lcl * 1000.0 22 met_ds['met_lcl'].attrs['units'] = 'm' 23 met_ds['met_lcl'].attrs['long_name'] = 'LCL Calculated from SGP MET E13' 24 25 # Plot data 26 display = act.plotting.TimeSeriesDisplay(met_ds) 27 display.add_subplots((3,), figsize=(15, 10)) 28 display.plot('wspd_vec_mean', subplot_index=(0,)) 29 display.plot('temp_mean', subplot_index=(1,)) 30 display.plot('rh_mean', subplot_index=(2,)) 31 plt.show() 32 [end of examples/plotting/plot_sonde.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/examples/plotting/plot_sonde.py b/examples/plotting/plot_sonde.py --- a/examples/plotting/plot_sonde.py +++ b/examples/plotting/plot_sonde.py @@ -12,20 +12,14 @@ import act -files = act.tests.sample_files.EXAMPLE_MET_WILDCARD -met_ds = act.io.armfiles.read_netcdf(files) -print(met_ds) -met_temp = met_ds.temp_mean -met_rh = met_ds.rh_mean -met_lcl = (20.0 + met_temp / 5.0) * (100.0 - met_rh) / 1000.0 -met_ds['met_lcl'] = met_lcl * 1000.0 -met_ds['met_lcl'].attrs['units'] = 'm' -met_ds['met_lcl'].attrs['long_name'] = 'LCL Calculated from SGP MET E13' +files = act.tests.sample_files.EXAMPLE_SONDE1 +sonde_ds = act.io.armfiles.read_netcdf(files) +print(sonde_ds) # Plot data -display = act.plotting.TimeSeriesDisplay(met_ds) +display = act.plotting.TimeSeriesDisplay(sonde_ds) display.add_subplots((3,), figsize=(15, 10)) -display.plot('wspd_vec_mean', subplot_index=(0,)) -display.plot('temp_mean', subplot_index=(1,)) -display.plot('rh_mean', subplot_index=(2,)) +display.plot('wspd', subplot_index=(0,)) +display.plot('tdry', subplot_index=(1,)) +display.plot('rh', subplot_index=(2,)) plt.show()
{"golden_diff": "diff --git a/examples/plotting/plot_sonde.py b/examples/plotting/plot_sonde.py\n--- a/examples/plotting/plot_sonde.py\n+++ b/examples/plotting/plot_sonde.py\n@@ -12,20 +12,14 @@\n \n import act\n \n-files = act.tests.sample_files.EXAMPLE_MET_WILDCARD\n-met_ds = act.io.armfiles.read_netcdf(files)\n-print(met_ds)\n-met_temp = met_ds.temp_mean\n-met_rh = met_ds.rh_mean\n-met_lcl = (20.0 + met_temp / 5.0) * (100.0 - met_rh) / 1000.0\n-met_ds['met_lcl'] = met_lcl * 1000.0\n-met_ds['met_lcl'].attrs['units'] = 'm'\n-met_ds['met_lcl'].attrs['long_name'] = 'LCL Calculated from SGP MET E13'\n+files = act.tests.sample_files.EXAMPLE_SONDE1\n+sonde_ds = act.io.armfiles.read_netcdf(files)\n+print(sonde_ds)\n \n # Plot data\n-display = act.plotting.TimeSeriesDisplay(met_ds)\n+display = act.plotting.TimeSeriesDisplay(sonde_ds)\n display.add_subplots((3,), figsize=(15, 10))\n-display.plot('wspd_vec_mean', subplot_index=(0,))\n-display.plot('temp_mean', subplot_index=(1,))\n-display.plot('rh_mean', subplot_index=(2,))\n+display.plot('wspd', subplot_index=(0,))\n+display.plot('tdry', subplot_index=(1,))\n+display.plot('rh', subplot_index=(2,))\n plt.show()\n", "issue": "Example Incorrect\n### Description\r\n\r\nThis example says it's for plotting sounding data but the example uses MET data. We should update this to be what it was originally intended.\r\nhttps://arm-doe.github.io/ACT/source/auto_examples/plotting/plot_sonde.html\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nPlot a timeseries of sounding data\n----------------------------------------------------\n\nThis is a simple example for how to plot a timeseries of sounding\ndata from the ARM SGP site.\n\nAuthor: Robert Jackson\n\"\"\"\n\nfrom matplotlib import pyplot as plt\n\nimport act\n\nfiles = act.tests.sample_files.EXAMPLE_MET_WILDCARD\nmet_ds = act.io.armfiles.read_netcdf(files)\nprint(met_ds)\nmet_temp = met_ds.temp_mean\nmet_rh = met_ds.rh_mean\nmet_lcl = (20.0 + met_temp / 5.0) * (100.0 - met_rh) / 1000.0\nmet_ds['met_lcl'] = met_lcl * 1000.0\nmet_ds['met_lcl'].attrs['units'] = 'm'\nmet_ds['met_lcl'].attrs['long_name'] = 'LCL Calculated from SGP MET E13'\n\n# Plot data\ndisplay = act.plotting.TimeSeriesDisplay(met_ds)\ndisplay.add_subplots((3,), figsize=(15, 10))\ndisplay.plot('wspd_vec_mean', subplot_index=(0,))\ndisplay.plot('temp_mean', subplot_index=(1,))\ndisplay.plot('rh_mean', subplot_index=(2,))\nplt.show()\n", "path": "examples/plotting/plot_sonde.py"}]}
933
380
gh_patches_debug_135
rasdani/github-patches
git_diff
encode__httpx-691
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Version 0.10.0 Let's get squared away what we need for Version 0.10.0. The key here is we need to make sure we've sorted out any API that'd block us from reintroducing the sync API, so... * [x] Let's drop `.read()` from `Request` - It's not documented anywhere, not *actually* required by users, and the content is available on `request.stream` if absolutely needed. #679 * [x] We need `Response.read()` and `Response.close()` to be `Response.aread()` and `Response.aclose()`. The only point where this is relevant is users performing conditional reads inside a `with httpx.stream(method, url) as response` block. #674 * [x] We ought to have `Client.close()` become `Client.aclose()` for consistency. #675 * [x] Good point to get #617 in. * [x] We'll need to change `response.next()`. We could *either* underspecify the return type, and allow it to be either sync or async depending on the context, *or* we use `response.anext()` and `response.next()`. #676 * [ ] ~Good point to address~ #656. * [x] *Potentially* we could introduce an `httpx.AsyncClient` synonm for `httpx.Client`, and advise our users to switch towards that usage, so that there's no breaking changes for them once we fully reintroduce a sync API all the way through. #680 </issue> <code> [start of httpx/__version__.py] 1 __title__ = "httpx" 2 __description__ = "A next generation HTTP client, for Python 3." 3 __version__ = "0.9.5" 4 [end of httpx/__version__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/httpx/__version__.py b/httpx/__version__.py --- a/httpx/__version__.py +++ b/httpx/__version__.py @@ -1,3 +1,3 @@ __title__ = "httpx" __description__ = "A next generation HTTP client, for Python 3." -__version__ = "0.9.5" +__version__ = "0.10.0"
{"golden_diff": "diff --git a/httpx/__version__.py b/httpx/__version__.py\n--- a/httpx/__version__.py\n+++ b/httpx/__version__.py\n@@ -1,3 +1,3 @@\n __title__ = \"httpx\"\n __description__ = \"A next generation HTTP client, for Python 3.\"\n-__version__ = \"0.9.5\"\n+__version__ = \"0.10.0\"\n", "issue": "Version 0.10.0\nLet's get squared away what we need for Version 0.10.0.\r\n\r\nThe key here is we need to make sure we've sorted out any API that'd block us from reintroducing the sync API, so...\r\n\r\n* [x] Let's drop `.read()` from `Request` - It's not documented anywhere, not *actually* required by users, and the content is available on `request.stream` if absolutely needed. #679\r\n* [x] We need `Response.read()` and `Response.close()` to be `Response.aread()` and `Response.aclose()`. The only point where this is relevant is users performing conditional reads inside a `with httpx.stream(method, url) as response` block. #674\r\n* [x] We ought to have `Client.close()` become `Client.aclose()` for consistency. #675\r\n* [x] Good point to get #617 in.\r\n* [x] We'll need to change `response.next()`. We could *either* underspecify the return type, and allow it to be either sync or async depending on the context, *or* we use `response.anext()` and `response.next()`. #676\r\n* [ ] ~Good point to address~ #656.\r\n* [x] *Potentially* we could introduce an `httpx.AsyncClient` synonm for `httpx.Client`, and advise our users to switch towards that usage, so that there's no breaking changes for them once we fully reintroduce a sync API all the way through. #680\n", "before_files": [{"content": "__title__ = \"httpx\"\n__description__ = \"A next generation HTTP client, for Python 3.\"\n__version__ = \"0.9.5\"\n", "path": "httpx/__version__.py"}]}
922
96
gh_patches_debug_674
rasdani/github-patches
git_diff
pyodide__pyodide-3562
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Error about `--user` and `--target` flag when installing xbuildenv I sometimes get following error while installing xbuild environment: ```bash $ pyodide build . Downloading xbuild environment Installing xbuild environment stderr: ERROR: Can not combine '--user' and '--target' [notice] A new release of pip available: 22.3.1 -> 23.0 [notice] To update, run: /home/gitpod/.pyenv/versions/3.10.2/bin/python -m pip install --upgrade pip ``` It happens here, which installs host site packages: https://github.com/pyodide/pyodide/blob/7cc1058358242a5a9012edbb8163d86a860a1a28/pyodide-build/pyodide_build/install_xbuildenv.py#L50-L57 I think we need to add `--no-user` flag explicitly to prevent this error. </issue> <code> [start of pyodide-build/pyodide_build/install_xbuildenv.py] 1 import argparse 2 import json 3 import shutil 4 import subprocess 5 from pathlib import Path 6 from urllib.request import urlopen, urlretrieve 7 8 from .common import exit_with_stdio, get_make_flag, get_pyodide_root 9 from .create_pypa_index import create_pypa_index 10 from .logger import logger 11 12 13 def make_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: 14 parser.description = ( 15 "Install xbuild env.\n\n" 16 "The installed environment is the same as the one that would result from\n" 17 "`PYODIDE_PACKAGES='scipy' make` except that it is much faster.\n" 18 "The goal is to enable out-of-tree builds for binary packages that depend\n" 19 "on numpy or scipy.\n" 20 "Note: this is a private endpoint that should not be used outside of the Pyodide Makefile." 21 ) 22 parser.add_argument("--download", action="store_true", help="Download xbuild env") 23 parser.add_argument("xbuildenv", type=str, nargs=1) 24 return parser 25 26 27 def download_xbuildenv(version: str, xbuildenv_path: Path) -> None: 28 from shutil import rmtree, unpack_archive 29 from tempfile import NamedTemporaryFile 30 31 logger.info("Downloading xbuild environment") 32 rmtree(xbuildenv_path, ignore_errors=True) 33 with NamedTemporaryFile(suffix=".tar") as f: 34 urlretrieve( 35 f"https://github.com/pyodide/pyodide/releases/download/{version}/xbuildenv-{version}.tar.bz2", 36 f.name, 37 ) 38 unpack_archive(f.name, xbuildenv_path) 39 40 41 def install_xbuildenv(version: str, xbuildenv_path: Path) -> None: 42 logger.info("Installing xbuild environment") 43 xbuildenv_path = xbuildenv_path / "xbuildenv" 44 pyodide_root = get_pyodide_root() 45 xbuildenv_root = xbuildenv_path / "pyodide-root" 46 host_site_packages = xbuildenv_root / Path( 47 get_make_flag("HOSTSITEPACKAGES") 48 ).relative_to(pyodide_root) 49 host_site_packages.mkdir(exist_ok=True, parents=True) 50 result = subprocess.run( 51 [ 52 "pip", 53 "install", 54 "-t", 55 host_site_packages, 56 "-r", 57 xbuildenv_path / "requirements.txt", 58 ], 59 capture_output=True, 60 encoding="utf8", 61 ) 62 if result.returncode != 0: 63 exit_with_stdio(result) 64 # Copy the site-packages-extras (coming from the cross-build-files meta.yaml 65 # key) over the site-packages directory with the newly installed packages. 
66 shutil.copytree( 67 xbuildenv_path / "site-packages-extras", host_site_packages, dirs_exist_ok=True 68 ) 69 cdn_base = f"https://cdn.jsdelivr.net/pyodide/v{version}/full/" 70 if (xbuildenv_root / "repodata.json").exists(): 71 repodata_bytes = (xbuildenv_root / "repodata.json").read_bytes() 72 else: 73 repodata_url = cdn_base + "repodata.json" 74 with urlopen(repodata_url) as response: 75 repodata_bytes = response.read() 76 repodata = json.loads(repodata_bytes) 77 version = repodata["info"]["version"] 78 create_pypa_index(repodata["packages"], xbuildenv_root, cdn_base) 79 80 81 def main(args: argparse.Namespace) -> None: 82 from . import __version__ 83 84 xbuildenv_path = Path(args.xbuildenv[0]) 85 version = __version__ 86 if args.download: 87 download_xbuildenv(version, xbuildenv_path) 88 install_xbuildenv(version, xbuildenv_path) 89 [end of pyodide-build/pyodide_build/install_xbuildenv.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pyodide-build/pyodide_build/install_xbuildenv.py b/pyodide-build/pyodide_build/install_xbuildenv.py --- a/pyodide-build/pyodide_build/install_xbuildenv.py +++ b/pyodide-build/pyodide_build/install_xbuildenv.py @@ -51,6 +51,7 @@ [ "pip", "install", + "--no-user", "-t", host_site_packages, "-r",
{"golden_diff": "diff --git a/pyodide-build/pyodide_build/install_xbuildenv.py b/pyodide-build/pyodide_build/install_xbuildenv.py\n--- a/pyodide-build/pyodide_build/install_xbuildenv.py\n+++ b/pyodide-build/pyodide_build/install_xbuildenv.py\n@@ -51,6 +51,7 @@\n [\n \"pip\",\n \"install\",\n+ \"--no-user\",\n \"-t\",\n host_site_packages,\n \"-r\",\n", "issue": "Error about `--user` and `--target` flag when installing xbuildenv\nI sometimes get following error while installing xbuild environment:\r\n\r\n```bash\r\n$ pyodide build .\r\nDownloading xbuild environment\r\nInstalling xbuild environment\r\n stderr:\r\n ERROR: Can not combine '--user' and '--target'\r\n\r\n [notice] A new release of pip available: 22.3.1 -> 23.0\r\n [notice] To update, run: /home/gitpod/.pyenv/versions/3.10.2/bin/python -m pip install --upgrade pip\r\n```\r\n\r\nIt happens here, which installs host site packages:\r\n\r\nhttps://github.com/pyodide/pyodide/blob/7cc1058358242a5a9012edbb8163d86a860a1a28/pyodide-build/pyodide_build/install_xbuildenv.py#L50-L57\r\n\r\nI think we need to add `--no-user` flag explicitly to prevent this error.\n", "before_files": [{"content": "import argparse\nimport json\nimport shutil\nimport subprocess\nfrom pathlib import Path\nfrom urllib.request import urlopen, urlretrieve\n\nfrom .common import exit_with_stdio, get_make_flag, get_pyodide_root\nfrom .create_pypa_index import create_pypa_index\nfrom .logger import logger\n\n\ndef make_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:\n parser.description = (\n \"Install xbuild env.\\n\\n\"\n \"The installed environment is the same as the one that would result from\\n\"\n \"`PYODIDE_PACKAGES='scipy' make` except that it is much faster.\\n\"\n \"The goal is to enable out-of-tree builds for binary packages that depend\\n\"\n \"on numpy or scipy.\\n\"\n \"Note: this is a private endpoint that should not be used outside of the Pyodide Makefile.\"\n )\n parser.add_argument(\"--download\", action=\"store_true\", help=\"Download xbuild env\")\n parser.add_argument(\"xbuildenv\", type=str, nargs=1)\n return parser\n\n\ndef download_xbuildenv(version: str, xbuildenv_path: Path) -> None:\n from shutil import rmtree, unpack_archive\n from tempfile import NamedTemporaryFile\n\n logger.info(\"Downloading xbuild environment\")\n rmtree(xbuildenv_path, ignore_errors=True)\n with NamedTemporaryFile(suffix=\".tar\") as f:\n urlretrieve(\n f\"https://github.com/pyodide/pyodide/releases/download/{version}/xbuildenv-{version}.tar.bz2\",\n f.name,\n )\n unpack_archive(f.name, xbuildenv_path)\n\n\ndef install_xbuildenv(version: str, xbuildenv_path: Path) -> None:\n logger.info(\"Installing xbuild environment\")\n xbuildenv_path = xbuildenv_path / \"xbuildenv\"\n pyodide_root = get_pyodide_root()\n xbuildenv_root = xbuildenv_path / \"pyodide-root\"\n host_site_packages = xbuildenv_root / Path(\n get_make_flag(\"HOSTSITEPACKAGES\")\n ).relative_to(pyodide_root)\n host_site_packages.mkdir(exist_ok=True, parents=True)\n result = subprocess.run(\n [\n \"pip\",\n \"install\",\n \"-t\",\n host_site_packages,\n \"-r\",\n xbuildenv_path / \"requirements.txt\",\n ],\n capture_output=True,\n encoding=\"utf8\",\n )\n if result.returncode != 0:\n exit_with_stdio(result)\n # Copy the site-packages-extras (coming from the cross-build-files meta.yaml\n # key) over the site-packages directory with the newly installed packages.\n shutil.copytree(\n xbuildenv_path / \"site-packages-extras\", host_site_packages, dirs_exist_ok=True\n )\n cdn_base = 
f\"https://cdn.jsdelivr.net/pyodide/v{version}/full/\"\n if (xbuildenv_root / \"repodata.json\").exists():\n repodata_bytes = (xbuildenv_root / \"repodata.json\").read_bytes()\n else:\n repodata_url = cdn_base + \"repodata.json\"\n with urlopen(repodata_url) as response:\n repodata_bytes = response.read()\n repodata = json.loads(repodata_bytes)\n version = repodata[\"info\"][\"version\"]\n create_pypa_index(repodata[\"packages\"], xbuildenv_root, cdn_base)\n\n\ndef main(args: argparse.Namespace) -> None:\n from . import __version__\n\n xbuildenv_path = Path(args.xbuildenv[0])\n version = __version__\n if args.download:\n download_xbuildenv(version, xbuildenv_path)\n install_xbuildenv(version, xbuildenv_path)\n", "path": "pyodide-build/pyodide_build/install_xbuildenv.py"}]}
1,742
106
gh_patches_debug_25505
rasdani/github-patches
git_diff
onnx__onnx-1648
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Proposal: Op Registry Once we start getting into more complicated passes having knowledge about operators becomes incredibly useful. What I propose is to maintain a registry of operators mapping to a set of descriptors. For example let's take a look at the [eliminate_nop_monotone_argmax](https://github.com/onnx/onnx/blob/master/onnx/optimizer/passes/eliminate_nop_monotone_argmax.h) pass. Within the pass we have to maintain an unordered set of ops that are monotone. The correct design would be to have a registry which we can ask whether or not if a certain op is monotone. Once such a registry is implemented, passes can be generalized through descriptors, therefore as we add new ops and their respective descriptors our passes naturally scale out. Some possible descriptors we could have are: - monotonic - elementwise independent (op works on an individual element without knowledge of other elements, such as log, but not softmax since it requires calculating a sum over an axis) - elementwise dependent (softmax, logsoftmax, softmin, softplus) - reduction - shape transform (e.g. transpose, view) @houseroad @bddppq What do you guys think? </issue> <code> [start of onnx/optimizer.py] 1 # ATTENTION: The code in this file is highly EXPERIMENTAL. 2 # Adventurous users should note that the APIs will probably change. 3 4 """onnx optimizer 5 6 This enables users to optimize their models. 7 """ 8 from __future__ import absolute_import 9 from __future__ import division 10 from __future__ import print_function 11 from __future__ import unicode_literals 12 13 import onnx 14 import onnx.onnx_cpp2py_export.optimizer as C 15 from onnx import ModelProto 16 from typing import Text, Sequence, Optional 17 18 """Apply the optimization on the serialized ModelProto. 19 20 Arguments: 21 input (ModelProto): model 22 names (list of string): list of optimization names 23 24 Return: 25 return (ModelProto) optimized model 26 27 Supported pass names: 28 -- nop 29 -- eliminate_identity 30 -- eliminate_nop_transpose 31 -- eliminate_nop_pad 32 -- eliminate_unused_initializer 33 -- fuse_consecutive_squeezes 34 -- fuse_consecutive_transposes 35 -- fuse_add_bias_into_conv 36 -- fuse_transpose_into_gemm 37 """ 38 39 get_available_passes = C.get_available_passes 40 41 42 def optimize(model, passes=None, fixed_point=False): # type: (ModelProto, Optional[Sequence[Text]], bool) -> ModelProto 43 if passes is None: 44 passes = ['eliminate_nop_transpose', 45 'eliminate_nop_pad', 46 'fuse_consecutive_transposes', 47 'fuse_transpose_into_gemm'] 48 if not isinstance(model, ModelProto): 49 raise ValueError('Optimizer only accepts ModelProto, incorrect type: {}'.format(type(model))) 50 51 model_str = model.SerializeToString() 52 if fixed_point: 53 optimized_model_str = C.optimize_fixedpoint(model_str, passes) 54 else: 55 optimized_model_str = C.optimize(model_str, passes) 56 57 return onnx.load_from_string(optimized_model_str) 58 [end of onnx/optimizer.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/onnx/optimizer.py b/onnx/optimizer.py --- a/onnx/optimizer.py +++ b/onnx/optimizer.py @@ -26,13 +26,23 @@ Supported pass names: -- nop + -- eliminate_deadend -- eliminate_identity - -- eliminate_nop_transpose + -- eliminate_nop_dropout + -- eliminate_nop_monotone_argmax -- eliminate_nop_pad + -- eliminate_nop_transpose -- eliminate_unused_initializer + -- extract_constant_to_initializer + -- fuse_add_bias_into_conv + -- fuse_bn_into_conv + -- fuse_consecutive_concats + -- fuse_consecutive_log_softmax + -- fuse_consecutive_reduce_unsqueeze -- fuse_consecutive_squeezes -- fuse_consecutive_transposes - -- fuse_add_bias_into_conv + -- fuse_matmul_add_bias_into_gemm + -- fuse_pad_into_conv -- fuse_transpose_into_gemm """ @@ -45,8 +55,10 @@ 'eliminate_nop_pad', 'fuse_consecutive_transposes', 'fuse_transpose_into_gemm'] + if not isinstance(model, ModelProto): - raise ValueError('Optimizer only accepts ModelProto, incorrect type: {}'.format(type(model))) + raise ValueError( + 'Optimizer only accepts ModelProto, incorrect type: {}'.format(type(model))) model_str = model.SerializeToString() if fixed_point:
{"golden_diff": "diff --git a/onnx/optimizer.py b/onnx/optimizer.py\n--- a/onnx/optimizer.py\n+++ b/onnx/optimizer.py\n@@ -26,13 +26,23 @@\n \n Supported pass names:\n -- nop\n+ -- eliminate_deadend\n -- eliminate_identity\n- -- eliminate_nop_transpose\n+ -- eliminate_nop_dropout\n+ -- eliminate_nop_monotone_argmax\n -- eliminate_nop_pad\n+ -- eliminate_nop_transpose\n -- eliminate_unused_initializer\n+ -- extract_constant_to_initializer\n+ -- fuse_add_bias_into_conv\n+ -- fuse_bn_into_conv\n+ -- fuse_consecutive_concats\n+ -- fuse_consecutive_log_softmax\n+ -- fuse_consecutive_reduce_unsqueeze\n -- fuse_consecutive_squeezes\n -- fuse_consecutive_transposes\n- -- fuse_add_bias_into_conv\n+ -- fuse_matmul_add_bias_into_gemm\n+ -- fuse_pad_into_conv\n -- fuse_transpose_into_gemm\n \"\"\"\n \n@@ -45,8 +55,10 @@\n 'eliminate_nop_pad',\n 'fuse_consecutive_transposes',\n 'fuse_transpose_into_gemm']\n+\n if not isinstance(model, ModelProto):\n- raise ValueError('Optimizer only accepts ModelProto, incorrect type: {}'.format(type(model)))\n+ raise ValueError(\n+ 'Optimizer only accepts ModelProto, incorrect type: {}'.format(type(model)))\n \n model_str = model.SerializeToString()\n if fixed_point:\n", "issue": "Proposal: Op Registry\nOnce we start getting into more complicated passes having knowledge about operators becomes incredibly useful. What I propose is to maintain a registry of operators mapping to a set of descriptors. \r\n\r\nFor example let's take a look at the [eliminate_nop_monotone_argmax](https://github.com/onnx/onnx/blob/master/onnx/optimizer/passes/eliminate_nop_monotone_argmax.h) pass. Within the pass we have to maintain an unordered set of ops that are monotone. The correct design would be to have a registry which we can ask whether or not if a certain op is monotone. Once such a registry is implemented, passes can be generalized through descriptors, therefore as we add new ops and their respective descriptors our passes naturally scale out.\r\n\r\nSome possible descriptors we could have are:\r\n- monotonic\r\n- elementwise independent (op works on an individual element without knowledge of other elements, such as log, but not softmax since it requires calculating a sum over an axis)\r\n- elementwise dependent (softmax, logsoftmax, softmin, softplus)\r\n- reduction\r\n- shape transform (e.g. 
transpose, view)\r\n\r\n@houseroad @bddppq What do you guys think?\n", "before_files": [{"content": "# ATTENTION: The code in this file is highly EXPERIMENTAL.\n# Adventurous users should note that the APIs will probably change.\n\n\"\"\"onnx optimizer\n\nThis enables users to optimize their models.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport onnx\nimport onnx.onnx_cpp2py_export.optimizer as C\nfrom onnx import ModelProto\nfrom typing import Text, Sequence, Optional\n\n\"\"\"Apply the optimization on the serialized ModelProto.\n\nArguments:\n input (ModelProto): model\n names (list of string): list of optimization names\n\nReturn:\n return (ModelProto) optimized model\n\nSupported pass names:\n -- nop\n -- eliminate_identity\n -- eliminate_nop_transpose\n -- eliminate_nop_pad\n -- eliminate_unused_initializer\n -- fuse_consecutive_squeezes\n -- fuse_consecutive_transposes\n -- fuse_add_bias_into_conv\n -- fuse_transpose_into_gemm\n\"\"\"\n\nget_available_passes = C.get_available_passes\n\n\ndef optimize(model, passes=None, fixed_point=False): # type: (ModelProto, Optional[Sequence[Text]], bool) -> ModelProto\n if passes is None:\n passes = ['eliminate_nop_transpose',\n 'eliminate_nop_pad',\n 'fuse_consecutive_transposes',\n 'fuse_transpose_into_gemm']\n if not isinstance(model, ModelProto):\n raise ValueError('Optimizer only accepts ModelProto, incorrect type: {}'.format(type(model)))\n\n model_str = model.SerializeToString()\n if fixed_point:\n optimized_model_str = C.optimize_fixedpoint(model_str, passes)\n else:\n optimized_model_str = C.optimize(model_str, passes)\n\n return onnx.load_from_string(optimized_model_str)\n", "path": "onnx/optimizer.py"}]}
1,290
333
gh_patches_debug_1054
rasdani/github-patches
git_diff
liqd__a4-opin-906
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> styling of categories in dashboard (Safari) When using Safari the styling of categories in the dashboard is broken. ![safari styling issue](https://user-images.githubusercontent.com/15954895/28914159-fe84edde-783a-11e7-8ae4-09f0a6b978cd.png) </issue> <code> [start of euth/ideas/forms.py] 1 from adhocracy4.categories import forms as category_forms 2 3 from . import models 4 5 6 class IdeaForm(category_forms.CategorizableForm): 7 class Meta: 8 model = models.Idea 9 fields = ['name', 'description', 'image', 'category'] 10 [end of euth/ideas/forms.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/euth/ideas/forms.py b/euth/ideas/forms.py --- a/euth/ideas/forms.py +++ b/euth/ideas/forms.py @@ -7,3 +7,7 @@ class Meta: model = models.Idea fields = ['name', 'description', 'image', 'category'] + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.fields['category'].empty_label = '---'
{"golden_diff": "diff --git a/euth/ideas/forms.py b/euth/ideas/forms.py\n--- a/euth/ideas/forms.py\n+++ b/euth/ideas/forms.py\n@@ -7,3 +7,7 @@\n class Meta:\n model = models.Idea\n fields = ['name', 'description', 'image', 'category']\n+\n+ def __init__(self, *args, **kwargs):\n+ super().__init__(*args, **kwargs)\n+ self.fields['category'].empty_label = '---'\n", "issue": "styling of categories in dashboard (Safari)\nWhen using Safari the styling of categories in the dashboard is broken. \r\n\r\n![safari styling issue](https://user-images.githubusercontent.com/15954895/28914159-fe84edde-783a-11e7-8ae4-09f0a6b978cd.png)\r\n\n", "before_files": [{"content": "from adhocracy4.categories import forms as category_forms\n\nfrom . import models\n\n\nclass IdeaForm(category_forms.CategorizableForm):\n class Meta:\n model = models.Idea\n fields = ['name', 'description', 'image', 'category']\n", "path": "euth/ideas/forms.py"}]}
692
112
gh_patches_debug_5823
rasdani/github-patches
git_diff
microsoft__botbuilder-python-1207
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [PORT] Remove Slack from the list of channels that support Suggested Actions > Port this change from botbuilder-dotnet/master branch: https://github.com/microsoft/botbuilder-dotnet/pull/4177 Fixes #2291 Removing Slack from the list of channels that do not support suggested actions. # Changed projects * Microsoft.Bot.Builder.Dialogs </issue> <code> [start of libraries/botbuilder-dialogs/botbuilder/dialogs/choices/channel.py] 1 # Copyright (c) Microsoft Corporation. All rights reserved. 2 # Licensed under the MIT License. 3 4 from botbuilder.core import TurnContext 5 from botframework.connector import Channels 6 7 8 class Channel: 9 """ 10 Methods for determining channel-specific functionality. 11 """ 12 13 @staticmethod 14 def supports_suggested_actions(channel_id: str, button_cnt: int = 100) -> bool: 15 """Determine if a number of Suggested Actions are supported by a Channel. 16 17 Args: 18 channel_id (str): The Channel to check the if Suggested Actions are supported in. 19 button_cnt (int, optional): Defaults to 100. The number of Suggested Actions to check for the Channel. 20 21 Returns: 22 bool: True if the Channel supports the button_cnt total Suggested Actions, False if the Channel does not 23 support that number of Suggested Actions. 24 """ 25 26 max_actions = { 27 # https://developers.facebook.com/docs/messenger-platform/send-messages/quick-replies 28 Channels.facebook: 10, 29 Channels.skype: 10, 30 # https://developers.line.biz/en/reference/messaging-api/#items-object 31 Channels.line: 13, 32 # https://dev.kik.com/#/docs/messaging#text-response-object 33 Channels.kik: 20, 34 Channels.telegram: 100, 35 Channels.slack: 100, 36 Channels.emulator: 100, 37 Channels.direct_line: 100, 38 Channels.webchat: 100, 39 } 40 return ( 41 button_cnt <= max_actions[channel_id] 42 if channel_id in max_actions 43 else False 44 ) 45 46 @staticmethod 47 def supports_card_actions(channel_id: str, button_cnt: int = 100) -> bool: 48 """Determine if a number of Card Actions are supported by a Channel. 49 50 Args: 51 channel_id (str): The Channel to check if the Card Actions are supported in. 52 button_cnt (int, optional): Defaults to 100. The number of Card Actions to check for the Channel. 53 54 Returns: 55 bool: True if the Channel supports the button_cnt total Card Actions, False if the Channel does not support 56 that number of Card Actions. 57 """ 58 59 max_actions = { 60 Channels.facebook: 3, 61 Channels.skype: 3, 62 Channels.ms_teams: 3, 63 Channels.line: 99, 64 Channels.slack: 100, 65 Channels.emulator: 100, 66 Channels.direct_line: 100, 67 Channels.webchat: 100, 68 Channels.cortana: 100, 69 } 70 return ( 71 button_cnt <= max_actions[channel_id] 72 if channel_id in max_actions 73 else False 74 ) 75 76 @staticmethod 77 def has_message_feed(channel_id: str) -> bool: 78 """Determine if a Channel has a Message Feed. 79 80 Args: 81 channel_id (str): The Channel to check for Message Feed. 82 83 Returns: 84 bool: True if the Channel has a Message Feed, False if it does not. 85 """ 86 87 return not channel_id == Channels.cortana 88 89 @staticmethod 90 def max_action_title_length( # pylint: disable=unused-argument 91 channel_id: str, 92 ) -> int: 93 """Maximum length allowed for Action Titles. 94 95 Args: 96 channel_id (str): The Channel to determine Maximum Action Title Length. 97 98 Returns: 99 int: The total number of characters allowed for an Action Title on a specific Channel. 
100 """ 101 102 return 20 103 104 @staticmethod 105 def get_channel_id(turn_context: TurnContext) -> str: 106 """Get the Channel Id from the current Activity on the Turn Context. 107 108 Args: 109 turn_context (TurnContext): The Turn Context to retrieve the Activity's Channel Id from. 110 111 Returns: 112 str: The Channel Id from the Turn Context's Activity. 113 """ 114 115 if turn_context.activity.channel_id is None: 116 return "" 117 118 return turn_context.activity.channel_id 119 [end of libraries/botbuilder-dialogs/botbuilder/dialogs/choices/channel.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/libraries/botbuilder-dialogs/botbuilder/dialogs/choices/channel.py b/libraries/botbuilder-dialogs/botbuilder/dialogs/choices/channel.py --- a/libraries/botbuilder-dialogs/botbuilder/dialogs/choices/channel.py +++ b/libraries/botbuilder-dialogs/botbuilder/dialogs/choices/channel.py @@ -32,7 +32,6 @@ # https://dev.kik.com/#/docs/messaging#text-response-object Channels.kik: 20, Channels.telegram: 100, - Channels.slack: 100, Channels.emulator: 100, Channels.direct_line: 100, Channels.webchat: 100,
{"golden_diff": "diff --git a/libraries/botbuilder-dialogs/botbuilder/dialogs/choices/channel.py b/libraries/botbuilder-dialogs/botbuilder/dialogs/choices/channel.py\n--- a/libraries/botbuilder-dialogs/botbuilder/dialogs/choices/channel.py\n+++ b/libraries/botbuilder-dialogs/botbuilder/dialogs/choices/channel.py\n@@ -32,7 +32,6 @@\n # https://dev.kik.com/#/docs/messaging#text-response-object\n Channels.kik: 20,\n Channels.telegram: 100,\n- Channels.slack: 100,\n Channels.emulator: 100,\n Channels.direct_line: 100,\n Channels.webchat: 100,\n", "issue": "[PORT] Remove Slack from the list of channels that support Suggested Actions\n> Port this change from botbuilder-dotnet/master branch:\nhttps://github.com/microsoft/botbuilder-dotnet/pull/4177\n\nFixes #2291 \r\n\r\nRemoving Slack from the list of channels that do not support suggested actions.\n\n\r\n# Changed projects\r\n* Microsoft.Bot.Builder.Dialogs\r\n\r\n\r\n\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nfrom botbuilder.core import TurnContext\nfrom botframework.connector import Channels\n\n\nclass Channel:\n \"\"\"\n Methods for determining channel-specific functionality.\n \"\"\"\n\n @staticmethod\n def supports_suggested_actions(channel_id: str, button_cnt: int = 100) -> bool:\n \"\"\"Determine if a number of Suggested Actions are supported by a Channel.\n\n Args:\n channel_id (str): The Channel to check the if Suggested Actions are supported in.\n button_cnt (int, optional): Defaults to 100. The number of Suggested Actions to check for the Channel.\n\n Returns:\n bool: True if the Channel supports the button_cnt total Suggested Actions, False if the Channel does not\n support that number of Suggested Actions.\n \"\"\"\n\n max_actions = {\n # https://developers.facebook.com/docs/messenger-platform/send-messages/quick-replies\n Channels.facebook: 10,\n Channels.skype: 10,\n # https://developers.line.biz/en/reference/messaging-api/#items-object\n Channels.line: 13,\n # https://dev.kik.com/#/docs/messaging#text-response-object\n Channels.kik: 20,\n Channels.telegram: 100,\n Channels.slack: 100,\n Channels.emulator: 100,\n Channels.direct_line: 100,\n Channels.webchat: 100,\n }\n return (\n button_cnt <= max_actions[channel_id]\n if channel_id in max_actions\n else False\n )\n\n @staticmethod\n def supports_card_actions(channel_id: str, button_cnt: int = 100) -> bool:\n \"\"\"Determine if a number of Card Actions are supported by a Channel.\n\n Args:\n channel_id (str): The Channel to check if the Card Actions are supported in.\n button_cnt (int, optional): Defaults to 100. 
The number of Card Actions to check for the Channel.\n\n Returns:\n bool: True if the Channel supports the button_cnt total Card Actions, False if the Channel does not support\n that number of Card Actions.\n \"\"\"\n\n max_actions = {\n Channels.facebook: 3,\n Channels.skype: 3,\n Channels.ms_teams: 3,\n Channels.line: 99,\n Channels.slack: 100,\n Channels.emulator: 100,\n Channels.direct_line: 100,\n Channels.webchat: 100,\n Channels.cortana: 100,\n }\n return (\n button_cnt <= max_actions[channel_id]\n if channel_id in max_actions\n else False\n )\n\n @staticmethod\n def has_message_feed(channel_id: str) -> bool:\n \"\"\"Determine if a Channel has a Message Feed.\n\n Args:\n channel_id (str): The Channel to check for Message Feed.\n\n Returns:\n bool: True if the Channel has a Message Feed, False if it does not.\n \"\"\"\n\n return not channel_id == Channels.cortana\n\n @staticmethod\n def max_action_title_length( # pylint: disable=unused-argument\n channel_id: str,\n ) -> int:\n \"\"\"Maximum length allowed for Action Titles.\n\n Args:\n channel_id (str): The Channel to determine Maximum Action Title Length.\n\n Returns:\n int: The total number of characters allowed for an Action Title on a specific Channel.\n \"\"\"\n\n return 20\n\n @staticmethod\n def get_channel_id(turn_context: TurnContext) -> str:\n \"\"\"Get the Channel Id from the current Activity on the Turn Context.\n\n Args:\n turn_context (TurnContext): The Turn Context to retrieve the Activity's Channel Id from.\n\n Returns:\n str: The Channel Id from the Turn Context's Activity.\n \"\"\"\n\n if turn_context.activity.channel_id is None:\n return \"\"\n\n return turn_context.activity.channel_id\n", "path": "libraries/botbuilder-dialogs/botbuilder/dialogs/choices/channel.py"}]}
1,762
166
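The fix above works by omission: once `Channels.slack` is dropped from the `max_actions` lookup, the existing `else False` fallback reports that Slack has no suggested-actions support. A minimal sketch of the resulting behavior — assuming the patched package is installed and `Channel` is exported from `botbuilder.dialogs.choices`:

```python
# Sketch of the patched behavior (hypothetical check script; assumes
# botbuilder-dialogs with this fix and botframework-connector are installed).
from botbuilder.dialogs.choices import Channel
from botframework.connector import Channels

# Slack is no longer a key in max_actions, so the lookup falls through
# to the `else False` branch regardless of the button count.
assert Channel.supports_suggested_actions(Channels.slack, 1) is False

# Channels still in the lookup keep their documented limits.
assert Channel.supports_suggested_actions(Channels.telegram, 100) is True
assert Channel.supports_suggested_actions(Channels.facebook, 11) is False

# Card actions are untouched by this patch: Slack still allows up to 100.
assert Channel.supports_card_actions(Channels.slack, 100) is True
```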
gh_patches_debug_33574
rasdani/github-patches
git_diff
getnikola__nikola-3501
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> dispatch PANDOC_OPTIONS base on input extensions <!-- Before creating an issue: * make sure you are using an up-to-date version of Nikola * search for existing issues that might be related Describe your requested features as precisely as possible. --> # Summary Currently, `COMPILERS` only has a key `pandoc`, and `PANDOC_OPTIONS` is set for all the extensions specified there. The proposed feature simply suggests to have configurable dispatch logic to allow multiple extensions calling pandoc with a different set of `PANDOC_OPTIONS`. # Example use case My personal use for this would be to convert one of my site currently using `.md` and `.markdown` to mean a different target format. Both will be converted to HTML, but one of them is slideshow, another is normal "article"/post. Another possible use case is, if one want to specify markdown extensions in pandoc, say `-t markdown+...-...`, then `COMPILERS['pandoc'] = ['.rst', '.md']` won't work with `PANDOC_OPTIONS = ['-t', 'markdown+...-...']` because that would be wrong input format for `.rst`. # Suggested implementation Probably the natural way is to promote ```py PANDOC_OPTIONS: Union[list, Dict[str, list]] ``` where the keys are those in `COMPILERS['pandoc']`. If it makes sense I can send a PR doing that. It seems that it only requires changes in <https://github.com/getnikola/nikola/blob/master/nikola/plugins/compile/pandoc.py#L52-L56>, is it true? </issue> <code> [start of nikola/plugins/compile/pandoc.py] 1 # -*- coding: utf-8 -*- 2 3 # Copyright © 2012-2021 Roberto Alsina and others. 4 5 # Permission is hereby granted, free of charge, to any 6 # person obtaining a copy of this software and associated 7 # documentation files (the "Software"), to deal in the 8 # Software without restriction, including without limitation 9 # the rights to use, copy, modify, merge, publish, 10 # distribute, sublicense, and/or sell copies of the 11 # Software, and to permit persons to whom the Software is 12 # furnished to do so, subject to the following conditions: 13 # 14 # The above copyright notice and this permission notice 15 # shall be included in all copies or substantial portions of 16 # the Software. 17 # 18 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY 19 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE 20 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR 21 # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS 22 # OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR 23 # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 24 # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 25 # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 26 27 """Page compiler plugin for pandoc. 
28 29 You will need, of course, to install pandoc 30 """ 31 32 33 import io 34 import os 35 import subprocess 36 37 from nikola.plugin_categories import PageCompiler 38 from nikola.utils import req_missing, makedirs, write_metadata 39 40 41 class CompilePandoc(PageCompiler): 42 """Compile markups into HTML using pandoc.""" 43 44 name = "pandoc" 45 friendly_name = "pandoc" 46 47 def set_site(self, site): 48 """Set Nikola site.""" 49 self.config_dependencies = [str(site.config['PANDOC_OPTIONS'])] 50 super().set_site(site) 51 52 def compile(self, source, dest, is_two_file=True, post=None, lang=None): 53 """Compile the source file into HTML and save as dest.""" 54 makedirs(os.path.dirname(dest)) 55 try: 56 subprocess.check_call(['pandoc', '-o', dest, source] + self.site.config['PANDOC_OPTIONS']) 57 with open(dest, 'r', encoding='utf-8-sig') as inf: 58 output, shortcode_deps = self.site.apply_shortcodes(inf.read()) 59 with open(dest, 'w', encoding='utf-8') as outf: 60 outf.write(output) 61 if post is None: 62 if shortcode_deps: 63 self.logger.error( 64 "Cannot save dependencies for post {0} (post unknown)", 65 source) 66 else: 67 post._depfile[dest] += shortcode_deps 68 except OSError as e: 69 if e.strreror == 'No such file or directory': 70 req_missing(['pandoc'], 'build this site (compile with pandoc)', python=False) 71 72 def compile_string(self, data, source_path=None, is_two_file=True, post=None, lang=None): 73 """Compile into HTML strings.""" 74 raise ValueError("Pandoc compiler does not support compile_string due to multiple output formats") 75 76 def create_post(self, path, **kw): 77 """Create a new post.""" 78 content = kw.pop('content', None) 79 onefile = kw.pop('onefile', False) 80 # is_page is not used by create_post as of now. 81 kw.pop('is_page', False) 82 metadata = {} 83 metadata.update(self.default_metadata) 84 metadata.update(kw) 85 makedirs(os.path.dirname(path)) 86 if not content.endswith('\n'): 87 content += '\n' 88 with io.open(path, "w+", encoding="utf8") as fd: 89 if onefile: 90 fd.write(write_metadata(metadata, comment_wrap=True, site=self.site, compiler=self)) 91 fd.write(content) 92 [end of nikola/plugins/compile/pandoc.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/nikola/plugins/compile/pandoc.py b/nikola/plugins/compile/pandoc.py --- a/nikola/plugins/compile/pandoc.py +++ b/nikola/plugins/compile/pandoc.py @@ -33,6 +33,8 @@ import io import os import subprocess +from typing import List +from pathlib import Path from nikola.plugin_categories import PageCompiler from nikola.utils import req_missing, makedirs, write_metadata @@ -49,11 +51,30 @@ self.config_dependencies = [str(site.config['PANDOC_OPTIONS'])] super().set_site(site) + def _get_pandoc_options(self, source: str) -> List[str]: + """Obtain pandoc args from config depending on type and file extensions.""" + # Union[List[str], Dict[str, List[str]]] + config_options = self.site.config['PANDOC_OPTIONS'] + type_ = type(config_options) + if type_ is list: + pandoc_options = config_options + elif type_ is dict: + ext = Path(source).suffix + try: + pandoc_options = config_options[ext] + except KeyError: + self.logger.warn('Setting PANDOC_OPTIONS to [], because extension {} is not defined in PANDOC_OPTIONS: {}.'.format(ext, config_options)) + pandoc_options = [] + else: + self.logger.warn('Setting PANDOC_OPTIONS to [], because PANDOC_OPTIONS is expected to be of type Union[List[str], Dict[str, List[str]]] but this is not: {}'.format(config_options)) + pandoc_options = [] + return pandoc_options + def compile(self, source, dest, is_two_file=True, post=None, lang=None): """Compile the source file into HTML and save as dest.""" makedirs(os.path.dirname(dest)) try: - subprocess.check_call(['pandoc', '-o', dest, source] + self.site.config['PANDOC_OPTIONS']) + subprocess.check_call(['pandoc', '-o', dest, source] + self._get_pandoc_options(source)) with open(dest, 'r', encoding='utf-8-sig') as inf: output, shortcode_deps = self.site.apply_shortcodes(inf.read()) with open(dest, 'w', encoding='utf-8') as outf:
{"golden_diff": "diff --git a/nikola/plugins/compile/pandoc.py b/nikola/plugins/compile/pandoc.py\n--- a/nikola/plugins/compile/pandoc.py\n+++ b/nikola/plugins/compile/pandoc.py\n@@ -33,6 +33,8 @@\n import io\n import os\n import subprocess\n+from typing import List\n+from pathlib import Path\n \n from nikola.plugin_categories import PageCompiler\n from nikola.utils import req_missing, makedirs, write_metadata\n@@ -49,11 +51,30 @@\n self.config_dependencies = [str(site.config['PANDOC_OPTIONS'])]\n super().set_site(site)\n \n+ def _get_pandoc_options(self, source: str) -> List[str]:\n+ \"\"\"Obtain pandoc args from config depending on type and file extensions.\"\"\"\n+ # Union[List[str], Dict[str, List[str]]]\n+ config_options = self.site.config['PANDOC_OPTIONS']\n+ type_ = type(config_options)\n+ if type_ is list:\n+ pandoc_options = config_options\n+ elif type_ is dict:\n+ ext = Path(source).suffix\n+ try:\n+ pandoc_options = config_options[ext]\n+ except KeyError:\n+ self.logger.warn('Setting PANDOC_OPTIONS to [], because extension {} is not defined in PANDOC_OPTIONS: {}.'.format(ext, config_options))\n+ pandoc_options = []\n+ else:\n+ self.logger.warn('Setting PANDOC_OPTIONS to [], because PANDOC_OPTIONS is expected to be of type Union[List[str], Dict[str, List[str]]] but this is not: {}'.format(config_options))\n+ pandoc_options = []\n+ return pandoc_options\n+\n def compile(self, source, dest, is_two_file=True, post=None, lang=None):\n \"\"\"Compile the source file into HTML and save as dest.\"\"\"\n makedirs(os.path.dirname(dest))\n try:\n- subprocess.check_call(['pandoc', '-o', dest, source] + self.site.config['PANDOC_OPTIONS'])\n+ subprocess.check_call(['pandoc', '-o', dest, source] + self._get_pandoc_options(source))\n with open(dest, 'r', encoding='utf-8-sig') as inf:\n output, shortcode_deps = self.site.apply_shortcodes(inf.read())\n with open(dest, 'w', encoding='utf-8') as outf:\n", "issue": "dispatch PANDOC_OPTIONS base on input extensions\n<!--\r\nBefore creating an issue:\r\n* make sure you are using an up-to-date version of Nikola\r\n* search for existing issues that might be related\r\n\r\nDescribe your requested features as precisely as possible. -->\r\n\r\n# Summary\r\n\r\nCurrently, `COMPILERS` only has a key `pandoc`, and `PANDOC_OPTIONS` is set for all the extensions specified there. The proposed feature simply suggests to have configurable dispatch logic to allow multiple extensions calling pandoc with a different set of `PANDOC_OPTIONS`.\r\n\r\n# Example use case\r\n\r\nMy personal use for this would be to convert one of my site currently using `.md` and `.markdown` to mean a different target format. Both will be converted to HTML, but one of them is slideshow, another is normal \"article\"/post.\r\n\r\nAnother possible use case is, if one want to specify markdown extensions in pandoc, say `-t markdown+...-...`, then `COMPILERS['pandoc'] = ['.rst', '.md']` won't work with `PANDOC_OPTIONS = ['-t', 'markdown+...-...']` because that would be wrong input format for `.rst`.\r\n\r\n# Suggested implementation\r\n\r\nProbably the natural way is to promote\r\n\r\n```py\r\nPANDOC_OPTIONS: Union[list, Dict[str, list]]\r\n```\r\n\r\nwhere the keys are those in `COMPILERS['pandoc']`.\r\n\r\nIf it makes sense I can send a PR doing that. 
It seems that it only requires changes in <https://github.com/getnikola/nikola/blob/master/nikola/plugins/compile/pandoc.py#L52-L56>, is it true?\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2021 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"Page compiler plugin for pandoc.\n\nYou will need, of course, to install pandoc\n\"\"\"\n\n\nimport io\nimport os\nimport subprocess\n\nfrom nikola.plugin_categories import PageCompiler\nfrom nikola.utils import req_missing, makedirs, write_metadata\n\n\nclass CompilePandoc(PageCompiler):\n \"\"\"Compile markups into HTML using pandoc.\"\"\"\n\n name = \"pandoc\"\n friendly_name = \"pandoc\"\n\n def set_site(self, site):\n \"\"\"Set Nikola site.\"\"\"\n self.config_dependencies = [str(site.config['PANDOC_OPTIONS'])]\n super().set_site(site)\n\n def compile(self, source, dest, is_two_file=True, post=None, lang=None):\n \"\"\"Compile the source file into HTML and save as dest.\"\"\"\n makedirs(os.path.dirname(dest))\n try:\n subprocess.check_call(['pandoc', '-o', dest, source] + self.site.config['PANDOC_OPTIONS'])\n with open(dest, 'r', encoding='utf-8-sig') as inf:\n output, shortcode_deps = self.site.apply_shortcodes(inf.read())\n with open(dest, 'w', encoding='utf-8') as outf:\n outf.write(output)\n if post is None:\n if shortcode_deps:\n self.logger.error(\n \"Cannot save dependencies for post {0} (post unknown)\",\n source)\n else:\n post._depfile[dest] += shortcode_deps\n except OSError as e:\n if e.strreror == 'No such file or directory':\n req_missing(['pandoc'], 'build this site (compile with pandoc)', python=False)\n\n def compile_string(self, data, source_path=None, is_two_file=True, post=None, lang=None):\n \"\"\"Compile into HTML strings.\"\"\"\n raise ValueError(\"Pandoc compiler does not support compile_string due to multiple output formats\")\n\n def create_post(self, path, **kw):\n \"\"\"Create a new post.\"\"\"\n content = kw.pop('content', None)\n onefile = kw.pop('onefile', False)\n # is_page is not used by create_post as of now.\n kw.pop('is_page', False)\n metadata = {}\n metadata.update(self.default_metadata)\n metadata.update(kw)\n makedirs(os.path.dirname(path))\n if not content.endswith('\\n'):\n content += '\\n'\n with io.open(path, \"w+\", encoding=\"utf8\") as fd:\n if onefile:\n fd.write(write_metadata(metadata, comment_wrap=True, site=self.site, compiler=self))\n fd.write(content)\n", "path": 
"nikola/plugins/compile/pandoc.py"}]}
1,878
527
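With `_get_pandoc_options` dispatching on file extension, the use case from the issue — one markdown extension for ordinary posts, another for slideshows — reduces to a `conf.py` fragment along these lines (a sketch; the reveal.js flags are illustrative, not part of the patch):

```python
# Hypothetical Nikola conf.py fragment exercising the per-extension dispatch.
COMPILERS = {
    "pandoc": (".md", ".markdown"),
}

# After the patch, PANDOC_OPTIONS may be a dict keyed by source extension
# instead of a single flat list applied to every extension.
PANDOC_OPTIONS = {
    ".md": [],                              # ordinary posts: pandoc defaults
    ".markdown": ["-t", "revealjs", "-s"],  # slideshows via pandoc's reveal.js writer
}
```

Extensions missing from the dict fall back to `[]` with a logged warning, as does any `PANDOC_OPTIONS` value that is neither a list nor a dict.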
gh_patches_debug_3348
rasdani/github-patches
git_diff
Mailu__Mailu-811
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Dovecot service takes long to start ``` docker stack ps mailu ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS yh6u3jrckt32 mailu_redis.1 redis:alpine prod02.usrpro.com Running Running 4 minutes ago otpt6os15sy3 mailu_smtp.1 mailu/postfix:master prod01.usrpro.com Running Running 4 minutes ago artmzgamypoy mailu_webdav.1 mailu/radicale:master prod03.usrpro.com Running Running 4 minutes ago x5ynw6h5ap3p mailu_imap.1 usrpro/dovecot:workaround-lucene prod03.usrpro.com Running Starting 4 minutes ago n674xs649jlw mailu_admin.1 mailu/admin:master prod01.usrpro.com Running Running 4 minutes ago xrmri5cqjzqn mailu_antivirus.1 mailu/clamav:master prod02.usrpro.com Running Running 2 minutes ago koiuyxlj2cpt mailu_antispam.1 mailu/rspamd:master prod03.usrpro.com Running Running 4 minutes ago hdpu4gq182ha mailu_webmail.1 mailu/rainloop:master prod01.usrpro.com Running Running 4 minutes ago wgqkwnz6wyxp mailu_front.1 mailu/nginx:master prod01.usrpro.com Running Running 4 minutes ago v2eznworzs0g mailu_front.2 mailu/nginx:master prod02.usrpro.com Running Running 4 minutes ago nckm82d8aryd mailu_front.3 mailu/nginx:master prod03.usrpro.com Running Running 4 minutes ago ``` In `start.py`, most probably it is stuck on: ``` multiprocessing.Process(target=start_podop).start() ``` Have to confirm with writing some logging, like requested in issue #588 . </issue> <code> [start of core/dovecot/start.py] 1 #!/usr/bin/python3 2 3 import jinja2 4 import os 5 import socket 6 import glob 7 import multiprocessing 8 import tenacity 9 10 from tenacity import retry 11 from podop import run_server 12 13 14 def start_podop(): 15 os.setuid(8) 16 run_server(3 if "DEBUG" in os.environ else 0, "dovecot", "/tmp/podop.socket", [ 17 ("quota", "url", "http://admin/internal/dovecot/§"), 18 ("auth", "url", "http://admin/internal/dovecot/§"), 19 ("sieve", "url", "http://admin/internal/dovecot/§"), 20 ]) 21 22 convert = lambda src, dst: open(dst, "w").write(jinja2.Template(open(src).read()).render(**os.environ)) 23 24 # Actual startup script 25 resolve = retry(socket.gethostbyname, stop=tenacity.stop_after_attempt(100), wait=tenacity.wait_random(min=2, max=5)) 26 os.environ["FRONT_ADDRESS"] = resolve(os.environ.get("FRONT_ADDRESS", "front")) 27 os.environ["REDIS_ADDRESS"] = resolve(os.environ.get("REDIS_ADDRESS", "redis")) 28 if os.environ["WEBMAIL"] != "none": 29 os.environ["WEBMAIL_ADDRESS"] = resolve(os.environ.get("WEBMAIL_ADDRESS", "webmail")) 30 31 for dovecot_file in glob.glob("/conf/*.conf"): 32 convert(dovecot_file, os.path.join("/etc/dovecot", os.path.basename(dovecot_file))) 33 34 # Run Podop, then postfix 35 multiprocessing.Process(target=start_podop).start() 36 os.system("chown -R mail:mail /mail /var/lib/dovecot /conf") 37 os.execv("/usr/sbin/dovecot", ["dovecot", "-c", "/etc/dovecot/dovecot.conf", "-F"]) 38 [end of core/dovecot/start.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/core/dovecot/start.py b/core/dovecot/start.py --- a/core/dovecot/start.py +++ b/core/dovecot/start.py @@ -33,5 +33,6 @@ # Run Podop, then postfix multiprocessing.Process(target=start_podop).start() -os.system("chown -R mail:mail /mail /var/lib/dovecot /conf") +os.system("chown mail:mail /mail") +os.system("chown -R mail:mail /var/lib/dovecot /conf") os.execv("/usr/sbin/dovecot", ["dovecot", "-c", "/etc/dovecot/dovecot.conf", "-F"])
{"golden_diff": "diff --git a/core/dovecot/start.py b/core/dovecot/start.py\n--- a/core/dovecot/start.py\n+++ b/core/dovecot/start.py\n@@ -33,5 +33,6 @@\n \n # Run Podop, then postfix\n multiprocessing.Process(target=start_podop).start()\n-os.system(\"chown -R mail:mail /mail /var/lib/dovecot /conf\")\n+os.system(\"chown mail:mail /mail\")\n+os.system(\"chown -R mail:mail /var/lib/dovecot /conf\")\n os.execv(\"/usr/sbin/dovecot\", [\"dovecot\", \"-c\", \"/etc/dovecot/dovecot.conf\", \"-F\"])\n", "issue": "Dovecot service takes long to start\n```\r\ndocker stack ps mailu\r\nID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS\r\nyh6u3jrckt32 mailu_redis.1 redis:alpine prod02.usrpro.com Running Running 4 minutes ago \r\notpt6os15sy3 mailu_smtp.1 mailu/postfix:master prod01.usrpro.com Running Running 4 minutes ago \r\nartmzgamypoy mailu_webdav.1 mailu/radicale:master prod03.usrpro.com Running Running 4 minutes ago \r\nx5ynw6h5ap3p mailu_imap.1 usrpro/dovecot:workaround-lucene prod03.usrpro.com Running Starting 4 minutes ago \r\nn674xs649jlw mailu_admin.1 mailu/admin:master prod01.usrpro.com Running Running 4 minutes ago \r\nxrmri5cqjzqn mailu_antivirus.1 mailu/clamav:master prod02.usrpro.com Running Running 2 minutes ago \r\nkoiuyxlj2cpt mailu_antispam.1 mailu/rspamd:master prod03.usrpro.com Running Running 4 minutes ago \r\nhdpu4gq182ha mailu_webmail.1 mailu/rainloop:master prod01.usrpro.com Running Running 4 minutes ago \r\nwgqkwnz6wyxp mailu_front.1 mailu/nginx:master prod01.usrpro.com Running Running 4 minutes ago \r\nv2eznworzs0g mailu_front.2 mailu/nginx:master prod02.usrpro.com Running Running 4 minutes ago \r\nnckm82d8aryd mailu_front.3 mailu/nginx:master prod03.usrpro.com Running Running 4 minutes ago \r\n```\r\nIn `start.py`, most probably it is stuck on:\r\n```\r\nmultiprocessing.Process(target=start_podop).start()\r\n```\r\n\r\nHave to confirm with writing some logging, like requested in issue #588 .\n", "before_files": [{"content": "#!/usr/bin/python3\n\nimport jinja2\nimport os\nimport socket\nimport glob\nimport multiprocessing\nimport tenacity\n\nfrom tenacity import retry\nfrom podop import run_server\n\n\ndef start_podop():\n os.setuid(8)\n run_server(3 if \"DEBUG\" in os.environ else 0, \"dovecot\", \"/tmp/podop.socket\", [\n\t\t(\"quota\", \"url\", \"http://admin/internal/dovecot/\u00a7\"),\n\t\t(\"auth\", \"url\", \"http://admin/internal/dovecot/\u00a7\"),\n\t\t(\"sieve\", \"url\", \"http://admin/internal/dovecot/\u00a7\"),\n ])\n\nconvert = lambda src, dst: open(dst, \"w\").write(jinja2.Template(open(src).read()).render(**os.environ))\n\n# Actual startup script\nresolve = retry(socket.gethostbyname, stop=tenacity.stop_after_attempt(100), wait=tenacity.wait_random(min=2, max=5))\nos.environ[\"FRONT_ADDRESS\"] = resolve(os.environ.get(\"FRONT_ADDRESS\", \"front\"))\nos.environ[\"REDIS_ADDRESS\"] = resolve(os.environ.get(\"REDIS_ADDRESS\", \"redis\"))\nif os.environ[\"WEBMAIL\"] != \"none\":\n os.environ[\"WEBMAIL_ADDRESS\"] = resolve(os.environ.get(\"WEBMAIL_ADDRESS\", \"webmail\"))\n\nfor dovecot_file in glob.glob(\"/conf/*.conf\"):\n convert(dovecot_file, os.path.join(\"/etc/dovecot\", os.path.basename(dovecot_file)))\n\n# Run Podop, then postfix\nmultiprocessing.Process(target=start_podop).start()\nos.system(\"chown -R mail:mail /mail /var/lib/dovecot /conf\")\nos.execv(\"/usr/sbin/dovecot\", [\"dovecot\", \"-c\", \"/etc/dovecot/dovecot.conf\", \"-F\"])\n", "path": "core/dovecot/start.py"}]}
1,514
151
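The minutes-long `Starting` state traces to `chown -R mail:mail /mail` touching every inode in the mail volume on each boot; the golden diff keeps recursion only for the small state directories. A sketch of what the recursive form actually costs, assuming a POSIX host:

```python
# Sketch: the work `chown -R` performs, written out in Python. On a maildir
# with hundreds of thousands of messages this walk dominates startup time.
import os
import pwd
import grp

def chown_recursive(path: str, user: str, group: str) -> None:
    uid = pwd.getpwnam(user).pw_uid
    gid = grp.getgrnam(group).gr_gid
    os.chown(path, uid, gid)  # the non-recursive form stops here
    for root, dirs, files in os.walk(path):
        for name in dirs + files:
            os.chown(os.path.join(root, name), uid, gid)
```

The patched `start.py` performs only the single top-level chown on `/mail` and reserves the full walk for `/var/lib/dovecot` and `/conf`.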
gh_patches_debug_6841
rasdani/github-patches
git_diff
activeloopai__deeplake-1738
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [FEATURE]Do not overwrite python logging configuration ## 🚨🚨 Feature Request Be able to use both `logging` and `hub` in the same python script ### Current behavior I found the following code has undesired behavior: when adding the line `import hub`, the logging configuration will be overwritten. ```python import logging import hub # this will overwrite the logging config logging.basicConfig(filename = "sample.log", level = logging.DEBUG, format = '%(asctime)s: %(filename)s: %(levelname)s: %(message)s') logger = logging.getLogger(__name__) logger.debug("this is a debug message") logger.info("this is an info message") logger.warning("this is a warning message") logger.error("this is an error message") ``` </issue> <code> [start of hub/client/log.py] 1 """ 2 License: 3 This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. 4 If a copy of the MPL was not distributed with this file, You can obtain one at https://mozilla.org/MPL/2.0/. 5 """ 6 7 import logging 8 import sys 9 10 logger = logging.getLogger("hub") 11 12 13 def configure_logger(debug=0): 14 log_level = logging.DEBUG if debug == 1 else logging.INFO 15 logging.basicConfig(format="%(message)s", level=log_level, stream=sys.stdout) 16 17 18 configure_logger(0) 19 [end of hub/client/log.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/hub/client/log.py b/hub/client/log.py --- a/hub/client/log.py +++ b/hub/client/log.py @@ -12,7 +12,17 @@ def configure_logger(debug=0): log_level = logging.DEBUG if debug == 1 else logging.INFO - logging.basicConfig(format="%(message)s", level=log_level, stream=sys.stdout) + logger.setLevel(log_level) + + stream_handler = logging.StreamHandler(stream=sys.stdout) + stream_handler.setLevel(log_level) + + formatter = logging.Formatter("%(message)s") + + stream_handler.setFormatter(formatter) + + logger.addHandler(stream_handler) + logger.propagate = False configure_logger(0)
{"golden_diff": "diff --git a/hub/client/log.py b/hub/client/log.py\n--- a/hub/client/log.py\n+++ b/hub/client/log.py\n@@ -12,7 +12,17 @@\n \n def configure_logger(debug=0):\n log_level = logging.DEBUG if debug == 1 else logging.INFO\n- logging.basicConfig(format=\"%(message)s\", level=log_level, stream=sys.stdout)\n+ logger.setLevel(log_level)\n+\n+ stream_handler = logging.StreamHandler(stream=sys.stdout)\n+ stream_handler.setLevel(log_level)\n+\n+ formatter = logging.Formatter(\"%(message)s\")\n+\n+ stream_handler.setFormatter(formatter)\n+\n+ logger.addHandler(stream_handler)\n+ logger.propagate = False\n \n \n configure_logger(0)\n", "issue": "[FEATURE]Do not overwrite python logging configuration\n## \ud83d\udea8\ud83d\udea8 Feature Request\r\n\r\nBe able to use both `logging` and `hub` in the same python script\r\n\r\n### Current behavior\r\n\r\nI found the following code has undesired behavior: when adding the line `import hub`, the logging configuration will be overwritten.\r\n\r\n```python\r\nimport logging\r\nimport hub # this will overwrite the logging config\r\n\r\nlogging.basicConfig(filename = \"sample.log\", level = logging.DEBUG,\r\n format = '%(asctime)s: %(filename)s: %(levelname)s: %(message)s')\r\n\r\nlogger = logging.getLogger(__name__)\r\n\r\nlogger.debug(\"this is a debug message\")\r\nlogger.info(\"this is an info message\")\r\nlogger.warning(\"this is a warning message\")\r\nlogger.error(\"this is an error message\")\r\n\r\n```\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nLicense:\nThis Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.\nIf a copy of the MPL was not distributed with this file, You can obtain one at https://mozilla.org/MPL/2.0/.\n\"\"\"\n\nimport logging\nimport sys\n\nlogger = logging.getLogger(\"hub\")\n\n\ndef configure_logger(debug=0):\n log_level = logging.DEBUG if debug == 1 else logging.INFO\n logging.basicConfig(format=\"%(message)s\", level=log_level, stream=sys.stdout)\n\n\nconfigure_logger(0)\n", "path": "hub/client/log.py"}]}
843
158
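The underlying issue is that `logging.basicConfig()` configures the *root* logger, and once `import hub` has called it at import time, a later `basicConfig()` in the application is silently a no-op. The patch scopes configuration to the library's own logger instead. A minimal reproduction of the corrected pattern, independent of hub itself:

```python
# Library-side pattern from the fix: configure only the library's logger.
import logging
import sys

lib_logger = logging.getLogger("mylib")            # stands in for "hub"
handler = logging.StreamHandler(stream=sys.stdout)
handler.setFormatter(logging.Formatter("%(message)s"))
lib_logger.addHandler(handler)
lib_logger.setLevel(logging.INFO)
lib_logger.propagate = False   # keep library records away from root handlers

# Application side: basicConfig now works, since the root logger is untouched.
logging.basicConfig(filename="sample.log", level=logging.DEBUG,
                    format="%(asctime)s: %(filename)s: %(levelname)s: %(message)s")
logging.getLogger(__name__).debug("lands in sample.log, not on stdout")
```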
gh_patches_debug_24390
rasdani/github-patches
git_diff
pulp__pulpcore-4188
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Reclaim space for repository fails with Cannot delete some instances of model 'Artifact' because they are referenced through protected foreign keys: 'ContentArtifact.artifact'." **Version** 3.16, but probably all versions **Describe the bug** - Reclaim space for repository fails with the following error. ~~~ Task paused with error: "("Cannot delete some instances of model 'Artifact' because they are referenced through protected foreign keys: 'ContentArtifact.artifact'.", {<ContentArtifact: pk=452959ad-c045-4e85-bf9f-6651ba37f57d>})" ~~~ **To Reproduce** See BZ **Additional context** https://bugzilla.redhat.com/show_bug.cgi?id=2169322 </issue> <code> [start of pulpcore/app/tasks/reclaim_space.py] 1 from pulpcore.app.models import ( 2 Artifact, 3 Content, 4 ContentArtifact, 5 ProgressReport, 6 PublishedMetadata, 7 Repository, 8 RepositoryVersion, 9 ) 10 from pulpcore.app.util import get_domain 11 12 13 def reclaim_space(repo_pks, keeplist_rv_pks=None, force=False): 14 """ 15 This task frees-up disk space by removing Artifact files from the filesystem for Content 16 exclusive to the list of provided repos. 17 18 Note: content marked as `proctected` will be excluded from the reclaim disk space. 19 20 Kwargs: 21 repo_pks (list): A list of repo pks the disk reclaim space is performed on. 22 keeplist_rv_pks (list): A list of repo version pks that will be excluded from the reclaim 23 disk space. 24 force (bool): If True, uploaded content will be taken into account. 25 26 """ 27 reclaimed_repos = Repository.objects.filter(pk__in=repo_pks) 28 for repo in reclaimed_repos: 29 repo.invalidate_cache(everything=True) 30 31 domain = get_domain() 32 rest_of_repos = Repository.objects.filter(pulp_domain=domain).exclude(pk__in=repo_pks) 33 c_keep_qs = Content.objects.filter(repositories__in=rest_of_repos) 34 c_reclaim_qs = Content.objects.filter(repositories__in=repo_pks) 35 c_reclaim_qs = c_reclaim_qs.exclude( 36 pk__in=c_keep_qs, pulp_type=PublishedMetadata.get_pulp_type() 37 ) 38 39 if keeplist_rv_pks: 40 rv_qs = RepositoryVersion.objects.filter(pk__in=keeplist_rv_pks) 41 rv_content = Content.objects.none() 42 for rv in rv_qs.iterator(): 43 rv_content |= rv.content 44 c_reclaim_qs = c_reclaim_qs.exclude(pk__in=rv_content) 45 46 content_distinct = c_reclaim_qs.distinct("pulp_type") 47 unprotected = [] 48 for content in content_distinct: 49 if not content.cast().PROTECTED_FROM_RECLAIM: 50 unprotected.append(content.pulp_type) 51 52 ca_qs = ContentArtifact.objects.select_related("content", "artifact").filter( 53 content__in=c_reclaim_qs.values("pk"), artifact__isnull=False 54 ) 55 if not force: 56 ca_qs = ca_qs.filter(remoteartifact__isnull=False) 57 artifact_pks = set() 58 ca_to_update = [] 59 for ca in ca_qs.iterator(): 60 if ca.content.pulp_type in unprotected: 61 artifact_pks.add(ca.artifact.pk) 62 ca.artifact = None 63 ca_to_update.append(ca) 64 65 ContentArtifact.objects.bulk_update(objs=ca_to_update, fields=["artifact"], batch_size=1000) 66 artifacts_to_delete = Artifact.objects.filter(pk__in=artifact_pks) 67 progress_bar = ProgressReport( 68 message="Reclaim disk space", 69 total=artifacts_to_delete.count(), 70 code="reclaim-space.artifact", 71 done=0, 72 state="running", 73 ) 74 progress_bar.save() 75 76 counter = 0 77 interval = 100 78 for artifact in artifacts_to_delete.iterator(): 79 # we need to manually call delete() because it cleans up the file on the filesystem 80 artifact.delete() 
81 progress_bar.done += 1 82 counter += 1 83 84 if counter >= interval: 85 progress_bar.save() 86 counter = 0 87 88 progress_bar.state = "completed" 89 progress_bar.save() 90 [end of pulpcore/app/tasks/reclaim_space.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pulpcore/app/tasks/reclaim_space.py b/pulpcore/app/tasks/reclaim_space.py --- a/pulpcore/app/tasks/reclaim_space.py +++ b/pulpcore/app/tasks/reclaim_space.py @@ -1,3 +1,7 @@ +from logging import getLogger + +from django.db.models.deletion import ProtectedError + from pulpcore.app.models import ( Artifact, Content, @@ -9,6 +13,8 @@ ) from pulpcore.app.util import get_domain +log = getLogger(__name__) + def reclaim_space(repo_pks, keeplist_rv_pks=None, force=False): """ @@ -76,10 +82,16 @@ counter = 0 interval = 100 for artifact in artifacts_to_delete.iterator(): - # we need to manually call delete() because it cleans up the file on the filesystem - artifact.delete() - progress_bar.done += 1 - counter += 1 + try: + # we need to manually call delete() because it cleans up the file on the filesystem + artifact.delete() + except ProtectedError as e: + # Rarely artifact could be shared between to different content units. + # Just log and skip the artifact deletion in this case + log.info(e) + else: + progress_bar.done += 1 + counter += 1 if counter >= interval: progress_bar.save()
{"golden_diff": "diff --git a/pulpcore/app/tasks/reclaim_space.py b/pulpcore/app/tasks/reclaim_space.py\n--- a/pulpcore/app/tasks/reclaim_space.py\n+++ b/pulpcore/app/tasks/reclaim_space.py\n@@ -1,3 +1,7 @@\n+from logging import getLogger\n+\n+from django.db.models.deletion import ProtectedError\n+\n from pulpcore.app.models import (\n Artifact,\n Content,\n@@ -9,6 +13,8 @@\n )\n from pulpcore.app.util import get_domain\n \n+log = getLogger(__name__)\n+\n \n def reclaim_space(repo_pks, keeplist_rv_pks=None, force=False):\n \"\"\"\n@@ -76,10 +82,16 @@\n counter = 0\n interval = 100\n for artifact in artifacts_to_delete.iterator():\n- # we need to manually call delete() because it cleans up the file on the filesystem\n- artifact.delete()\n- progress_bar.done += 1\n- counter += 1\n+ try:\n+ # we need to manually call delete() because it cleans up the file on the filesystem\n+ artifact.delete()\n+ except ProtectedError as e:\n+ # Rarely artifact could be shared between to different content units.\n+ # Just log and skip the artifact deletion in this case\n+ log.info(e)\n+ else:\n+ progress_bar.done += 1\n+ counter += 1\n \n if counter >= interval:\n progress_bar.save()\n", "issue": "Reclaim space for repository fails with Cannot delete some instances of model 'Artifact' because they are referenced through protected foreign keys: 'ContentArtifact.artifact'.\"\n**Version**\r\n3.16, but probably all versions\r\n\r\n**Describe the bug**\r\n\r\n- Reclaim space for repository fails with the following error.\r\n\r\n ~~~\r\n Task paused with error: \"(\"Cannot delete some instances of model 'Artifact' because they are referenced through protected foreign keys: 'ContentArtifact.artifact'.\", {<ContentArtifact: pk=452959ad-c045-4e85-bf9f-6651ba37f57d>})\"\r\n ~~~\r\n\r\n**To Reproduce**\r\nSee BZ\r\n\r\n**Additional context**\r\nhttps://bugzilla.redhat.com/show_bug.cgi?id=2169322\r\n\n", "before_files": [{"content": "from pulpcore.app.models import (\n Artifact,\n Content,\n ContentArtifact,\n ProgressReport,\n PublishedMetadata,\n Repository,\n RepositoryVersion,\n)\nfrom pulpcore.app.util import get_domain\n\n\ndef reclaim_space(repo_pks, keeplist_rv_pks=None, force=False):\n \"\"\"\n This task frees-up disk space by removing Artifact files from the filesystem for Content\n exclusive to the list of provided repos.\n\n Note: content marked as `proctected` will be excluded from the reclaim disk space.\n\n Kwargs:\n repo_pks (list): A list of repo pks the disk reclaim space is performed on.\n keeplist_rv_pks (list): A list of repo version pks that will be excluded from the reclaim\n disk space.\n force (bool): If True, uploaded content will be taken into account.\n\n \"\"\"\n reclaimed_repos = Repository.objects.filter(pk__in=repo_pks)\n for repo in reclaimed_repos:\n repo.invalidate_cache(everything=True)\n\n domain = get_domain()\n rest_of_repos = Repository.objects.filter(pulp_domain=domain).exclude(pk__in=repo_pks)\n c_keep_qs = Content.objects.filter(repositories__in=rest_of_repos)\n c_reclaim_qs = Content.objects.filter(repositories__in=repo_pks)\n c_reclaim_qs = c_reclaim_qs.exclude(\n pk__in=c_keep_qs, pulp_type=PublishedMetadata.get_pulp_type()\n )\n\n if keeplist_rv_pks:\n rv_qs = RepositoryVersion.objects.filter(pk__in=keeplist_rv_pks)\n rv_content = Content.objects.none()\n for rv in rv_qs.iterator():\n rv_content |= rv.content\n c_reclaim_qs = c_reclaim_qs.exclude(pk__in=rv_content)\n\n content_distinct = c_reclaim_qs.distinct(\"pulp_type\")\n unprotected = []\n for content in 
content_distinct:\n if not content.cast().PROTECTED_FROM_RECLAIM:\n unprotected.append(content.pulp_type)\n\n ca_qs = ContentArtifact.objects.select_related(\"content\", \"artifact\").filter(\n content__in=c_reclaim_qs.values(\"pk\"), artifact__isnull=False\n )\n if not force:\n ca_qs = ca_qs.filter(remoteartifact__isnull=False)\n artifact_pks = set()\n ca_to_update = []\n for ca in ca_qs.iterator():\n if ca.content.pulp_type in unprotected:\n artifact_pks.add(ca.artifact.pk)\n ca.artifact = None\n ca_to_update.append(ca)\n\n ContentArtifact.objects.bulk_update(objs=ca_to_update, fields=[\"artifact\"], batch_size=1000)\n artifacts_to_delete = Artifact.objects.filter(pk__in=artifact_pks)\n progress_bar = ProgressReport(\n message=\"Reclaim disk space\",\n total=artifacts_to_delete.count(),\n code=\"reclaim-space.artifact\",\n done=0,\n state=\"running\",\n )\n progress_bar.save()\n\n counter = 0\n interval = 100\n for artifact in artifacts_to_delete.iterator():\n # we need to manually call delete() because it cleans up the file on the filesystem\n artifact.delete()\n progress_bar.done += 1\n counter += 1\n\n if counter >= interval:\n progress_bar.save()\n counter = 0\n\n progress_bar.state = \"completed\"\n progress_bar.save()\n", "path": "pulpcore/app/tasks/reclaim_space.py"}]}
1,628
323
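The crash comes from Django's `on_delete=PROTECT` on `ContentArtifact.artifact`: deleting an `Artifact` still referenced by another content unit raises `ProtectedError`, and the fix treats that as a skippable per-row condition rather than a task failure. The idiom in isolation (a sketch; assumes a configured Django project, with the queryset rows standing in for any PROTECT-referenced model):

```python
# Delete-or-skip idiom from the fix, extracted into a standalone helper.
import logging

from django.db.models.deletion import ProtectedError

log = logging.getLogger(__name__)

def delete_unreferenced(queryset):
    """Delete each row, skipping any still protected by a foreign key."""
    deleted = 0
    for obj in queryset.iterator():
        try:
            obj.delete()  # may also clean up files, as Artifact.delete() does
        except ProtectedError as exc:
            # Rare: the row is shared by two content units; log and move on.
            log.info(exc)
        else:
            deleted += 1
    return deleted
```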
gh_patches_debug_53978
rasdani/github-patches
git_diff
scikit-hep__pyhf-1091
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add CodeMeta JSON-LD Context File # Description As part of an IRIS-HEP initiative to improve the citation quality/ease for all software it has been recommended by @danielskatz that projects: - Track authors/contributors to software as it is developed - Create a citation metadata file in the software repo and keep it up to date - Link Github and Zenodo to get DOIs for major releases - In the README and docs clearly says how to cite the software `pyhf` already does all of this, as: - [x] There is an [`AUTHORS` file](https://github.com/scikit-hep/pyhf/blob/31d34a0edd91593d75181cc3ad71563a52600793/AUTHORS) as well as a [listing of the authors in the docs](https://scikit-hep.org/pyhf/index.html#authors) - [x] There is a [`.zenodo.json`](https://github.com/scikit-hep/pyhf/blob/2b0a1e2ba41a2aa1233511524e3aa78e696ead34/.zenodo.json) that formats Zenodo with the correct metadata and adds the ORCID of the authors - [x] There is a Zenodo DOI that is kept in sync with our release schedule (almost) fully automatically (almost as it requires 1 button click). [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.1169739.svg)](https://doi.org/10.5281/zenodo.1169739) - [x] There is a Citation section in both the [`README`](https://github.com/scikit-hep/pyhf/blob/2b0a1e2ba41a2aa1233511524e3aa78e696ead34/README.rst#citation) and the [docs webpage](https://scikit-hep.org/pyhf/citations.html) with the preferred citation It has been pointed out though that we should also look into adding a [CodeMeta Project JSON-LD Context File](https://codemeta.github.io/jsonld/) as with it > zenodo will read [it] and will be basically the same as [schema.org](http://schema.org/) so that this will help google index software, and tools be able to use the metadata file in [softwareheritage.org](http://softwareheritage.org/) to automatically build a citation for a particular commit. 
(cc @gordonwatts as this might be of interest) </issue> <code> [start of setup.py] 1 from setuptools import setup 2 3 extras_require = { 4 'shellcomplete': ['click_completion'], 5 'tensorflow': [ 6 'tensorflow~=2.2.0', # TensorFlow minor releases are as volatile as major 7 'tensorflow-probability~=0.10.0', 8 ], 9 'torch': ['torch~=1.2'], 10 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'], 11 'xmlio': ['uproot~=3.6'], # Future proof against uproot4 API changes 12 'minuit': ['iminuit~=1.4.3'], # v1.5.0 breaks pyhf for 32b TensorFlow and PyTorch 13 } 14 extras_require['backends'] = sorted( 15 set( 16 extras_require['tensorflow'] 17 + extras_require['torch'] 18 + extras_require['jax'] 19 + extras_require['minuit'] 20 ) 21 ) 22 extras_require['contrib'] = sorted(set(['matplotlib', 'requests'])) 23 extras_require['lint'] = sorted(set(['pyflakes', 'black'])) 24 25 extras_require['test'] = sorted( 26 set( 27 extras_require['backends'] 28 + extras_require['xmlio'] 29 + extras_require['contrib'] 30 + extras_require['shellcomplete'] 31 + [ 32 'pytest~=6.0', 33 'pytest-cov>=2.5.1', 34 'pytest-mock', 35 'pytest-benchmark[histogram]', 36 'pytest-console-scripts', 37 'pytest-mpl', 38 'pydocstyle', 39 'coverage>=4.0', # coveralls 40 'papermill~=2.0', 41 'nteract-scrapbook~=0.2', 42 'jupyter', 43 'uproot~=3.3', 44 'graphviz', 45 'jsonpatch', 46 ] 47 ) 48 ) 49 extras_require['docs'] = sorted( 50 set( 51 [ 52 'sphinx>=3.1.2', 53 'sphinxcontrib-bibtex', 54 'sphinx-click', 55 'sphinx_rtd_theme', 56 'nbsphinx', 57 'ipywidgets', 58 'sphinx-issues', 59 'sphinx-copybutton>0.2.9', 60 ] 61 ) 62 ) 63 extras_require['develop'] = sorted( 64 set( 65 extras_require['docs'] 66 + extras_require['lint'] 67 + extras_require['test'] 68 + ['nbdime', 'bump2version', 'ipython', 'pre-commit', 'check-manifest', 'twine'] 69 ) 70 ) 71 extras_require['complete'] = sorted(set(sum(extras_require.values(), []))) 72 73 74 setup( 75 extras_require=extras_require, 76 use_scm_version=lambda: {'local_scheme': lambda version: ''}, 77 ) 78 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -65,7 +65,15 @@ extras_require['docs'] + extras_require['lint'] + extras_require['test'] - + ['nbdime', 'bump2version', 'ipython', 'pre-commit', 'check-manifest', 'twine'] + + [ + 'nbdime', + 'bump2version', + 'ipython', + 'pre-commit', + 'check-manifest', + 'codemetapy>=0.3.4', + 'twine', + ] ) ) extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -65,7 +65,15 @@\n extras_require['docs']\n + extras_require['lint']\n + extras_require['test']\n- + ['nbdime', 'bump2version', 'ipython', 'pre-commit', 'check-manifest', 'twine']\n+ + [\n+ 'nbdime',\n+ 'bump2version',\n+ 'ipython',\n+ 'pre-commit',\n+ 'check-manifest',\n+ 'codemetapy>=0.3.4',\n+ 'twine',\n+ ]\n )\n )\n extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n", "issue": "Add CodeMeta JSON-LD Context File\n# Description\r\n\r\nAs part of an IRIS-HEP initiative to improve the citation quality/ease for all software it has been recommended by @danielskatz that projects:\r\n- Track authors/contributors to software as it is developed\r\n- Create a citation metadata file in the software repo and keep it up to date\r\n- Link Github and Zenodo to get DOIs for major releases\r\n- In the README and docs clearly says how to cite the software\r\n\r\n`pyhf` already does all of this, as:\r\n- [x] There is an [`AUTHORS` file](https://github.com/scikit-hep/pyhf/blob/31d34a0edd91593d75181cc3ad71563a52600793/AUTHORS) as well as a [listing of the authors in the docs](https://scikit-hep.org/pyhf/index.html#authors)\r\n- [x] There is a [`.zenodo.json`](https://github.com/scikit-hep/pyhf/blob/2b0a1e2ba41a2aa1233511524e3aa78e696ead34/.zenodo.json) that formats Zenodo with the correct metadata and adds the ORCID of the authors\r\n- [x] There is a Zenodo DOI that is kept in sync with our release schedule (almost) fully automatically (almost as it requires 1 button click). [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.1169739.svg)](https://doi.org/10.5281/zenodo.1169739)\r\n- [x] There is a Citation section in both the [`README`](https://github.com/scikit-hep/pyhf/blob/2b0a1e2ba41a2aa1233511524e3aa78e696ead34/README.rst#citation) and the [docs webpage](https://scikit-hep.org/pyhf/citations.html) with the preferred citation\r\n\r\nIt has been pointed out though that we should also look into adding a [CodeMeta Project JSON-LD Context File](https://codemeta.github.io/jsonld/) as with it\r\n\r\n> zenodo will read [it] and will be basically the same as [schema.org](http://schema.org/) so that this will help google index software, and tools be able to use the metadata file in [softwareheritage.org](http://softwareheritage.org/) to automatically build a citation for a particular commit.\r\n\r\n(cc @gordonwatts as this might be of interest)\n", "before_files": [{"content": "from setuptools import setup\n\nextras_require = {\n 'shellcomplete': ['click_completion'],\n 'tensorflow': [\n 'tensorflow~=2.2.0', # TensorFlow minor releases are as volatile as major\n 'tensorflow-probability~=0.10.0',\n ],\n 'torch': ['torch~=1.2'],\n 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'],\n 'xmlio': ['uproot~=3.6'], # Future proof against uproot4 API changes\n 'minuit': ['iminuit~=1.4.3'], # v1.5.0 breaks pyhf for 32b TensorFlow and PyTorch\n}\nextras_require['backends'] = sorted(\n set(\n extras_require['tensorflow']\n + extras_require['torch']\n + extras_require['jax']\n + extras_require['minuit']\n )\n)\nextras_require['contrib'] = sorted(set(['matplotlib', 'requests']))\nextras_require['lint'] = sorted(set(['pyflakes', 'black']))\n\nextras_require['test'] = sorted(\n set(\n extras_require['backends']\n + extras_require['xmlio']\n + extras_require['contrib']\n + extras_require['shellcomplete']\n + [\n 'pytest~=6.0',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 
'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n 'coverage>=4.0', # coveralls\n 'papermill~=2.0',\n 'nteract-scrapbook~=0.2',\n 'jupyter',\n 'uproot~=3.3',\n 'graphviz',\n 'jsonpatch',\n ]\n )\n)\nextras_require['docs'] = sorted(\n set(\n [\n 'sphinx>=3.1.2',\n 'sphinxcontrib-bibtex',\n 'sphinx-click',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'ipywidgets',\n 'sphinx-issues',\n 'sphinx-copybutton>0.2.9',\n ]\n )\n)\nextras_require['develop'] = sorted(\n set(\n extras_require['docs']\n + extras_require['lint']\n + extras_require['test']\n + ['nbdime', 'bump2version', 'ipython', 'pre-commit', 'check-manifest', 'twine']\n )\n)\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\nsetup(\n extras_require=extras_require,\n use_scm_version=lambda: {'local_scheme': lambda version: ''},\n)\n", "path": "setup.py"}]}
1,865
164
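For context on what the new `codemetapy` pin is for: a CodeMeta file is plain JSON-LD with a fixed `@context`. The sketch below writes a minimal one by hand — the field values are placeholders, and the file `codemetapy` actually generates from package metadata will be richer:

```python
# Hand-rolled minimal codemeta.json, for illustration only. The @context URL
# is the CodeMeta 2.0 context as published on Zenodo; verify before relying on it.
import json

codemeta = {
    "@context": "https://doi.org/10.5281/zenodo.1003150",
    "@type": "SoftwareSourceCode",
    "name": "pyhf",
    "codeRepository": "https://github.com/scikit-hep/pyhf",
    "license": "https://spdx.org/licenses/Apache-2.0",
    "author": [
        {"@type": "Person", "givenName": "<given>", "familyName": "<family>"},
    ],
}

with open("codemeta.json", "w") as handle:
    json.dump(codemeta, handle, indent=2)
```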
gh_patches_debug_9416
rasdani/github-patches
git_diff
searxng__searxng-1686
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Bug: bing videos engine **Version of SearXNG, commit number if you are using on master branch and stipulate if you forked SearXNG** Repository: https://github.com/searxng/searxng Branch: master Version: 2022.08.14-649d1a14 <!-- Check if these values are correct --> **How did you install SearXNG?** searxng-docker **What happened?** bing video search crashed **How To Reproduce** search for anything **Expected behavior** <!-- A clear and concise description of what you expected to happen. --> **Screenshots & Logs** <!-- If applicable, add screenshots, logs to help explain your problem. --> **Additional context** <!-- Add any other context about the problem here. --> **Technical report** Error * Error: IndexError * Percentage: 100 * Parameters: `()` * File name: `searx/engines/bing_videos.py:84` * Function: `response` * Code: `metadata = loads(result.xpath('.//div[@class="vrhdata"]/@vrhm')[0])` </issue> <code> [start of searx/engines/bing_videos.py] 1 # SPDX-License-Identifier: AGPL-3.0-or-later 2 # lint: pylint 3 """Bing (Videos) 4 5 """ 6 7 from json import loads 8 from urllib.parse import urlencode 9 10 from lxml import html 11 12 from searx.utils import match_language 13 from searx.engines.bing import language_aliases 14 15 from searx.engines.bing import ( # pylint: disable=unused-import 16 _fetch_supported_languages, 17 supported_languages_url, 18 ) 19 20 about = { 21 "website": 'https://www.bing.com/videos', 22 "wikidata_id": 'Q4914152', 23 "official_api_documentation": 'https://www.microsoft.com/en-us/bing/apis/bing-video-search-api', 24 "use_official_api": False, 25 "require_api_key": False, 26 "results": 'HTML', 27 } 28 29 categories = ['videos', 'web'] 30 paging = True 31 safesearch = True 32 time_range_support = True 33 send_accept_language_header = True 34 number_of_results = 28 35 36 base_url = 'https://www.bing.com/' 37 search_string = ( 38 # fmt: off 39 'videos/search' 40 '?{query}' 41 '&count={count}' 42 '&first={first}' 43 '&scope=video' 44 '&FORM=QBLH' 45 # fmt: on 46 ) 47 time_range_string = '&qft=+filterui:videoage-lt{interval}' 48 time_range_dict = {'day': '1440', 'week': '10080', 'month': '43200', 'year': '525600'} 49 50 # safesearch definitions 51 safesearch_types = {2: 'STRICT', 1: 'DEMOTE', 0: 'OFF'} 52 53 54 # do search-request 55 def request(query, params): 56 offset = ((params['pageno'] - 1) * number_of_results) + 1 57 58 search_path = search_string.format(query=urlencode({'q': query}), count=number_of_results, first=offset) 59 60 # safesearch cookie 61 params['cookies']['SRCHHPGUSR'] = 'ADLT=' + safesearch_types.get(params['safesearch'], 'DEMOTE') 62 63 # language cookie 64 language = match_language(params['language'], supported_languages, language_aliases).lower() 65 params['cookies']['_EDGE_S'] = 'mkt=' + language + '&F=1' 66 67 # query and paging 68 params['url'] = base_url + search_path 69 70 # time range 71 if params['time_range'] in time_range_dict: 72 params['url'] += time_range_string.format(interval=time_range_dict[params['time_range']]) 73 74 return params 75 76 77 # get response from search-request 78 def response(resp): 79 results = [] 80 81 dom = html.fromstring(resp.text) 82 83 for result in dom.xpath('//div[@class="dg_u"]'): 84 metadata = loads(result.xpath('.//div[@class="vrhdata"]/@vrhm')[0]) 85 info = ' - '.join(result.xpath('.//div[@class="mc_vtvc_meta_block"]//span/text()')).strip() 86 content = '{0} - {1}'.format(metadata['du'], info) 
87 thumbnail = '{0}th?id={1}'.format(base_url, metadata['thid']) 88 results.append( 89 { 90 'url': metadata['murl'], 91 'thumbnail': thumbnail, 92 'title': metadata.get('vt', ''), 93 'content': content, 94 'template': 'videos.html', 95 } 96 ) 97 98 return results 99 [end of searx/engines/bing_videos.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/searx/engines/bing_videos.py b/searx/engines/bing_videos.py --- a/searx/engines/bing_videos.py +++ b/searx/engines/bing_videos.py @@ -80,7 +80,7 @@ dom = html.fromstring(resp.text) - for result in dom.xpath('//div[@class="dg_u"]'): + for result in dom.xpath('//div[@class="dg_u"]/div[contains(@class, "mc_vtvc")]'): metadata = loads(result.xpath('.//div[@class="vrhdata"]/@vrhm')[0]) info = ' - '.join(result.xpath('.//div[@class="mc_vtvc_meta_block"]//span/text()')).strip() content = '{0} - {1}'.format(metadata['du'], info)
{"golden_diff": "diff --git a/searx/engines/bing_videos.py b/searx/engines/bing_videos.py\n--- a/searx/engines/bing_videos.py\n+++ b/searx/engines/bing_videos.py\n@@ -80,7 +80,7 @@\n \n dom = html.fromstring(resp.text)\n \n- for result in dom.xpath('//div[@class=\"dg_u\"]'):\n+ for result in dom.xpath('//div[@class=\"dg_u\"]/div[contains(@class, \"mc_vtvc\")]'):\n metadata = loads(result.xpath('.//div[@class=\"vrhdata\"]/@vrhm')[0])\n info = ' - '.join(result.xpath('.//div[@class=\"mc_vtvc_meta_block\"]//span/text()')).strip()\n content = '{0} - {1}'.format(metadata['du'], info)\n", "issue": "Bug: bing videos engine\n**Version of SearXNG, commit number if you are using on master branch and stipulate if you forked SearXNG**\r\nRepository: https://github.com/searxng/searxng\r\nBranch: master\r\nVersion: 2022.08.14-649d1a14\r\n<!-- Check if these values are correct -->\r\n\r\n**How did you install SearXNG?**\r\nsearxng-docker\r\n\r\n**What happened?**\r\nbing video search crashed\r\n\r\n**How To Reproduce**\r\nsearch for anything\r\n\r\n**Expected behavior**\r\n<!-- A clear and concise description of what you expected to happen. -->\r\n\r\n**Screenshots & Logs**\r\n<!-- If applicable, add screenshots, logs to help explain your problem. -->\r\n\r\n**Additional context**\r\n<!-- Add any other context about the problem here. -->\r\n\r\n**Technical report**\r\n\r\nError\r\n * Error: IndexError\r\n * Percentage: 100\r\n * Parameters: `()`\r\n * File name: `searx/engines/bing_videos.py:84`\r\n * Function: `response`\r\n * Code: `metadata = loads(result.xpath('.//div[@class=\"vrhdata\"]/@vrhm')[0])`\r\n\r\n\n", "before_files": [{"content": "# SPDX-License-Identifier: AGPL-3.0-or-later\n# lint: pylint\n\"\"\"Bing (Videos)\n\n\"\"\"\n\nfrom json import loads\nfrom urllib.parse import urlencode\n\nfrom lxml import html\n\nfrom searx.utils import match_language\nfrom searx.engines.bing import language_aliases\n\nfrom searx.engines.bing import ( # pylint: disable=unused-import\n _fetch_supported_languages,\n supported_languages_url,\n)\n\nabout = {\n \"website\": 'https://www.bing.com/videos',\n \"wikidata_id\": 'Q4914152',\n \"official_api_documentation\": 'https://www.microsoft.com/en-us/bing/apis/bing-video-search-api',\n \"use_official_api\": False,\n \"require_api_key\": False,\n \"results\": 'HTML',\n}\n\ncategories = ['videos', 'web']\npaging = True\nsafesearch = True\ntime_range_support = True\nsend_accept_language_header = True\nnumber_of_results = 28\n\nbase_url = 'https://www.bing.com/'\nsearch_string = (\n # fmt: off\n 'videos/search'\n '?{query}'\n '&count={count}'\n '&first={first}'\n '&scope=video'\n '&FORM=QBLH'\n # fmt: on\n)\ntime_range_string = '&qft=+filterui:videoage-lt{interval}'\ntime_range_dict = {'day': '1440', 'week': '10080', 'month': '43200', 'year': '525600'}\n\n# safesearch definitions\nsafesearch_types = {2: 'STRICT', 1: 'DEMOTE', 0: 'OFF'}\n\n\n# do search-request\ndef request(query, params):\n offset = ((params['pageno'] - 1) * number_of_results) + 1\n\n search_path = search_string.format(query=urlencode({'q': query}), count=number_of_results, first=offset)\n\n # safesearch cookie\n params['cookies']['SRCHHPGUSR'] = 'ADLT=' + safesearch_types.get(params['safesearch'], 'DEMOTE')\n\n # language cookie\n language = match_language(params['language'], supported_languages, language_aliases).lower()\n params['cookies']['_EDGE_S'] = 'mkt=' + language + '&F=1'\n\n # query and paging\n params['url'] = base_url + search_path\n\n # time range\n if params['time_range'] in 
time_range_dict:\n params['url'] += time_range_string.format(interval=time_range_dict[params['time_range']])\n\n return params\n\n\n# get response from search-request\ndef response(resp):\n results = []\n\n dom = html.fromstring(resp.text)\n\n for result in dom.xpath('//div[@class=\"dg_u\"]'):\n metadata = loads(result.xpath('.//div[@class=\"vrhdata\"]/@vrhm')[0])\n info = ' - '.join(result.xpath('.//div[@class=\"mc_vtvc_meta_block\"]//span/text()')).strip()\n content = '{0} - {1}'.format(metadata['du'], info)\n thumbnail = '{0}th?id={1}'.format(base_url, metadata['thid'])\n results.append(\n {\n 'url': metadata['murl'],\n 'thumbnail': thumbnail,\n 'title': metadata.get('vt', ''),\n 'content': content,\n 'template': 'videos.html',\n }\n )\n\n return results\n", "path": "searx/engines/bing_videos.py"}]}
1,774
187
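A note on the searxng record above: the crash came from indexing `[0]` into an empty XPath result whenever a `dg_u` wrapper held something other than a video card (an ad tile, for instance), and the merged diff narrows the selector rather than guarding the index. Below is a minimal standalone reproduction of both behaviours using lxml, as the engine does; the HTML snippet is an invented stand-in for Bing's real markup.

```python
# Invented markup: one wrapper without vrhdata (the crashing case) and one
# proper video card. Only lxml and the stdlib json module are real here.
from json import loads
from lxml import html

dom = html.fromstring("""
<html><body>
  <div class="dg_u"><div class="mc_ad">ad tile, no vrhdata inside</div></div>
  <div class="dg_u"><div class="mc_vtvc">
    <div class="vrhdata" vrhm='{"murl": "https://example.org/v", "du": "1:23"}'></div>
  </div></div>
</body></html>
""")

# Old selector: '//div[@class="dg_u"]' also yields the ad wrapper, whose
# './/div[@class="vrhdata"]/@vrhm' list is empty, so [0] raises IndexError.
# Patched selector: descend only into children carrying the video-card class.
for card in dom.xpath('//div[@class="dg_u"]/div[contains(@class, "mc_vtvc")]'):
    metadata = loads(card.xpath('.//div[@class="vrhdata"]/@vrhm')[0])
    print(metadata["murl"], metadata["du"])  # -> https://example.org/v 1:23
```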
gh_patches_debug_22806
rasdani/github-patches
git_diff
sopel-irc__sopel-1859
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> announce: Batch up to MAXTARGETS channels at a time if available The simple loop in `announce.py` is kind of...meh. The number of lines it actually sends to the server could be reduced as much as 75% depending on how many channels Sopel is in when `.announce` is called. Some IRC servers (Hybrid, for example) send a `MAXTARGETS` field in the `ISUPPORT` properties when a client connects. If this is present, it would be useful for `announce.py` to have that information and send its announcement messages to `MAXTARGETS` channels at a time. Long-term thing, more of an optimization than anything. </issue> <code> [start of sopel/modules/announce.py] 1 # coding=utf-8 2 """ 3 announce.py - Sopel Announcement Plugin 4 Sends announcements to all channels the bot has joined. 5 Copyright © 2013, Elad Alfassa, <[email protected]> 6 Licensed under the Eiffel Forum License 2. 7 8 https://sopel.chat 9 """ 10 from __future__ import absolute_import, division, print_function, unicode_literals 11 12 from sopel import plugin 13 14 15 @plugin.command('announce') 16 @plugin.example('.announce Some important message here') 17 @plugin.require_admin('Sorry, I can\'t let you do that', reply=True) 18 @plugin.output_prefix('[ANNOUNCEMENT] ') 19 def announce(bot, trigger): 20 """Send an announcement to all channels the bot is in""" 21 for channel in bot.channels: 22 bot.say(trigger.group(2), channel) 23 bot.reply('Announce complete.') 24 [end of sopel/modules/announce.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/sopel/modules/announce.py b/sopel/modules/announce.py --- a/sopel/modules/announce.py +++ b/sopel/modules/announce.py @@ -12,12 +12,30 @@ from sopel import plugin +def _chunks(items, size): + """Break a list of items into groups. + + :param items: the collection of items to chunk + :type items: :term:`iterable` + :param int size: the size of each chunk + """ + # from https://stackoverflow.com/a/312464/5991 with modified names for readability + for delim in range(0, len(items), size): + yield items[delim:delim + size] + + @plugin.command('announce') @plugin.example('.announce Some important message here') @plugin.require_admin('Sorry, I can\'t let you do that', reply=True) @plugin.output_prefix('[ANNOUNCEMENT] ') def announce(bot, trigger): - """Send an announcement to all channels the bot is in""" - for channel in bot.channels: - bot.say(trigger.group(2), channel) + """Send an announcement to all channels the bot is in.""" + if trigger.group(2) is None: + bot.reply('Announce what? I need a message to say.') + return + + size = bot.isupport.get('TARGMAX', {}).get('PRIVMSG', 1) + channels = _chunks(bot.channels.keys(), size) + for cgroup in channels: + bot.say(trigger.group(2), ','.join(cgroup)) bot.reply('Announce complete.')
{"golden_diff": "diff --git a/sopel/modules/announce.py b/sopel/modules/announce.py\n--- a/sopel/modules/announce.py\n+++ b/sopel/modules/announce.py\n@@ -12,12 +12,30 @@\n from sopel import plugin\n \n \n+def _chunks(items, size):\n+ \"\"\"Break a list of items into groups.\n+\n+ :param items: the collection of items to chunk\n+ :type items: :term:`iterable`\n+ :param int size: the size of each chunk\n+ \"\"\"\n+ # from https://stackoverflow.com/a/312464/5991 with modified names for readability\n+ for delim in range(0, len(items), size):\n+ yield items[delim:delim + size]\n+\n+\n @plugin.command('announce')\n @plugin.example('.announce Some important message here')\n @plugin.require_admin('Sorry, I can\\'t let you do that', reply=True)\n @plugin.output_prefix('[ANNOUNCEMENT] ')\n def announce(bot, trigger):\n- \"\"\"Send an announcement to all channels the bot is in\"\"\"\n- for channel in bot.channels:\n- bot.say(trigger.group(2), channel)\n+ \"\"\"Send an announcement to all channels the bot is in.\"\"\"\n+ if trigger.group(2) is None:\n+ bot.reply('Announce what? I need a message to say.')\n+ return\n+\n+ size = bot.isupport.get('TARGMAX', {}).get('PRIVMSG', 1)\n+ channels = _chunks(bot.channels.keys(), size)\n+ for cgroup in channels:\n+ bot.say(trigger.group(2), ','.join(cgroup))\n bot.reply('Announce complete.')\n", "issue": "announce: Batch up to MAXTARGETS channels at a time if available\nThe simple loop in `announce.py` is kind of...meh. The number of lines it actually sends to the server could be reduced as much as 75% depending on how many channels Sopel is in when `.announce` is called.\r\n\r\nSome IRC servers (Hybrid, for example) send a `MAXTARGETS` field in the `ISUPPORT` properties when a client connects. If this is present, it would be useful for `announce.py` to have that information and send its announcement messages to `MAXTARGETS` channels at a time.\r\n\r\nLong-term thing, more of an optimization than anything.\n", "before_files": [{"content": "# coding=utf-8\n\"\"\"\nannounce.py - Sopel Announcement Plugin\nSends announcements to all channels the bot has joined.\nCopyright \u00a9 2013, Elad Alfassa, <[email protected]>\nLicensed under the Eiffel Forum License 2.\n\nhttps://sopel.chat\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom sopel import plugin\n\n\[email protected]('announce')\[email protected]('.announce Some important message here')\[email protected]_admin('Sorry, I can\\'t let you do that', reply=True)\[email protected]_prefix('[ANNOUNCEMENT] ')\ndef announce(bot, trigger):\n \"\"\"Send an announcement to all channels the bot is in\"\"\"\n for channel in bot.channels:\n bot.say(trigger.group(2), channel)\n bot.reply('Announce complete.')\n", "path": "sopel/modules/announce.py"}]}
902
374
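The sopel patch above is essentially the textbook list-chunking idiom plus an ISUPPORT lookup. A self-contained sketch of the same batching follows, with `channels` and `size` as stand-ins for `bot.channels.keys()` and the server's `TARGMAX`/`MAXTARGETS` limit; the sketch uses a list because the slicing in the helper requires a sequence.

```python
def chunks(items, size):
    """Yield successive groups of at most `size` items (same idea as _chunks)."""
    for start in range(0, len(items), size):
        yield items[start:start + size]

channels = ["#a", "#b", "#c", "#d", "#e"]  # stand-in for bot.channels.keys()
size = 2                                   # stand-in for TARGMAX['PRIVMSG']

for group in chunks(channels, size):
    # One PRIVMSG line per group of targets instead of one per channel.
    print("PRIVMSG {} :Some important message".format(",".join(group)))
# PRIVMSG #a,#b :Some important message
# PRIVMSG #c,#d :Some important message
# PRIVMSG #e :Some important message
```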
gh_patches_debug_16342
rasdani/github-patches
git_diff
learningequality__kolibri-8880
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> User on SouD should not be able to edit their user profile From @jamalex in bug bash: The FacilityUser object is read-only for single-user certificates, so these changes will not be synced back. We had a spec doc that laid out "what should be allowed on a SoUD", I think this was documented in there — we should do an audit to make sure other cases are covered as well. https://www.notion.so/learningequality/CRITICAL-FEATURE-BUG-Users-on-SoUD-should-not-be-allowed-to-edit-their-user-profile-e5765979e7ed4fcaa694cfe7932ac4df </issue> <code> [start of kolibri/plugins/user_profile/kolibri_plugin.py] 1 from __future__ import absolute_import 2 from __future__ import print_function 3 from __future__ import unicode_literals 4 5 from kolibri.core.hooks import NavigationHook 6 from kolibri.core.webpack import hooks as webpack_hooks 7 from kolibri.plugins import KolibriPluginBase 8 from kolibri.plugins.hooks import register_hook 9 10 11 class UserProfile(KolibriPluginBase): 12 translated_view_urls = "urls" 13 14 @property 15 def url_slug(self): 16 return "profile" 17 18 19 @register_hook 20 class UserAuthAsset(webpack_hooks.WebpackBundleHook): 21 bundle_id = "app" 22 23 24 @register_hook 25 class ProfileNavAction(NavigationHook): 26 bundle_id = "user_profile_side_nav" 27 [end of kolibri/plugins/user_profile/kolibri_plugin.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/kolibri/plugins/user_profile/kolibri_plugin.py b/kolibri/plugins/user_profile/kolibri_plugin.py --- a/kolibri/plugins/user_profile/kolibri_plugin.py +++ b/kolibri/plugins/user_profile/kolibri_plugin.py @@ -2,6 +2,7 @@ from __future__ import print_function from __future__ import unicode_literals +from kolibri.core.device.utils import get_device_setting from kolibri.core.hooks import NavigationHook from kolibri.core.webpack import hooks as webpack_hooks from kolibri.plugins import KolibriPluginBase @@ -20,6 +21,10 @@ class UserAuthAsset(webpack_hooks.WebpackBundleHook): bundle_id = "app" + @property + def plugin_data(self): + return {"isSubsetOfUsersDevice": get_device_setting("subset_of_users_device")} + @register_hook class ProfileNavAction(NavigationHook):
{"golden_diff": "diff --git a/kolibri/plugins/user_profile/kolibri_plugin.py b/kolibri/plugins/user_profile/kolibri_plugin.py\n--- a/kolibri/plugins/user_profile/kolibri_plugin.py\n+++ b/kolibri/plugins/user_profile/kolibri_plugin.py\n@@ -2,6 +2,7 @@\n from __future__ import print_function\n from __future__ import unicode_literals\n \n+from kolibri.core.device.utils import get_device_setting\n from kolibri.core.hooks import NavigationHook\n from kolibri.core.webpack import hooks as webpack_hooks\n from kolibri.plugins import KolibriPluginBase\n@@ -20,6 +21,10 @@\n class UserAuthAsset(webpack_hooks.WebpackBundleHook):\n bundle_id = \"app\"\n \n+ @property\n+ def plugin_data(self):\n+ return {\"isSubsetOfUsersDevice\": get_device_setting(\"subset_of_users_device\")}\n+\n \n @register_hook\n class ProfileNavAction(NavigationHook):\n", "issue": "User on SouD should not be able to edit their user profile\nFrom @jamalex in bug bash: \r\nThe FacilityUser object is read-only for single-user certificates, so these changes will not be synced back. We had a spec doc that laid out \"what should be allowed on a SoUD\", I think this was documented in there \u2014 we should do an audit to make sure other cases are covered as well.\r\n\r\nhttps://www.notion.so/learningequality/CRITICAL-FEATURE-BUG-Users-on-SoUD-should-not-be-allowed-to-edit-their-user-profile-e5765979e7ed4fcaa694cfe7932ac4df\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom kolibri.core.hooks import NavigationHook\nfrom kolibri.core.webpack import hooks as webpack_hooks\nfrom kolibri.plugins import KolibriPluginBase\nfrom kolibri.plugins.hooks import register_hook\n\n\nclass UserProfile(KolibriPluginBase):\n translated_view_urls = \"urls\"\n\n @property\n def url_slug(self):\n return \"profile\"\n\n\n@register_hook\nclass UserAuthAsset(webpack_hooks.WebpackBundleHook):\n bundle_id = \"app\"\n\n\n@register_hook\nclass ProfileNavAction(NavigationHook):\n bundle_id = \"user_profile_side_nav\"\n", "path": "kolibri/plugins/user_profile/kolibri_plugin.py"}]}
887
205
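For the Kolibri record above, the mechanism is worth spelling out: the fix surfaces a server-side device setting to the frontend bundle through the webpack hook's `plugin_data` property, so the profile UI can render read-only on a subset-of-users device, where the FacilityUser object would not sync back anyway. The assembled hook after applying the diff looks like this; the imports are Kolibri-internal, so it only runs inside that codebase.

```python
from kolibri.core.device.utils import get_device_setting
from kolibri.core.webpack import hooks as webpack_hooks
from kolibri.plugins.hooks import register_hook


@register_hook
class UserAuthAsset(webpack_hooks.WebpackBundleHook):
    bundle_id = "app"

    @property
    def plugin_data(self):
        # Serialized into the page so the JS side can gate profile editing.
        return {"isSubsetOfUsersDevice": get_device_setting("subset_of_users_device")}
```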
gh_patches_debug_18363
rasdani/github-patches
git_diff
pytorch__pytorch-78948
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Functional API for FileLister ### 🚀 The feature, motivation and pitch Similar to https://github.com/pytorch/data/issues/387 This allows for ```python IterableWrapper([...]).list_file() ``` ### Alternatives _No response_ ### Additional context _No response_ cc @VitalyFedyunin @ejguan @NivekT </issue> <code> [start of torch/utils/data/datapipes/iter/filelister.py] 1 from typing import Iterator, List, Sequence, Union 2 3 from torch.utils.data.datapipes.datapipe import IterDataPipe 4 from torch.utils.data.datapipes.iter import IterableWrapper 5 from torch.utils.data.datapipes.utils.common import get_file_pathnames_from_root 6 7 __all__ = ["FileListerIterDataPipe", ] 8 9 10 class FileListerIterDataPipe(IterDataPipe[str]): 11 r""" 12 Given path(s) to the root directory, yields file pathname(s) (path + filename) of files within the root directory. 13 Multiple root directories can be provided. 14 15 Args: 16 root: Root directory or a sequence of root directories 17 masks: Unix style filter string or string list for filtering file name(s) 18 recursive: Whether to return pathname from nested directories or not 19 abspath: Whether to return relative pathname or absolute pathname 20 non_deterministic: Whether to return pathname in sorted order or not. 21 If ``False``, the results yielded from each root directory will be sorted 22 length: Nominal length of the datapipe 23 24 Example: 25 >>> from torchdata.datapipes.iter import FileLister 26 >>> dp = FileLister(root=".", recursive=True) 27 >>> list(dp) 28 ['example.py', './data/data.tar'] 29 """ 30 31 def __init__( 32 self, 33 root: Union[str, Sequence[str], IterDataPipe] = '.', 34 masks: Union[str, List[str]] = '', 35 *, 36 recursive: bool = False, 37 abspath: bool = False, 38 non_deterministic: bool = False, 39 length: int = -1 40 ) -> None: 41 super().__init__() 42 if isinstance(root, str): 43 root = [root, ] 44 if not isinstance(root, IterDataPipe): 45 root = IterableWrapper(root) 46 self.datapipe: IterDataPipe = root 47 self.masks: Union[str, List[str]] = masks 48 self.recursive: bool = recursive 49 self.abspath: bool = abspath 50 self.non_deterministic: bool = non_deterministic 51 self.length: int = length 52 53 def __iter__(self) -> Iterator[str] : 54 for path in self.datapipe: 55 yield from get_file_pathnames_from_root(path, self.masks, self.recursive, self.abspath, self.non_deterministic) 56 57 def __len__(self): 58 if self.length == -1: 59 raise TypeError("{} instance doesn't have valid length".format(type(self).__name__)) 60 return self.length 61 [end of torch/utils/data/datapipes/iter/filelister.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/torch/utils/data/datapipes/iter/filelister.py b/torch/utils/data/datapipes/iter/filelister.py --- a/torch/utils/data/datapipes/iter/filelister.py +++ b/torch/utils/data/datapipes/iter/filelister.py @@ -1,5 +1,8 @@ from typing import Iterator, List, Sequence, Union + +from torch.utils.data.datapipes._decorator import functional_datapipe + from torch.utils.data.datapipes.datapipe import IterDataPipe from torch.utils.data.datapipes.iter import IterableWrapper from torch.utils.data.datapipes.utils.common import get_file_pathnames_from_root @@ -7,6 +10,7 @@ __all__ = ["FileListerIterDataPipe", ] +@functional_datapipe("list_files") class FileListerIterDataPipe(IterDataPipe[str]): r""" Given path(s) to the root directory, yields file pathname(s) (path + filename) of files within the root directory.
{"golden_diff": "diff --git a/torch/utils/data/datapipes/iter/filelister.py b/torch/utils/data/datapipes/iter/filelister.py\n--- a/torch/utils/data/datapipes/iter/filelister.py\n+++ b/torch/utils/data/datapipes/iter/filelister.py\n@@ -1,5 +1,8 @@\n from typing import Iterator, List, Sequence, Union\n \n+\n+from torch.utils.data.datapipes._decorator import functional_datapipe\n+\n from torch.utils.data.datapipes.datapipe import IterDataPipe\n from torch.utils.data.datapipes.iter import IterableWrapper\n from torch.utils.data.datapipes.utils.common import get_file_pathnames_from_root\n@@ -7,6 +10,7 @@\n __all__ = [\"FileListerIterDataPipe\", ]\n \n \n+@functional_datapipe(\"list_files\")\n class FileListerIterDataPipe(IterDataPipe[str]):\n r\"\"\"\n Given path(s) to the root directory, yields file pathname(s) (path + filename) of files within the root directory.\n", "issue": "Functional API for FileLister\n### \ud83d\ude80 The feature, motivation and pitch\n\nSimilar to https://github.com/pytorch/data/issues/387\r\n\r\nThis allows for \r\n```python\r\nIterableWrapper([...]).list_file()\r\n```\n\n### Alternatives\n\n_No response_\n\n### Additional context\n\n_No response_\n\ncc @VitalyFedyunin @ejguan @NivekT\n", "before_files": [{"content": "from typing import Iterator, List, Sequence, Union\n\nfrom torch.utils.data.datapipes.datapipe import IterDataPipe\nfrom torch.utils.data.datapipes.iter import IterableWrapper\nfrom torch.utils.data.datapipes.utils.common import get_file_pathnames_from_root\n\n__all__ = [\"FileListerIterDataPipe\", ]\n\n\nclass FileListerIterDataPipe(IterDataPipe[str]):\n r\"\"\"\n Given path(s) to the root directory, yields file pathname(s) (path + filename) of files within the root directory.\n Multiple root directories can be provided.\n\n Args:\n root: Root directory or a sequence of root directories\n masks: Unix style filter string or string list for filtering file name(s)\n recursive: Whether to return pathname from nested directories or not\n abspath: Whether to return relative pathname or absolute pathname\n non_deterministic: Whether to return pathname in sorted order or not.\n If ``False``, the results yielded from each root directory will be sorted\n length: Nominal length of the datapipe\n\n Example:\n >>> from torchdata.datapipes.iter import FileLister\n >>> dp = FileLister(root=\".\", recursive=True)\n >>> list(dp)\n ['example.py', './data/data.tar']\n \"\"\"\n\n def __init__(\n self,\n root: Union[str, Sequence[str], IterDataPipe] = '.',\n masks: Union[str, List[str]] = '',\n *,\n recursive: bool = False,\n abspath: bool = False,\n non_deterministic: bool = False,\n length: int = -1\n ) -> None:\n super().__init__()\n if isinstance(root, str):\n root = [root, ]\n if not isinstance(root, IterDataPipe):\n root = IterableWrapper(root)\n self.datapipe: IterDataPipe = root\n self.masks: Union[str, List[str]] = masks\n self.recursive: bool = recursive\n self.abspath: bool = abspath\n self.non_deterministic: bool = non_deterministic\n self.length: int = length\n\n def __iter__(self) -> Iterator[str] :\n for path in self.datapipe:\n yield from get_file_pathnames_from_root(path, self.masks, self.recursive, self.abspath, self.non_deterministic)\n\n def __len__(self):\n if self.length == -1:\n raise TypeError(\"{} instance doesn't have valid length\".format(type(self).__name__))\n return self.length\n", "path": "torch/utils/data/datapipes/iter/filelister.py"}]}
1,282
227
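Once the pytorch patch above lands, the `@functional_datapipe` decorator makes the datapipe reachable by method chaining; note the registered functional name is `list_files`, not the `list_file()` spelling in the issue body. A usage sketch, assuming a build that includes the change:

```python
from torch.utils.data.datapipes.iter import IterableWrapper

# Equivalent to FileListerIterDataPipe(root=IterableWrapper(["."]),
# masks="*.py", recursive=True) constructed by hand.
dp = IterableWrapper(["."]).list_files(masks="*.py", recursive=True)
for pathname in dp:
    print(pathname)
```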
gh_patches_debug_13893
rasdani/github-patches
git_diff
litestar-org__litestar-1370
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Bug: msgspec version needs updating ### Description Starlite currently needs msgspec >= 0.11.0 from here https://github.com/starlite-api/starlite/blob/8a49aff754d5b409397183eb395cac5a44193d72/pyproject.toml#L52. Starlite uses msgspec.inspect: https://github.com/starlite-api/starlite/blob/41089a2d5dff696a25c78a58322d3a8a08ebcac3/starlite/constants.py#L3 msgspec.inspect wasn't introduced in msgspec until 0.12.0: https://github.com/jcrist/msgspec/releases/tag/0.12.0 ### URL to code causing the issue _No response_ ### MCVE _No response_ ### Steps to reproduce ```bash 1. In a new environment, pip install starlite msgspec==0.11.0 2. Try to import starlite ``` ### Screenshots _No response_ ### Logs _No response_ ### Starlite Version 1.51.7 ### Platform - [ ] Linux - [ ] Mac - [ ] Windows - [ ] Other (Please specify in the description above) </issue> <code> [start of starlite/constants.py] 1 from inspect import Signature 2 3 from msgspec.inspect import UNSET 4 from pydantic.fields import Undefined 5 6 from starlite.types import Empty 7 8 DEFAULT_ALLOWED_CORS_HEADERS = {"Accept", "Accept-Language", "Content-Language", "Content-Type"} 9 DEFAULT_CHUNK_SIZE = 1024 * 128 # 128KB 10 HTTP_RESPONSE_BODY = "http.response.body" 11 HTTP_RESPONSE_START = "http.response.start" 12 ONE_MEGABYTE = 1024 * 1024 13 REDIRECT_STATUS_CODES = {301, 302, 303, 307, 308} 14 RESERVED_KWARGS = {"state", "headers", "cookies", "request", "socket", "data", "query", "scope", "body"} 15 SCOPE_STATE_DEPENDENCY_CACHE = "dependency_cache" 16 SCOPE_STATE_NAMESPACE = "__starlite__" 17 SCOPE_STATE_RESPONSE_COMPRESSED = "response_compressed" 18 UNDEFINED_SENTINELS = {Undefined, Signature.empty, UNSET, Empty, Ellipsis} 19 SKIP_VALIDATION_NAMES = {"request", "socket", "scope", "receive", "send"} 20 [end of starlite/constants.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/starlite/constants.py b/starlite/constants.py --- a/starlite/constants.py +++ b/starlite/constants.py @@ -1,6 +1,5 @@ from inspect import Signature -from msgspec.inspect import UNSET from pydantic.fields import Undefined from starlite.types import Empty @@ -15,5 +14,5 @@ SCOPE_STATE_DEPENDENCY_CACHE = "dependency_cache" SCOPE_STATE_NAMESPACE = "__starlite__" SCOPE_STATE_RESPONSE_COMPRESSED = "response_compressed" -UNDEFINED_SENTINELS = {Undefined, Signature.empty, UNSET, Empty, Ellipsis} +UNDEFINED_SENTINELS = {Undefined, Signature.empty, Empty, Ellipsis} SKIP_VALIDATION_NAMES = {"request", "socket", "scope", "receive", "send"}
{"golden_diff": "diff --git a/starlite/constants.py b/starlite/constants.py\n--- a/starlite/constants.py\n+++ b/starlite/constants.py\n@@ -1,6 +1,5 @@\n from inspect import Signature\n \n-from msgspec.inspect import UNSET\n from pydantic.fields import Undefined\n \n from starlite.types import Empty\n@@ -15,5 +14,5 @@\n SCOPE_STATE_DEPENDENCY_CACHE = \"dependency_cache\"\n SCOPE_STATE_NAMESPACE = \"__starlite__\"\n SCOPE_STATE_RESPONSE_COMPRESSED = \"response_compressed\"\n-UNDEFINED_SENTINELS = {Undefined, Signature.empty, UNSET, Empty, Ellipsis}\n+UNDEFINED_SENTINELS = {Undefined, Signature.empty, Empty, Ellipsis}\n SKIP_VALIDATION_NAMES = {\"request\", \"socket\", \"scope\", \"receive\", \"send\"}\n", "issue": "Bug: msgspec version needs updating\n### Description\n\nStarlite currently needs msgspec >= 0.11.0 from here https://github.com/starlite-api/starlite/blob/8a49aff754d5b409397183eb395cac5a44193d72/pyproject.toml#L52.\r\n\r\nStarlite uses msgspec.inspect:\r\nhttps://github.com/starlite-api/starlite/blob/41089a2d5dff696a25c78a58322d3a8a08ebcac3/starlite/constants.py#L3\r\n\r\nmsgspec.inspect wasn't introduced in msgspec until 0.12.0:\r\nhttps://github.com/jcrist/msgspec/releases/tag/0.12.0\n\n### URL to code causing the issue\n\n_No response_\n\n### MCVE\n\n_No response_\n\n### Steps to reproduce\n\n```bash\n1. In a new environment, pip install starlite msgspec==0.11.0\r\n2. Try to import starlite\n```\n\n\n### Screenshots\n\n_No response_\n\n### Logs\n\n_No response_\n\n### Starlite Version\n\n1.51.7\n\n### Platform\n\n- [ ] Linux\n- [ ] Mac\n- [ ] Windows\n- [ ] Other (Please specify in the description above)\n", "before_files": [{"content": "from inspect import Signature\n\nfrom msgspec.inspect import UNSET\nfrom pydantic.fields import Undefined\n\nfrom starlite.types import Empty\n\nDEFAULT_ALLOWED_CORS_HEADERS = {\"Accept\", \"Accept-Language\", \"Content-Language\", \"Content-Type\"}\nDEFAULT_CHUNK_SIZE = 1024 * 128 # 128KB\nHTTP_RESPONSE_BODY = \"http.response.body\"\nHTTP_RESPONSE_START = \"http.response.start\"\nONE_MEGABYTE = 1024 * 1024\nREDIRECT_STATUS_CODES = {301, 302, 303, 307, 308}\nRESERVED_KWARGS = {\"state\", \"headers\", \"cookies\", \"request\", \"socket\", \"data\", \"query\", \"scope\", \"body\"}\nSCOPE_STATE_DEPENDENCY_CACHE = \"dependency_cache\"\nSCOPE_STATE_NAMESPACE = \"__starlite__\"\nSCOPE_STATE_RESPONSE_COMPRESSED = \"response_compressed\"\nUNDEFINED_SENTINELS = {Undefined, Signature.empty, UNSET, Empty, Ellipsis}\nSKIP_VALIDATION_NAMES = {\"request\", \"socket\", \"scope\", \"receive\", \"send\"}\n", "path": "starlite/constants.py"}]}
1,102
171
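The litestar failure above is a plain import-time incompatibility: `msgspec.inspect` first shipped in msgspec 0.12.0, so `from msgspec.inspect import UNSET` raises ImportError under the 0.11.x floor the package still allowed. The merged fix simply drops the import and the sentinel; had the sentinel been kept, a guarded import is the usual compatibility shape. A sketch only, where the fallback object is an invented local placeholder:

```python
try:
    from msgspec.inspect import UNSET  # submodule exists from msgspec 0.12.0
except ImportError:                    # e.g. msgspec 0.11.x
    UNSET = object()  # invented stand-in so membership checks still work
```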
gh_patches_debug_6437
rasdani/github-patches
git_diff
liqd__a4-meinberlin-532
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Participated projects visible for all on user profile If I write a comment on platform meinBerlin and take part in a participation process, everyone can see my profile and in which participation process I took part, even I'm not logged in. It's not anonymous enough. </issue> <code> [start of apps/users/views.py] 1 from django.views.generic.detail import DetailView 2 3 from adhocracy4.projects.models import Project 4 5 from . import models 6 7 8 class ProfileView(DetailView): 9 model = models.User 10 slug_field = 'username' 11 12 @property 13 def get_participated_projects(self): 14 user = self.object 15 16 qs = Project.objects.filter( 17 action__actor=user 18 ).distinct() 19 20 return qs 21 [end of apps/users/views.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/apps/users/views.py b/apps/users/views.py --- a/apps/users/views.py +++ b/apps/users/views.py @@ -1,20 +1,8 @@ from django.views.generic.detail import DetailView -from adhocracy4.projects.models import Project - from . import models class ProfileView(DetailView): model = models.User slug_field = 'username' - - @property - def get_participated_projects(self): - user = self.object - - qs = Project.objects.filter( - action__actor=user - ).distinct() - - return qs
{"golden_diff": "diff --git a/apps/users/views.py b/apps/users/views.py\n--- a/apps/users/views.py\n+++ b/apps/users/views.py\n@@ -1,20 +1,8 @@\n from django.views.generic.detail import DetailView\n \n-from adhocracy4.projects.models import Project\n-\n from . import models\n \n \n class ProfileView(DetailView):\n model = models.User\n slug_field = 'username'\n-\n- @property\n- def get_participated_projects(self):\n- user = self.object\n-\n- qs = Project.objects.filter(\n- action__actor=user\n- ).distinct()\n-\n- return qs\n", "issue": "Participated projects visible for all on user profile\nIf I write a comment on platform meinBerlin and take part in a participation process, everyone can see my profile and in which participation process I took part, even I'm not logged in. It's not anonymous enough.\n", "before_files": [{"content": "from django.views.generic.detail import DetailView\n\nfrom adhocracy4.projects.models import Project\n\nfrom . import models\n\n\nclass ProfileView(DetailView):\n model = models.User\n slug_field = 'username'\n\n @property\n def get_participated_projects(self):\n user = self.object\n\n qs = Project.objects.filter(\n action__actor=user\n ).distinct()\n\n return qs\n", "path": "apps/users/views.py"}]}
713
136
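The meinberlin record above fixes the privacy leak by deleting the exposed queryset outright. For contrast, the common alternative when the data must stay visible to the profile owner is to gate it on the requesting user instead; this sketch is not the merged fix, and it reuses the model and import names from the snippet above.

```python
from django.views.generic.detail import DetailView

from adhocracy4.projects.models import Project

from . import models


class ProfileView(DetailView):
    model = models.User
    slug_field = "username"

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        if self.request.user.is_authenticated and self.request.user == self.object:
            # Only the owner sees their own participation history.
            context["participated_projects"] = Project.objects.filter(
                action__actor=self.object
            ).distinct()
        return context
```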
gh_patches_debug_38811
rasdani/github-patches
git_diff
xonsh__xonsh-4673
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> make promp.env.env_name venv --prompt aware (set in pyvenv.cfg) ## xonfig <details> ``` > xonfig <xonsh-code>:1:0 - xonfig <xonsh-code>:1:0 + ![xonfig] +------------------+----------------------+ | xonsh | 0.11.0 | | Git SHA | adfa60ea | | Commit Date | Feb 11 14:53:00 2022 | | Python | 3.9.7 | | PLY | 3.11 | | have readline | True | | prompt toolkit | None | | shell type | readline | | history backend | json | | pygments | 2.11.2 | | on posix | True | | on linux | True | | distro | unknown | | on wsl | False | | on darwin | False | | on windows | False | | on cygwin | False | | on msys2 | False | | is superuser | False | | default encoding | utf-8 | | xonsh encoding | utf-8 | | encoding errors | surrogateescape | | on jupyter | False | | jupyter kernel | None | | xontrib 1 | coreutils | | xontrib 2 | vox | | xontrib 3 | voxapi | | RC file 1 | /home/ob/.xonshrc | +------------------+----------------------+ ``` </details> ## Expected Behavior When activating a venv via `vox activate` the name should be set to the value of `prompt` key in `pyvenv.cfg` if present (file and key) - see https://docs.python.org/3/library/venv.html. ## Current Behavior The prompt is always set to name of venv directory independent of prompt settings. ## Steps to Reproduce ```shell $ python -m venv --prompt "MY SPECIAL PROMPT" .venv $ cat .venv/pyvenv.cfg | grep prompt prompt = 'MY SPECIAL PROMPT' vox activate ./.venv ``` new prompt looks like this: `[17:58:10] (.venv) ob@ob1 ~/oss/xonsh dropthedot|✓` but should look like this: `[17:58:10] (MY SPECIAL PROMPT) ob@ob1 ~/oss/xonsh dropthedot|✓` ## Also add `--prompt` to `vox new`? If this is done, maybe a good idea would be to also add `--prompt` to `vox new` to make this possible on creation (also in the interest of symmetry), but it seems like the common pattern for many xonsh users is to have virtualenvs in `~/.virtualenv` with the venv folder name being the same as the project, therefore automatically having a meaningful prompt, so this does not seem to be an urgently wanted feature for most. I want to prepare a PR for the "passive" part of the functionality, but could also have a stab at adding `--prompt` to `vox new` at a later date. My main itch would be to respect `prompt` from already existing venvs first. ## For community ⬇️ **Please click the 👍 reaction instead of leaving a `+1` or 👍 comment** </issue> <code> [start of xonsh/prompt/env.py] 1 """Prompt formatter for virtualenv and others""" 2 3 import os 4 5 from xonsh.built_ins import XSH 6 7 8 def find_env_name(): 9 """Finds the current environment name from $VIRTUAL_ENV or 10 $CONDA_DEFAULT_ENV if that is set. 11 """ 12 env_path = XSH.env.get("VIRTUAL_ENV", "") 13 if env_path: 14 env_name = os.path.basename(env_path) 15 else: 16 env_name = XSH.env.get("CONDA_DEFAULT_ENV", "") 17 return env_name 18 19 20 def env_name(): 21 """Returns the current env_name if it non-empty, surrounded by the 22 ``{env_prefix}`` and ``{env_postfix}`` fields. 
23 """ 24 env_name = find_env_name() 25 if XSH.env.get("VIRTUAL_ENV_DISABLE_PROMPT") or not env_name: 26 # env name prompt printing disabled, or no environment; just return 27 return 28 29 venv_prompt = XSH.env.get("VIRTUAL_ENV_PROMPT") 30 if venv_prompt is not None: 31 return venv_prompt 32 else: 33 pf = XSH.shell.prompt_formatter 34 pre = pf._get_field_value("env_prefix") 35 post = pf._get_field_value("env_postfix") 36 return pre + env_name + post 37 38 39 def vte_new_tab_cwd(): 40 """This prints an escape sequence that tells VTE terminals the hostname 41 and pwd. This should not be needed in most cases, but sometimes is for 42 certain Linux terminals that do not read the PWD from the environment 43 on startup. Note that this does not return a string, it simply prints 44 and flushes the escape sequence to stdout directly. 45 """ 46 env = XSH.env 47 t = "\033]7;file://{}{}\007" 48 s = t.format(env.get("HOSTNAME"), env.get("PWD")) 49 print(s, end="", flush=True) 50 [end of xonsh/prompt/env.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/xonsh/prompt/env.py b/xonsh/prompt/env.py --- a/xonsh/prompt/env.py +++ b/xonsh/prompt/env.py @@ -1,42 +1,70 @@ """Prompt formatter for virtualenv and others""" - -import os +import functools +import re +from pathlib import Path +from typing import Optional from xonsh.built_ins import XSH -def find_env_name(): - """Finds the current environment name from $VIRTUAL_ENV or - $CONDA_DEFAULT_ENV if that is set. +def find_env_name() -> Optional[str]: + """Find current environment name from available sources. + + If ``$VIRTUAL_ENV`` is set, it is determined from the prompt setting in + ``<venv>/pyvenv.cfg`` or from the folder name of the environment. + + Otherwise - if it is set - from ``$CONDA_DEFAULT_ENV``. + """ + virtual_env = XSH.env.get("VIRTUAL_ENV") + if virtual_env: + name = _determine_env_name(virtual_env) + if name: + return name + conda_default_env = XSH.env.get("CONDA_DEFAULT_ENV") + if conda_default_env: + return conda_default_env + + +def env_name() -> str: + """Build env_name based on different sources. Respect order of precedence. + + Name from VIRTUAL_ENV_PROMPT will be used as-is. + Names from other sources are surrounded with ``{env_prefix}`` and + ``{env_postfix}`` fields. """ - env_path = XSH.env.get("VIRTUAL_ENV", "") - if env_path: - env_name = os.path.basename(env_path) - else: - env_name = XSH.env.get("CONDA_DEFAULT_ENV", "") - return env_name + if XSH.env.get("VIRTUAL_ENV_DISABLE_PROMPT"): + return "" + virtual_env_prompt = XSH.env.get("VIRTUAL_ENV_PROMPT") + if virtual_env_prompt: + return virtual_env_prompt + found_envname = find_env_name() + return _surround_env_name(found_envname) if found_envname else "" + [email protected]_cache(maxsize=5) +def _determine_env_name(virtual_env: str) -> str: + """Use prompt setting from pyvenv.cfg or basename of virtual_env. -def env_name(): - """Returns the current env_name if it non-empty, surrounded by the - ``{env_prefix}`` and ``{env_postfix}`` fields. + Tries to be resilient to subtle changes in whitespace and quoting in the + configuration file format as it adheres to no clear standard. """ - env_name = find_env_name() - if XSH.env.get("VIRTUAL_ENV_DISABLE_PROMPT") or not env_name: - # env name prompt printing disabled, or no environment; just return - return - - venv_prompt = XSH.env.get("VIRTUAL_ENV_PROMPT") - if venv_prompt is not None: - return venv_prompt - else: - pf = XSH.shell.prompt_formatter - pre = pf._get_field_value("env_prefix") - post = pf._get_field_value("env_postfix") - return pre + env_name + post - - -def vte_new_tab_cwd(): + venv_path = Path(virtual_env) + pyvenv_cfg = venv_path / "pyvenv.cfg" + if pyvenv_cfg.is_file(): + match = re.search(r"prompt\s*=\s*(.*)", pyvenv_cfg.read_text()) + if match: + return match.group(1).strip().lstrip("'\"").rstrip("'\"") + return venv_path.name + + +def _surround_env_name(name: str) -> str: + pf = XSH.shell.prompt_formatter + pre = pf._get_field_value("env_prefix") + post = pf._get_field_value("env_postfix") + return f"{pre}{name}{post}" + + +def vte_new_tab_cwd() -> None: """This prints an escape sequence that tells VTE terminals the hostname and pwd. This should not be needed in most cases, but sometimes is for certain Linux terminals that do not read the PWD from the environment
{"golden_diff": "diff --git a/xonsh/prompt/env.py b/xonsh/prompt/env.py\n--- a/xonsh/prompt/env.py\n+++ b/xonsh/prompt/env.py\n@@ -1,42 +1,70 @@\n \"\"\"Prompt formatter for virtualenv and others\"\"\"\n-\n-import os\n+import functools\n+import re\n+from pathlib import Path\n+from typing import Optional\n \n from xonsh.built_ins import XSH\n \n \n-def find_env_name():\n- \"\"\"Finds the current environment name from $VIRTUAL_ENV or\n- $CONDA_DEFAULT_ENV if that is set.\n+def find_env_name() -> Optional[str]:\n+ \"\"\"Find current environment name from available sources.\n+\n+ If ``$VIRTUAL_ENV`` is set, it is determined from the prompt setting in\n+ ``<venv>/pyvenv.cfg`` or from the folder name of the environment.\n+\n+ Otherwise - if it is set - from ``$CONDA_DEFAULT_ENV``.\n+ \"\"\"\n+ virtual_env = XSH.env.get(\"VIRTUAL_ENV\")\n+ if virtual_env:\n+ name = _determine_env_name(virtual_env)\n+ if name:\n+ return name\n+ conda_default_env = XSH.env.get(\"CONDA_DEFAULT_ENV\")\n+ if conda_default_env:\n+ return conda_default_env\n+\n+\n+def env_name() -> str:\n+ \"\"\"Build env_name based on different sources. Respect order of precedence.\n+\n+ Name from VIRTUAL_ENV_PROMPT will be used as-is.\n+ Names from other sources are surrounded with ``{env_prefix}`` and\n+ ``{env_postfix}`` fields.\n \"\"\"\n- env_path = XSH.env.get(\"VIRTUAL_ENV\", \"\")\n- if env_path:\n- env_name = os.path.basename(env_path)\n- else:\n- env_name = XSH.env.get(\"CONDA_DEFAULT_ENV\", \"\")\n- return env_name\n+ if XSH.env.get(\"VIRTUAL_ENV_DISABLE_PROMPT\"):\n+ return \"\"\n+ virtual_env_prompt = XSH.env.get(\"VIRTUAL_ENV_PROMPT\")\n+ if virtual_env_prompt:\n+ return virtual_env_prompt\n+ found_envname = find_env_name()\n+ return _surround_env_name(found_envname) if found_envname else \"\"\n+\n \[email protected]_cache(maxsize=5)\n+def _determine_env_name(virtual_env: str) -> str:\n+ \"\"\"Use prompt setting from pyvenv.cfg or basename of virtual_env.\n \n-def env_name():\n- \"\"\"Returns the current env_name if it non-empty, surrounded by the\n- ``{env_prefix}`` and ``{env_postfix}`` fields.\n+ Tries to be resilient to subtle changes in whitespace and quoting in the\n+ configuration file format as it adheres to no clear standard.\n \"\"\"\n- env_name = find_env_name()\n- if XSH.env.get(\"VIRTUAL_ENV_DISABLE_PROMPT\") or not env_name:\n- # env name prompt printing disabled, or no environment; just return\n- return\n-\n- venv_prompt = XSH.env.get(\"VIRTUAL_ENV_PROMPT\")\n- if venv_prompt is not None:\n- return venv_prompt\n- else:\n- pf = XSH.shell.prompt_formatter\n- pre = pf._get_field_value(\"env_prefix\")\n- post = pf._get_field_value(\"env_postfix\")\n- return pre + env_name + post\n-\n-\n-def vte_new_tab_cwd():\n+ venv_path = Path(virtual_env)\n+ pyvenv_cfg = venv_path / \"pyvenv.cfg\"\n+ if pyvenv_cfg.is_file():\n+ match = re.search(r\"prompt\\s*=\\s*(.*)\", pyvenv_cfg.read_text())\n+ if match:\n+ return match.group(1).strip().lstrip(\"'\\\"\").rstrip(\"'\\\"\")\n+ return venv_path.name\n+\n+\n+def _surround_env_name(name: str) -> str:\n+ pf = XSH.shell.prompt_formatter\n+ pre = pf._get_field_value(\"env_prefix\")\n+ post = pf._get_field_value(\"env_postfix\")\n+ return f\"{pre}{name}{post}\"\n+\n+\n+def vte_new_tab_cwd() -> None:\n \"\"\"This prints an escape sequence that tells VTE terminals the hostname\n and pwd. 
This should not be needed in most cases, but sometimes is for\n certain Linux terminals that do not read the PWD from the environment\n", "issue": "make promp.env.env_name venv --prompt aware (set in pyvenv.cfg)\n## xonfig\r\n\r\n<details>\r\n\r\n```\r\n> xonfig\r\n<xonsh-code>:1:0 - xonfig\r\n<xonsh-code>:1:0 + ![xonfig]\r\n+------------------+----------------------+\r\n| xonsh | 0.11.0 |\r\n| Git SHA | adfa60ea |\r\n| Commit Date | Feb 11 14:53:00 2022 |\r\n| Python | 3.9.7 |\r\n| PLY | 3.11 |\r\n| have readline | True |\r\n| prompt toolkit | None |\r\n| shell type | readline |\r\n| history backend | json |\r\n| pygments | 2.11.2 |\r\n| on posix | True |\r\n| on linux | True |\r\n| distro | unknown |\r\n| on wsl | False |\r\n| on darwin | False |\r\n| on windows | False |\r\n| on cygwin | False |\r\n| on msys2 | False |\r\n| is superuser | False |\r\n| default encoding | utf-8 |\r\n| xonsh encoding | utf-8 |\r\n| encoding errors | surrogateescape |\r\n| on jupyter | False |\r\n| jupyter kernel | None |\r\n| xontrib 1 | coreutils |\r\n| xontrib 2 | vox |\r\n| xontrib 3 | voxapi |\r\n| RC file 1 | /home/ob/.xonshrc |\r\n+------------------+----------------------+\r\n```\r\n\r\n</details>\r\n\r\n## Expected Behavior\r\n\r\nWhen activating a venv via `vox activate` the name should be set to the value of `prompt` key in `pyvenv.cfg` if present (file and key) - see https://docs.python.org/3/library/venv.html.\r\n\r\n## Current Behavior\r\n\r\nThe prompt is always set to name of venv directory independent of prompt settings.\r\n\r\n## Steps to Reproduce\r\n\r\n```shell\r\n$ python -m venv --prompt \"MY SPECIAL PROMPT\" .venv\r\n$ cat .venv/pyvenv.cfg | grep prompt\r\nprompt = 'MY SPECIAL PROMPT'\r\nvox activate ./.venv\r\n```\r\n\r\nnew prompt looks like this:\r\n`[17:58:10] (.venv) ob@ob1 ~/oss/xonsh dropthedot|\u2713`\r\n\r\nbut should look like this:\r\n`[17:58:10] (MY SPECIAL PROMPT) ob@ob1 ~/oss/xonsh dropthedot|\u2713`\r\n\r\n## Also add `--prompt` to `vox new`?\r\nIf this is done, maybe a good idea would be to also add `--prompt` to `vox new` to make this possible on creation (also in the interest of symmetry), but it seems like the common pattern for many xonsh users is to have virtualenvs in `~/.virtualenv` with the venv folder name being the same as the project, therefore automatically having a meaningful prompt, so this does not seem to be an urgently wanted feature for most.\r\n\r\nI want to prepare a PR for the \"passive\" part of the functionality, but could also have a stab at adding `--prompt` to `vox new` at a later date. 
My main itch would be to respect `prompt` from already existing venvs first.\r\n\r\n## For community\r\n\u2b07\ufe0f **Please click the \ud83d\udc4d reaction instead of leaving a `+1` or \ud83d\udc4d comment**\r\n\n", "before_files": [{"content": "\"\"\"Prompt formatter for virtualenv and others\"\"\"\n\nimport os\n\nfrom xonsh.built_ins import XSH\n\n\ndef find_env_name():\n \"\"\"Finds the current environment name from $VIRTUAL_ENV or\n $CONDA_DEFAULT_ENV if that is set.\n \"\"\"\n env_path = XSH.env.get(\"VIRTUAL_ENV\", \"\")\n if env_path:\n env_name = os.path.basename(env_path)\n else:\n env_name = XSH.env.get(\"CONDA_DEFAULT_ENV\", \"\")\n return env_name\n\n\ndef env_name():\n \"\"\"Returns the current env_name if it non-empty, surrounded by the\n ``{env_prefix}`` and ``{env_postfix}`` fields.\n \"\"\"\n env_name = find_env_name()\n if XSH.env.get(\"VIRTUAL_ENV_DISABLE_PROMPT\") or not env_name:\n # env name prompt printing disabled, or no environment; just return\n return\n\n venv_prompt = XSH.env.get(\"VIRTUAL_ENV_PROMPT\")\n if venv_prompt is not None:\n return venv_prompt\n else:\n pf = XSH.shell.prompt_formatter\n pre = pf._get_field_value(\"env_prefix\")\n post = pf._get_field_value(\"env_postfix\")\n return pre + env_name + post\n\n\ndef vte_new_tab_cwd():\n \"\"\"This prints an escape sequence that tells VTE terminals the hostname\n and pwd. This should not be needed in most cases, but sometimes is for\n certain Linux terminals that do not read the PWD from the environment\n on startup. Note that this does not return a string, it simply prints\n and flushes the escape sequence to stdout directly.\n \"\"\"\n env = XSH.env\n t = \"\\033]7;file://{}{}\\007\"\n s = t.format(env.get(\"HOSTNAME\"), env.get(\"PWD\"))\n print(s, end=\"\", flush=True)\n", "path": "xonsh/prompt/env.py"}]}
1,827
978
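The core of the xonsh change above is a small, self-contained lookup that lifts out cleanly: prefer the `prompt` key in `<venv>/pyvenv.cfg`, fall back to the directory name. A standalone version mirroring the merged regex and quote stripping, with the `lru_cache` and xonsh environment plumbing omitted:

```python
import re
from pathlib import Path


def determine_env_name(virtual_env: str) -> str:
    """Prompt setting from pyvenv.cfg if present, else the venv folder name."""
    venv_path = Path(virtual_env)
    pyvenv_cfg = venv_path / "pyvenv.cfg"
    if pyvenv_cfg.is_file():
        # Tolerant of whitespace and quoting: `prompt = 'MY SPECIAL PROMPT'`.
        match = re.search(r"prompt\s*=\s*(.*)", pyvenv_cfg.read_text())
        if match:
            return match.group(1).strip().lstrip("'\"").rstrip("'\"")
    return venv_path.name


# After `python -m venv --prompt "MY SPECIAL PROMPT" .venv`:
# determine_env_name(".venv") -> 'MY SPECIAL PROMPT'
```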
gh_patches_debug_50236
rasdani/github-patches
git_diff
pex-tool__pex-577
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Release 1.4.8 User facing issues on the docket: + non-zip_safe pex extraction has a race on renaming #557 + Execute pex archive at runtime with -m parameter #547 + OSX's python 2.7.10 interpreter reports a bad local platform, bdists fail to resolve #523 + `pex.resolver.resolve` does the wrong thing when given an `interpreter` and no `platform` #511 + [Errno 13] Permission denied when executing with bdist_pex #570 </issue> <code> [start of pex/version.py] 1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). 2 # Licensed under the Apache License, Version 2.0 (see LICENSE). 3 4 __version__ = '1.4.7' 5 6 # Versions 34.0.0 through 35.0.2 (last pre-36.0.0) de-vendored dependencies which causes problems 7 # for pex code so we exclude that range. 8 SETUPTOOLS_REQUIREMENT = 'setuptools>=20.3,<41,!=34.*,!=35.*' 9 10 WHEEL_REQUIREMENT = 'wheel>=0.26.0,<0.32' 11 [end of pex/version.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pex/version.py b/pex/version.py --- a/pex/version.py +++ b/pex/version.py @@ -1,7 +1,7 @@ # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). -__version__ = '1.4.7' +__version__ = '1.4.8' # Versions 34.0.0 through 35.0.2 (last pre-36.0.0) de-vendored dependencies which causes problems # for pex code so we exclude that range.
{"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,7 +1,7 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = '1.4.7'\n+__version__ = '1.4.8'\n \n # Versions 34.0.0 through 35.0.2 (last pre-36.0.0) de-vendored dependencies which causes problems\n # for pex code so we exclude that range.\n", "issue": "Release 1.4.8\nUser facing issues on the docket:\r\n+ non-zip_safe pex extraction has a race on renaming #557\r\n+ Execute pex archive at runtime with -m parameter #547\r\n+ OSX's python 2.7.10 interpreter reports a bad local platform, bdists fail to resolve #523 \r\n+ `pex.resolver.resolve` does the wrong thing when given an `interpreter` and no `platform` #511\r\n+ [Errno 13] Permission denied when executing with bdist_pex #570\r\n\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = '1.4.7'\n\n# Versions 34.0.0 through 35.0.2 (last pre-36.0.0) de-vendored dependencies which causes problems\n# for pex code so we exclude that range.\nSETUPTOOLS_REQUIREMENT = 'setuptools>=20.3,<41,!=34.*,!=35.*'\n\nWHEEL_REQUIREMENT = 'wheel>=0.26.0,<0.32'\n", "path": "pex/version.py"}]}
818
144
gh_patches_debug_21241
rasdani/github-patches
git_diff
liqd__a4-meinberlin-2280
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> events not editable for groupmembers group member CAN edit a project but CANNOT an event related to the project. 403 error. </issue> <code> [start of meinberlin/apps/offlineevents/rules.py] 1 import rules 2 from rules.predicates import is_superuser 3 4 from adhocracy4.modules import predicates as module_predicates 5 from adhocracy4.organisations.predicates import is_initiator 6 from adhocracy4.projects.predicates import is_moderator 7 8 rules.add_perm( 9 'meinberlin_offlineevents.list_offlineevent', 10 is_superuser | is_initiator | is_moderator 11 ) 12 13 rules.add_perm( 14 'meinberlin_offlineevents.view_offlineevent', 15 module_predicates.is_allowed_view_item 16 ) 17 18 rules.add_perm( 19 'meinberlin_offlineevents.add_offlineevent', 20 is_superuser | is_initiator | is_moderator 21 ) 22 23 rules.add_perm( 24 'meinberlin_offlineevents.change_offlineevent', 25 is_superuser | is_initiator | is_moderator 26 ) 27 [end of meinberlin/apps/offlineevents/rules.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/meinberlin/apps/offlineevents/rules.py b/meinberlin/apps/offlineevents/rules.py --- a/meinberlin/apps/offlineevents/rules.py +++ b/meinberlin/apps/offlineevents/rules.py @@ -4,10 +4,11 @@ from adhocracy4.modules import predicates as module_predicates from adhocracy4.organisations.predicates import is_initiator from adhocracy4.projects.predicates import is_moderator +from adhocracy4.projects.predicates import is_prj_group_member rules.add_perm( 'meinberlin_offlineevents.list_offlineevent', - is_superuser | is_initiator | is_moderator + is_superuser | is_initiator | is_moderator | is_prj_group_member ) rules.add_perm( @@ -17,10 +18,10 @@ rules.add_perm( 'meinberlin_offlineevents.add_offlineevent', - is_superuser | is_initiator | is_moderator + is_superuser | is_initiator | is_moderator | is_prj_group_member ) rules.add_perm( 'meinberlin_offlineevents.change_offlineevent', - is_superuser | is_initiator | is_moderator + is_superuser | is_initiator | is_moderator | is_prj_group_member )
{"golden_diff": "diff --git a/meinberlin/apps/offlineevents/rules.py b/meinberlin/apps/offlineevents/rules.py\n--- a/meinberlin/apps/offlineevents/rules.py\n+++ b/meinberlin/apps/offlineevents/rules.py\n@@ -4,10 +4,11 @@\n from adhocracy4.modules import predicates as module_predicates\n from adhocracy4.organisations.predicates import is_initiator\n from adhocracy4.projects.predicates import is_moderator\n+from adhocracy4.projects.predicates import is_prj_group_member\n \n rules.add_perm(\n 'meinberlin_offlineevents.list_offlineevent',\n- is_superuser | is_initiator | is_moderator\n+ is_superuser | is_initiator | is_moderator | is_prj_group_member\n )\n \n rules.add_perm(\n@@ -17,10 +18,10 @@\n \n rules.add_perm(\n 'meinberlin_offlineevents.add_offlineevent',\n- is_superuser | is_initiator | is_moderator\n+ is_superuser | is_initiator | is_moderator | is_prj_group_member\n )\n \n rules.add_perm(\n 'meinberlin_offlineevents.change_offlineevent',\n- is_superuser | is_initiator | is_moderator\n+ is_superuser | is_initiator | is_moderator | is_prj_group_member\n )\n", "issue": "events not editable for groupmembers\ngroup member CAN edit a project but CANNOT an event related to the project. 403 error.\n", "before_files": [{"content": "import rules\nfrom rules.predicates import is_superuser\n\nfrom adhocracy4.modules import predicates as module_predicates\nfrom adhocracy4.organisations.predicates import is_initiator\nfrom adhocracy4.projects.predicates import is_moderator\n\nrules.add_perm(\n 'meinberlin_offlineevents.list_offlineevent',\n is_superuser | is_initiator | is_moderator\n)\n\nrules.add_perm(\n 'meinberlin_offlineevents.view_offlineevent',\n module_predicates.is_allowed_view_item\n)\n\nrules.add_perm(\n 'meinberlin_offlineevents.add_offlineevent',\n is_superuser | is_initiator | is_moderator\n)\n\nrules.add_perm(\n 'meinberlin_offlineevents.change_offlineevent',\n is_superuser | is_initiator | is_moderator\n)\n", "path": "meinberlin/apps/offlineevents/rules.py"}]}
799
302
gh_patches_debug_8409
rasdani/github-patches
git_diff
pre-commit__pre-commit-478
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> `docker build` argument order is invalid on old versions of Docker We do: `docker build . --tag thing --label thing` But this produces an error on Docker 1.11.2: ``` ckuehl@dev4-uswest1cdevc:~/proj/pre-commit$ docker build . --tag thing --label thing docker: "build" requires 1 argument. See 'docker build --help'. Usage: docker build [OPTIONS] PATH | URL | - Build an image from a Dockerfile ``` The path needs to go at the end on 1.11.2, but it works on 1.13.0 as-is. We should probably just change the order of the arguments to make every version happy. </issue> <code> [start of pre_commit/languages/docker.py] 1 from __future__ import absolute_import 2 from __future__ import unicode_literals 3 4 import hashlib 5 import os 6 7 from pre_commit import five 8 from pre_commit.languages import helpers 9 from pre_commit.util import CalledProcessError 10 from pre_commit.util import clean_path_on_failure 11 from pre_commit.util import cmd_output 12 from pre_commit.xargs import xargs 13 14 15 ENVIRONMENT_DIR = 'docker' 16 PRE_COMMIT_LABEL = 'PRE_COMMIT' 17 18 19 def md5(s): # pragma: windows no cover 20 return hashlib.md5(five.to_bytes(s)).hexdigest() 21 22 23 def docker_tag(repo_cmd_runner): # pragma: windows no cover 24 return 'pre-commit-{}'.format( 25 md5(os.path.basename(repo_cmd_runner.path())) 26 ).lower() 27 28 29 def docker_is_running(): # pragma: windows no cover 30 try: 31 return cmd_output('docker', 'ps')[0] == 0 32 except CalledProcessError: 33 return False 34 35 36 def assert_docker_available(): # pragma: windows no cover 37 assert docker_is_running(), ( 38 'Docker is either not running or not configured in this environment' 39 ) 40 41 42 def build_docker_image(repo_cmd_runner, **kwargs): # pragma: windows no cover 43 pull = kwargs.pop('pull') 44 assert not kwargs, kwargs 45 cmd = ( 46 'docker', 'build', '.', 47 '--tag', docker_tag(repo_cmd_runner), 48 '--label', PRE_COMMIT_LABEL, 49 ) 50 if pull: 51 cmd += ('--pull',) 52 helpers.run_setup_cmd(repo_cmd_runner, cmd) 53 54 55 def install_environment( 56 repo_cmd_runner, 57 version='default', 58 additional_dependencies=(), 59 ): # pragma: windows no cover 60 assert repo_cmd_runner.exists('Dockerfile'), ( 61 'No Dockerfile was found in the hook repository' 62 ) 63 helpers.assert_version_default('docker', version) 64 helpers.assert_no_additional_deps('docker', additional_dependencies) 65 assert_docker_available() 66 67 directory = repo_cmd_runner.path( 68 helpers.environment_dir(ENVIRONMENT_DIR, 'default'), 69 ) 70 71 # Docker doesn't really have relevant disk environment, but pre-commit 72 # still needs to cleanup it's state files on failure 73 with clean_path_on_failure(directory): 74 build_docker_image(repo_cmd_runner, pull=True) 75 os.mkdir(directory) 76 77 78 def run_hook(repo_cmd_runner, hook, file_args): # pragma: windows no cover 79 assert_docker_available() 80 # Rebuild the docker image in case it has gone missing, as many people do 81 # automated cleanup of docker images. 
82 build_docker_image(repo_cmd_runner, pull=False) 83 84 hook_cmd = helpers.to_cmd(hook) 85 entry_executable, cmd_rest = hook_cmd[0], hook_cmd[1:] 86 87 cmd = ( 88 'docker', 'run', 89 '--rm', 90 '-u', '{}:{}'.format(os.getuid(), os.getgid()), 91 '-v', '{}:/src:rw'.format(os.getcwd()), 92 '--workdir', '/src', 93 '--entrypoint', entry_executable, 94 docker_tag(repo_cmd_runner) 95 ) + cmd_rest 96 97 return xargs(cmd, file_args) 98 [end of pre_commit/languages/docker.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pre_commit/languages/docker.py b/pre_commit/languages/docker.py --- a/pre_commit/languages/docker.py +++ b/pre_commit/languages/docker.py @@ -43,12 +43,14 @@ pull = kwargs.pop('pull') assert not kwargs, kwargs cmd = ( - 'docker', 'build', '.', + 'docker', 'build', '--tag', docker_tag(repo_cmd_runner), '--label', PRE_COMMIT_LABEL, ) if pull: cmd += ('--pull',) + # This must come last for old versions of docker. See #477 + cmd += ('.',) helpers.run_setup_cmd(repo_cmd_runner, cmd)
{"golden_diff": "diff --git a/pre_commit/languages/docker.py b/pre_commit/languages/docker.py\n--- a/pre_commit/languages/docker.py\n+++ b/pre_commit/languages/docker.py\n@@ -43,12 +43,14 @@\n pull = kwargs.pop('pull')\n assert not kwargs, kwargs\n cmd = (\n- 'docker', 'build', '.',\n+ 'docker', 'build',\n '--tag', docker_tag(repo_cmd_runner),\n '--label', PRE_COMMIT_LABEL,\n )\n if pull:\n cmd += ('--pull',)\n+ # This must come last for old versions of docker. See #477\n+ cmd += ('.',)\n helpers.run_setup_cmd(repo_cmd_runner, cmd)\n", "issue": "`docker build` argument order is invalid on old versions of Docker\nWe do: `docker build . --tag thing --label thing`\r\n\r\nBut this produces an error on Docker 1.11.2:\r\n```\r\nckuehl@dev4-uswest1cdevc:~/proj/pre-commit$ docker build . --tag thing --label thing\r\ndocker: \"build\" requires 1 argument.\r\nSee 'docker build --help'.\r\n\r\nUsage: docker build [OPTIONS] PATH | URL | -\r\n\r\nBuild an image from a Dockerfile\r\n```\r\n\r\nThe path needs to go at the end on 1.11.2, but it works on 1.13.0 as-is. We should probably just change the order of the arguments to make every version happy.\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import unicode_literals\n\nimport hashlib\nimport os\n\nfrom pre_commit import five\nfrom pre_commit.languages import helpers\nfrom pre_commit.util import CalledProcessError\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.util import cmd_output\nfrom pre_commit.xargs import xargs\n\n\nENVIRONMENT_DIR = 'docker'\nPRE_COMMIT_LABEL = 'PRE_COMMIT'\n\n\ndef md5(s): # pragma: windows no cover\n return hashlib.md5(five.to_bytes(s)).hexdigest()\n\n\ndef docker_tag(repo_cmd_runner): # pragma: windows no cover\n return 'pre-commit-{}'.format(\n md5(os.path.basename(repo_cmd_runner.path()))\n ).lower()\n\n\ndef docker_is_running(): # pragma: windows no cover\n try:\n return cmd_output('docker', 'ps')[0] == 0\n except CalledProcessError:\n return False\n\n\ndef assert_docker_available(): # pragma: windows no cover\n assert docker_is_running(), (\n 'Docker is either not running or not configured in this environment'\n )\n\n\ndef build_docker_image(repo_cmd_runner, **kwargs): # pragma: windows no cover\n pull = kwargs.pop('pull')\n assert not kwargs, kwargs\n cmd = (\n 'docker', 'build', '.',\n '--tag', docker_tag(repo_cmd_runner),\n '--label', PRE_COMMIT_LABEL,\n )\n if pull:\n cmd += ('--pull',)\n helpers.run_setup_cmd(repo_cmd_runner, cmd)\n\n\ndef install_environment(\n repo_cmd_runner,\n version='default',\n additional_dependencies=(),\n): # pragma: windows no cover\n assert repo_cmd_runner.exists('Dockerfile'), (\n 'No Dockerfile was found in the hook repository'\n )\n helpers.assert_version_default('docker', version)\n helpers.assert_no_additional_deps('docker', additional_dependencies)\n assert_docker_available()\n\n directory = repo_cmd_runner.path(\n helpers.environment_dir(ENVIRONMENT_DIR, 'default'),\n )\n\n # Docker doesn't really have relevant disk environment, but pre-commit\n # still needs to cleanup it's state files on failure\n with clean_path_on_failure(directory):\n build_docker_image(repo_cmd_runner, pull=True)\n os.mkdir(directory)\n\n\ndef run_hook(repo_cmd_runner, hook, file_args): # pragma: windows no cover\n assert_docker_available()\n # Rebuild the docker image in case it has gone missing, as many people do\n # automated cleanup of docker images.\n build_docker_image(repo_cmd_runner, pull=False)\n\n hook_cmd = helpers.to_cmd(hook)\n 
entry_executable, cmd_rest = hook_cmd[0], hook_cmd[1:]\n\n cmd = (\n 'docker', 'run',\n '--rm',\n '-u', '{}:{}'.format(os.getuid(), os.getgid()),\n '-v', '{}:/src:rw'.format(os.getcwd()),\n '--workdir', '/src',\n '--entrypoint', entry_executable,\n docker_tag(repo_cmd_runner)\n ) + cmd_rest\n\n return xargs(cmd, file_args)\n", "path": "pre_commit/languages/docker.py"}]}
1566
157
gh_patches_debug_60679
rasdani/github-patches
git_diff
OCHA-DAP__hdx-ckan-1655
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Main Nav changes to accomodate "Feedback" button Updating the action based on discussions: 1. Remove `Contact` from main nav. 2. Between `About` and `Submit Data`, add a button `Feedback`. 3. While you are there, change `Submit Data` to `Share Data` (there's a later issue for that which this will close) Button style should be the same as the "Follow Us" button here, except gray, not blue: http://docs.hdx.rwlabs.org/get-involved/ Note that the megaphone icon shown below will not be used. No icon on the button. </issue> <code> [start of ckanext-hdx_theme/ckanext/hdx_theme/version.py] 1 hdx_version = 'v0.4.5' 2 [end of ckanext-hdx_theme/ckanext/hdx_theme/version.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/version.py b/ckanext-hdx_theme/ckanext/hdx_theme/version.py --- a/ckanext-hdx_theme/ckanext/hdx_theme/version.py +++ b/ckanext-hdx_theme/ckanext/hdx_theme/version.py @@ -1 +1 @@ -hdx_version = 'v0.4.5' +hdx_version = 'v0.4.6'
{"golden_diff": "diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/version.py b/ckanext-hdx_theme/ckanext/hdx_theme/version.py\n--- a/ckanext-hdx_theme/ckanext/hdx_theme/version.py\n+++ b/ckanext-hdx_theme/ckanext/hdx_theme/version.py\n@@ -1 +1 @@\n-hdx_version = 'v0.4.5'\n+hdx_version = 'v0.4.6'\n", "issue": "Main Nav changes to accomodate \"Feedback\" button\nUpdating the action based on discussions:\n1. Remove `Contact` from main nav. \n2. Between `About` and `Submit Data`, add a button `Feedback`. \n3. While you are there, change `Submit Data` to `Share Data` (there's a later issue for that which this will close)\n\nButton style should be the same as the \"Follow Us\" button here, except gray, not blue: http://docs.hdx.rwlabs.org/get-involved/\n\nNote that the megaphone icon shown below will not be used. No icon on the button.\n\n", "before_files": [{"content": "hdx_version = 'v0.4.5'\n", "path": "ckanext-hdx_theme/ckanext/hdx_theme/version.py"}]}
702
107
gh_patches_debug_7628
rasdani/github-patches
git_diff
mathesar-foundation__mathesar-3074
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add OpenAPI Specification for /schemas/ endpoint and fix the incorrect specs ## Problem In order to ensure the accuracy of the specs generated by drf-spectacular for /schemas/ endpoint , we will generate an OpenAPI Specification specifically for this endpoint. This will allow us to verify its correctness by comparing it with the existing test cases and the DRF browsable API page. ## Proposed solution * Implement custom preprocessing hook function to filter out all endpoints except for the /schemas/ endpoint.The hook function selectively retains only the endpoint paths that match the /schemas/ * Configure the PREPROCESSING_HOOKS setting with the custom hook function, we ensure that only the /schemas/ endpoint is considered during the OpenAPI specification generation process. * Finally, generate the spec file using the `manage.py spectacular --color --file schema.yml` command </issue> <code> [start of config/settings/openapi.py] 1 def custom_preprocessing_hook(endpoints): 2 filtered = [] 3 for (path, path_regex, method, callback) in endpoints: 4 # Remove all but DRF API endpoints 5 if path.startswith("/api/db/v0/databases/") or path.startswith("/api/db/v0/data_files/"): 6 filtered.append((path, path_regex, method, callback)) 7 return filtered 8 9 10 def remove_url_prefix_hook(result, **kwargs): 11 # Remove namespace and version URL prefix from the operation Id of the generated API schema 12 for path, path_info in result['paths'].items(): 13 for method, operation in path_info.items(): 14 operation_id = operation.get('operationId') 15 if operation_id: 16 if path.startswith('/api/db/v0/'): 17 operation['operationId'] = operation_id.replace('db_v0_', '') 18 elif path.startswith('/api/ui/v0/'): 19 operation['operationId'] = operation_id.replace('ui_v0_', '') 20 21 return result 22 [end of config/settings/openapi.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/config/settings/openapi.py b/config/settings/openapi.py --- a/config/settings/openapi.py +++ b/config/settings/openapi.py @@ -2,7 +2,7 @@ filtered = [] for (path, path_regex, method, callback) in endpoints: # Remove all but DRF API endpoints - if path.startswith("/api/db/v0/databases/") or path.startswith("/api/db/v0/data_files/"): + if path.startswith("/api/db/v0/databases/") or path.startswith("/api/db/v0/data_files/") or path.startswith("/api/db/v0/schemas/"): filtered.append((path, path_regex, method, callback)) return filtered
{"golden_diff": "diff --git a/config/settings/openapi.py b/config/settings/openapi.py\n--- a/config/settings/openapi.py\n+++ b/config/settings/openapi.py\n@@ -2,7 +2,7 @@\n filtered = []\n for (path, path_regex, method, callback) in endpoints:\n # Remove all but DRF API endpoints\n- if path.startswith(\"/api/db/v0/databases/\") or path.startswith(\"/api/db/v0/data_files/\"):\n+ if path.startswith(\"/api/db/v0/databases/\") or path.startswith(\"/api/db/v0/data_files/\") or path.startswith(\"/api/db/v0/schemas/\"):\n filtered.append((path, path_regex, method, callback))\n return filtered\n", "issue": "Add OpenAPI Specification for /schemas/ endpoint and fix the incorrect specs\n## Problem\r\nIn order to ensure the accuracy of the specs generated by drf-spectacular for /schemas/ endpoint , we will generate an OpenAPI Specification specifically for this endpoint. This will allow us to verify its correctness by comparing it with the existing test cases and the DRF browsable API page.\r\n\r\n## Proposed solution\r\n* Implement custom preprocessing hook function to filter out all endpoints except for the /schemas/ endpoint.The hook function selectively retains only the endpoint paths that match the /schemas/ \r\n* Configure the PREPROCESSING_HOOKS setting with the custom hook function, we ensure that only the /schemas/ endpoint is considered during the OpenAPI specification generation process.\r\n* Finally, generate the spec file using the `manage.py spectacular --color --file schema.yml` command\n", "before_files": [{"content": "def custom_preprocessing_hook(endpoints):\n filtered = []\n for (path, path_regex, method, callback) in endpoints:\n # Remove all but DRF API endpoints\n if path.startswith(\"/api/db/v0/databases/\") or path.startswith(\"/api/db/v0/data_files/\"):\n filtered.append((path, path_regex, method, callback))\n return filtered\n\n\ndef remove_url_prefix_hook(result, **kwargs):\n # Remove namespace and version URL prefix from the operation Id of the generated API schema\n for path, path_info in result['paths'].items():\n for method, operation in path_info.items():\n operation_id = operation.get('operationId')\n if operation_id:\n if path.startswith('/api/db/v0/'):\n operation['operationId'] = operation_id.replace('db_v0_', '')\n elif path.startswith('/api/ui/v0/'):\n operation['operationId'] = operation_id.replace('ui_v0_', '')\n\n return result\n", "path": "config/settings/openapi.py"}]}
948
150
gh_patches_debug_5497
rasdani/github-patches
git_diff
scikit-hep__pyhf-999
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Keep tighter version constraints on TensorFlow releases # Description As Issue #997 makes it clear that minor releases might as well be major releases for TensorFlow, then it is probably worth keeping tighter version constraints on them and just watching the releases of TensorFlow and TensorFlow Probability to see when we can relax these. </issue> <code> [start of setup.py] 1 from setuptools import setup 2 3 extras_require = { 4 'shellcomplete': ['click_completion'], 5 'tensorflow': [ 6 'tensorflow~=2.0', 7 'tensorflow-probability~=0.10', # TODO: Temp patch until tfp v0.11 8 ], 9 'torch': ['torch~=1.2'], 10 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'], 11 'xmlio': ['uproot~=3.6'], # Future proof against uproot4 API changes 12 'minuit': ['iminuit~=1.4,>=1.4.3'], # Use "name" keyword in MINUIT optimizer 13 } 14 extras_require['backends'] = sorted( 15 set( 16 extras_require['tensorflow'] 17 + extras_require['torch'] 18 + extras_require['jax'] 19 + extras_require['minuit'] 20 ) 21 ) 22 extras_require['contrib'] = sorted(set(['matplotlib'])) 23 extras_require['lint'] = sorted(set(['pyflakes', 'black'])) 24 25 extras_require['test'] = sorted( 26 set( 27 extras_require['backends'] 28 + extras_require['xmlio'] 29 + extras_require['contrib'] 30 + extras_require['shellcomplete'] 31 + [ 32 'pytest~=3.5', 33 'pytest-cov>=2.5.1', 34 'pytest-mock', 35 'pytest-benchmark[histogram]', 36 'pytest-console-scripts', 37 'pytest-mpl', 38 'pydocstyle', 39 'coverage>=4.0', # coveralls 40 'papermill~=2.0', 41 'nteract-scrapbook~=0.2', 42 'jupyter', 43 'uproot~=3.3', 44 'graphviz', 45 'jsonpatch', 46 ] 47 ) 48 ) 49 extras_require['docs'] = sorted( 50 set( 51 [ 52 'sphinx>=3.1.2', 53 'sphinxcontrib-bibtex', 54 'sphinx-click', 55 'sphinx_rtd_theme', 56 'nbsphinx', 57 'ipywidgets', 58 'sphinx-issues', 59 'sphinx-copybutton>0.2.9', 60 ] 61 ) 62 ) 63 extras_require['develop'] = sorted( 64 set( 65 extras_require['docs'] 66 + extras_require['lint'] 67 + extras_require['test'] 68 + ['nbdime', 'bumpversion', 'ipython', 'pre-commit', 'check-manifest', 'twine'] 69 ) 70 ) 71 extras_require['complete'] = sorted(set(sum(extras_require.values(), []))) 72 73 74 setup( 75 extras_require=extras_require, 76 use_scm_version=lambda: {'local_scheme': lambda version: ''}, 77 ) 78 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -3,8 +3,8 @@ extras_require = { 'shellcomplete': ['click_completion'], 'tensorflow': [ - 'tensorflow~=2.0', - 'tensorflow-probability~=0.10', # TODO: Temp patch until tfp v0.11 + 'tensorflow~=2.2.0', # TensorFlow minor releases are as volatile as major + 'tensorflow-probability~=0.10.0', ], 'torch': ['torch~=1.2'], 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'],
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -3,8 +3,8 @@\n extras_require = {\n 'shellcomplete': ['click_completion'],\n 'tensorflow': [\n- 'tensorflow~=2.0',\n- 'tensorflow-probability~=0.10', # TODO: Temp patch until tfp v0.11\n+ 'tensorflow~=2.2.0', # TensorFlow minor releases are as volatile as major\n+ 'tensorflow-probability~=0.10.0',\n ],\n 'torch': ['torch~=1.2'],\n 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'],\n", "issue": "Keep tighter version constraints on TensorFlow releases\n# Description\r\n\r\nAs Issue #997 makes it clear that minor releases might as well be major releases for TensorFlow, then it is probably worth keeping tighter version constraints on them and just watching the releases of TensorFlow and TensorFlow Probability to see when we can relax these.\n", "before_files": [{"content": "from setuptools import setup\n\nextras_require = {\n 'shellcomplete': ['click_completion'],\n 'tensorflow': [\n 'tensorflow~=2.0',\n 'tensorflow-probability~=0.10', # TODO: Temp patch until tfp v0.11\n ],\n 'torch': ['torch~=1.2'],\n 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'],\n 'xmlio': ['uproot~=3.6'], # Future proof against uproot4 API changes\n 'minuit': ['iminuit~=1.4,>=1.4.3'], # Use \"name\" keyword in MINUIT optimizer\n}\nextras_require['backends'] = sorted(\n set(\n extras_require['tensorflow']\n + extras_require['torch']\n + extras_require['jax']\n + extras_require['minuit']\n )\n)\nextras_require['contrib'] = sorted(set(['matplotlib']))\nextras_require['lint'] = sorted(set(['pyflakes', 'black']))\n\nextras_require['test'] = sorted(\n set(\n extras_require['backends']\n + extras_require['xmlio']\n + extras_require['contrib']\n + extras_require['shellcomplete']\n + [\n 'pytest~=3.5',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n 'coverage>=4.0', # coveralls\n 'papermill~=2.0',\n 'nteract-scrapbook~=0.2',\n 'jupyter',\n 'uproot~=3.3',\n 'graphviz',\n 'jsonpatch',\n ]\n )\n)\nextras_require['docs'] = sorted(\n set(\n [\n 'sphinx>=3.1.2',\n 'sphinxcontrib-bibtex',\n 'sphinx-click',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'ipywidgets',\n 'sphinx-issues',\n 'sphinx-copybutton>0.2.9',\n ]\n )\n)\nextras_require['develop'] = sorted(\n set(\n extras_require['docs']\n + extras_require['lint']\n + extras_require['test']\n + ['nbdime', 'bumpversion', 'ipython', 'pre-commit', 'check-manifest', 'twine']\n )\n)\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\nsetup(\n extras_require=extras_require,\n use_scm_version=lambda: {'local_scheme': lambda version: ''},\n)\n", "path": "setup.py"}]}
1324
171
gh_patches_debug_41578
rasdani/github-patches
git_diff
PrefectHQ__prefect-1368
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add optional pull to local agent If the image is present locally we don't want to attempt a pull from a registry </issue> <code> [start of src/prefect/cli/agent.py] 1 import click 2 3 from prefect import config 4 from prefect.utilities.configuration import set_temporary_config 5 from prefect.utilities.serialization import from_qualified_name 6 7 _agents = { 8 "local": "prefect.agent.local.LocalAgent", 9 "kubernetes": "prefect.agent.kubernetes.KubernetesAgent", 10 "nomad": "prefect.agent.nomad.NomadAgent", 11 } 12 13 14 @click.group(hidden=True) 15 def agent(): 16 """ 17 Manage Prefect agents. 18 19 \b 20 Usage: 21 $ prefect agent [COMMAND] 22 23 \b 24 Arguments: 25 start Start a Prefect agent 26 27 \b 28 Examples: 29 $ prefect agent start 30 31 \b 32 $ prefect agent start kubernetes --token MY_TOKEN 33 """ 34 pass 35 36 37 @agent.command(hidden=True) 38 @click.argument("name", default="local") 39 @click.option( 40 "--token", "-t", required=False, help="A Prefect Cloud API token.", hidden=True 41 ) 42 def start(name, token): 43 """ 44 Start an agent. 45 46 \b 47 Arguments: 48 name TEXT The name of an agent to start (e.g. `local`, `kubernetes`, `nomad`) 49 Defaults to `local` 50 51 \b 52 Options: 53 --token, -t TEXT A Prefect Cloud api token 54 """ 55 with set_temporary_config( 56 {"cloud.agent.auth_token": token or config.cloud.agent.auth_token} 57 ): 58 retrieved_agent = _agents.get(name, None) 59 60 if not retrieved_agent: 61 click.secho("{} is not a valid agent".format(name), fg="red") 62 return 63 64 from_qualified_name(retrieved_agent)().start() 65 [end of src/prefect/cli/agent.py] [start of src/prefect/agent/local/agent.py] 1 import docker 2 3 from prefect import config 4 from prefect.agent import Agent 5 from prefect.environments.storage import Docker 6 from prefect.serialization.storage import StorageSchema 7 from prefect.utilities.graphql import GraphQLResult 8 9 10 class LocalAgent(Agent): 11 """ 12 Agent which deploys flow runs locally as Docker containers. 13 14 Args: 15 - base_url (str, optional): URL for a Docker daemon server. Defaults to 16 `unix:///var/run/docker.sock` however other hosts such as 17 `tcp://0.0.0.0:2375` can be provided 18 """ 19 20 def __init__(self, base_url: str = None) -> None: 21 super().__init__() 22 23 base_url = base_url or "unix://var/run/docker.sock" 24 self.docker_client = docker.APIClient(base_url=base_url, version="auto") 25 26 # Ping Docker daemon for connection issues 27 try: 28 self.docker_client.ping() 29 except Exception as exc: 30 self.logger.error( 31 "Issue connecting to the Docker daemon. Make sure it is running." 
32 ) 33 raise exc 34 35 def deploy_flows(self, flow_runs: list) -> None: 36 """ 37 Deploy flow runs on your local machine as Docker containers 38 39 Args: 40 - flow_runs (list): A list of GraphQLResult flow run objects 41 """ 42 for flow_run in flow_runs: 43 44 storage = StorageSchema().load(flow_run.flow.storage) 45 if not isinstance(StorageSchema().load(flow_run.flow.storage), Docker): 46 self.logger.error( 47 "Storage for flow run {} is not of type Docker.".format(flow_run.id) 48 ) 49 continue 50 51 env_vars = self.populate_env_vars(flow_run=flow_run) 52 53 # Pull image if it doesn't exist locally 54 self.docker_client.pull(storage.name) 55 56 # Create a container 57 container = self.docker_client.create_container( 58 storage.name, command="prefect execute cloud-flow", environment=env_vars 59 ) 60 61 # Start the container 62 self.docker_client.start(container=container.get("Id")) 63 64 def populate_env_vars(self, flow_run: GraphQLResult) -> dict: 65 """ 66 Populate metadata and variables in the environment variables for a flow run 67 68 Args: 69 - flow_run (GraphQLResult): A flow run object 70 71 Returns: 72 - dict: a dictionary representing the populated environment variables 73 """ 74 return { 75 "PREFECT__CLOUD__API": config.cloud.api, 76 "PREFECT__CLOUD__AUTH_TOKEN": config.cloud.agent.auth_token, 77 "PREFECT__CONTEXT__FLOW_RUN_ID": flow_run.id, # type: ignore 78 "PREFECT__CLOUD__USE_LOCAL_SECRETS": "false", 79 "PREFECT__LOGGING__LOG_TO_CLOUD": "true", 80 "PREFECT__LOGGING__LEVEL": "DEBUG", 81 "PREFECT__ENGINE__FLOW_RUNNER__DEFAULT_CLASS": "prefect.engine.cloud.CloudFlowRunner", 82 "PREFECT__ENGINE__TASK_RUNNER__DEFAULT_CLASS": "prefect.engine.cloud.CloudTaskRunner", 83 } 84 85 86 if __name__ == "__main__": 87 LocalAgent().start() 88 [end of src/prefect/agent/local/agent.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/prefect/agent/local/agent.py b/src/prefect/agent/local/agent.py
--- a/src/prefect/agent/local/agent.py
+++ b/src/prefect/agent/local/agent.py
@@ -1,6 +1,6 @@
 import docker
 
-from prefect import config
+from prefect import config, context
 from prefect.agent import Agent
 from prefect.environments.storage import Docker
 from prefect.serialization.storage import StorageSchema
@@ -15,9 +15,11 @@
         - base_url (str, optional): URL for a Docker daemon server. Defaults to
             `unix:///var/run/docker.sock` however other hosts such as
             `tcp://0.0.0.0:2375` can be provided
+        - no_pull (bool, optional): Flag on whether or not to pull flow images.
+            Defaults to `False` if not provided here or in context.
     """
 
-    def __init__(self, base_url: str = None) -> None:
+    def __init__(self, base_url: str = None, no_pull: bool = None) -> None:
         super().__init__()
 
         base_url = base_url or "unix://var/run/docker.sock"
@@ -32,6 +34,10 @@
             )
             raise exc
 
+        self.no_pull = no_pull or context.get("no_pull")
+        if self.no_pull is None:
+            self.no_pull = False
+
     def deploy_flows(self, flow_runs: list) -> None:
         """
         Deploy flow runs on your local machine as Docker containers
@@ -50,8 +56,8 @@
 
             env_vars = self.populate_env_vars(flow_run=flow_run)
 
-            # Pull image if it doesn't exist locally
-            self.docker_client.pull(storage.name)
+            if not self.no_pull:
+                self.docker_client.pull(storage.name)
 
             # Create a container
             container = self.docker_client.create_container(
diff --git a/src/prefect/cli/agent.py b/src/prefect/cli/agent.py
--- a/src/prefect/cli/agent.py
+++ b/src/prefect/cli/agent.py
@@ -1,6 +1,6 @@
 import click
 
-from prefect import config
+from prefect import config, context
 from prefect.utilities.configuration import set_temporary_config
 from prefect.utilities.serialization import from_qualified_name
 
@@ -39,7 +39,8 @@
 @click.option(
     "--token", "-t", required=False, help="A Prefect Cloud API token.", hidden=True
 )
-def start(name, token):
+@click.option("--no-pull", is_flag=True, help="Pull images flag.", hidden=True)
+def start(name, token, no_pull):
     """
     Start an agent.
 
@@ -51,6 +52,8 @@
     \b
     Options:
         --token, -t TEXT A Prefect Cloud api token
+        --no-pull        Pull images for a LocalAgent
+                         Defaults to pulling if not provided
     """
     with set_temporary_config(
         {"cloud.agent.auth_token": token or config.cloud.agent.auth_token}
@@ -61,4 +64,5 @@
             click.secho("{} is not a valid agent".format(name), fg="red")
             return
 
-        from_qualified_name(retrieved_agent)().start()
+        with context(no_pull=no_pull):
+            from_qualified_name(retrieved_agent)().start()
{"golden_diff": "diff --git a/src/prefect/agent/local/agent.py b/src/prefect/agent/local/agent.py\n--- a/src/prefect/agent/local/agent.py\n+++ b/src/prefect/agent/local/agent.py\n@@ -1,6 +1,6 @@\n import docker\n \n-from prefect import config\n+from prefect import config, context\n from prefect.agent import Agent\n from prefect.environments.storage import Docker\n from prefect.serialization.storage import StorageSchema\n@@ -15,9 +15,11 @@\n - base_url (str, optional): URL for a Docker daemon server. Defaults to\n `unix:///var/run/docker.sock` however other hosts such as\n `tcp://0.0.0.0:2375` can be provided\n+ - no_pull (bool, optional): Flag on whether or not to pull flow images.\n+ Defaults to `False` if not provided here or in context.\n \"\"\"\n \n- def __init__(self, base_url: str = None) -> None:\n+ def __init__(self, base_url: str = None, no_pull: bool = None) -> None:\n super().__init__()\n \n base_url = base_url or \"unix://var/run/docker.sock\"\n@@ -32,6 +34,10 @@\n )\n raise exc\n \n+ self.no_pull = no_pull or context.get(\"no_pull\")\n+ if self.no_pull is None:\n+ self.no_pull = False\n+\n def deploy_flows(self, flow_runs: list) -> None:\n \"\"\"\n Deploy flow runs on your local machine as Docker containers\n@@ -50,8 +56,8 @@\n \n env_vars = self.populate_env_vars(flow_run=flow_run)\n \n- # Pull image if it doesn't exist locally\n- self.docker_client.pull(storage.name)\n+ if not self.no_pull:\n+ self.docker_client.pull(storage.name)\n \n # Create a container\n container = self.docker_client.create_container(\ndiff --git a/src/prefect/cli/agent.py b/src/prefect/cli/agent.py\n--- a/src/prefect/cli/agent.py\n+++ b/src/prefect/cli/agent.py\n@@ -1,6 +1,6 @@\n import click\n \n-from prefect import config\n+from prefect import config, context\n from prefect.utilities.configuration import set_temporary_config\n from prefect.utilities.serialization import from_qualified_name\n \n@@ -39,7 +39,8 @@\n @click.option(\n \"--token\", \"-t\", required=False, help=\"A Prefect Cloud API token.\", hidden=True\n )\n-def start(name, token):\[email protected](\"--no-pull\", is_flag=True, help=\"Pull images flag.\", hidden=True)\n+def start(name, token, no_pull):\n \"\"\"\n Start an agent.\n \n@@ -51,6 +52,8 @@\n \\b\n Options:\n --token, -t TEXT A Prefect Cloud api token\n+ --no-pull Pull images for a LocalAgent\n+ Defaults to pulling if not provided\n \"\"\"\n with set_temporary_config(\n {\"cloud.agent.auth_token\": token or config.cloud.agent.auth_token}\n@@ -61,4 +64,5 @@\n click.secho(\"{} is not a valid agent\".format(name), fg=\"red\")\n return\n \n- from_qualified_name(retrieved_agent)().start()\n+ with context(no_pull=no_pull):\n+ from_qualified_name(retrieved_agent)().start()\n", "issue": "Add optional pull to local agent\nIf the image is present locally we don't want to attempt a pull from a registry\n", "before_files": [{"content": "import click\n\nfrom prefect import config\nfrom prefect.utilities.configuration import set_temporary_config\nfrom prefect.utilities.serialization import from_qualified_name\n\n_agents = {\n \"local\": \"prefect.agent.local.LocalAgent\",\n \"kubernetes\": \"prefect.agent.kubernetes.KubernetesAgent\",\n \"nomad\": \"prefect.agent.nomad.NomadAgent\",\n}\n\n\[email protected](hidden=True)\ndef agent():\n \"\"\"\n Manage Prefect agents.\n\n \\b\n Usage:\n $ prefect agent [COMMAND]\n\n \\b\n Arguments:\n start Start a Prefect agent\n\n \\b\n Examples:\n $ prefect agent start\n\n \\b\n $ prefect agent start kubernetes --token MY_TOKEN\n \"\"\"\n pass\n\n\[email 
protected](hidden=True)\[email protected](\"name\", default=\"local\")\[email protected](\n \"--token\", \"-t\", required=False, help=\"A Prefect Cloud API token.\", hidden=True\n)\ndef start(name, token):\n \"\"\"\n Start an agent.\n\n \\b\n Arguments:\n name TEXT The name of an agent to start (e.g. `local`, `kubernetes`, `nomad`)\n Defaults to `local`\n\n \\b\n Options:\n --token, -t TEXT A Prefect Cloud api token\n \"\"\"\n with set_temporary_config(\n {\"cloud.agent.auth_token\": token or config.cloud.agent.auth_token}\n ):\n retrieved_agent = _agents.get(name, None)\n\n if not retrieved_agent:\n click.secho(\"{} is not a valid agent\".format(name), fg=\"red\")\n return\n\n from_qualified_name(retrieved_agent)().start()\n", "path": "src/prefect/cli/agent.py"}, {"content": "import docker\n\nfrom prefect import config\nfrom prefect.agent import Agent\nfrom prefect.environments.storage import Docker\nfrom prefect.serialization.storage import StorageSchema\nfrom prefect.utilities.graphql import GraphQLResult\n\n\nclass LocalAgent(Agent):\n \"\"\"\n Agent which deploys flow runs locally as Docker containers.\n\n Args:\n - base_url (str, optional): URL for a Docker daemon server. Defaults to\n `unix:///var/run/docker.sock` however other hosts such as\n `tcp://0.0.0.0:2375` can be provided\n \"\"\"\n\n def __init__(self, base_url: str = None) -> None:\n super().__init__()\n\n base_url = base_url or \"unix://var/run/docker.sock\"\n self.docker_client = docker.APIClient(base_url=base_url, version=\"auto\")\n\n # Ping Docker daemon for connection issues\n try:\n self.docker_client.ping()\n except Exception as exc:\n self.logger.error(\n \"Issue connecting to the Docker daemon. Make sure it is running.\"\n )\n raise exc\n\n def deploy_flows(self, flow_runs: list) -> None:\n \"\"\"\n Deploy flow runs on your local machine as Docker containers\n\n Args:\n - flow_runs (list): A list of GraphQLResult flow run objects\n \"\"\"\n for flow_run in flow_runs:\n\n storage = StorageSchema().load(flow_run.flow.storage)\n if not isinstance(StorageSchema().load(flow_run.flow.storage), Docker):\n self.logger.error(\n \"Storage for flow run {} is not of type Docker.\".format(flow_run.id)\n )\n continue\n\n env_vars = self.populate_env_vars(flow_run=flow_run)\n\n # Pull image if it doesn't exist locally\n self.docker_client.pull(storage.name)\n\n # Create a container\n container = self.docker_client.create_container(\n storage.name, command=\"prefect execute cloud-flow\", environment=env_vars\n )\n\n # Start the container\n self.docker_client.start(container=container.get(\"Id\"))\n\n def populate_env_vars(self, flow_run: GraphQLResult) -> dict:\n \"\"\"\n Populate metadata and variables in the environment variables for a flow run\n\n Args:\n - flow_run (GraphQLResult): A flow run object\n\n Returns:\n - dict: a dictionary representing the populated environment variables\n \"\"\"\n return {\n \"PREFECT__CLOUD__API\": config.cloud.api,\n \"PREFECT__CLOUD__AUTH_TOKEN\": config.cloud.agent.auth_token,\n \"PREFECT__CONTEXT__FLOW_RUN_ID\": flow_run.id, # type: ignore\n \"PREFECT__CLOUD__USE_LOCAL_SECRETS\": \"false\",\n \"PREFECT__LOGGING__LOG_TO_CLOUD\": \"true\",\n \"PREFECT__LOGGING__LEVEL\": \"DEBUG\",\n \"PREFECT__ENGINE__FLOW_RUNNER__DEFAULT_CLASS\": \"prefect.engine.cloud.CloudFlowRunner\",\n \"PREFECT__ENGINE__TASK_RUNNER__DEFAULT_CLASS\": \"prefect.engine.cloud.CloudTaskRunner\",\n }\n\n\nif __name__ == \"__main__\":\n LocalAgent().start()\n", "path": "src/prefect/agent/local/agent.py"}]}
1927
761
gh_patches_debug_517
rasdani/github-patches
git_diff
PaddlePaddle__models-4963
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> emotion_detection/utils.py代码错误 utils.py第62行:seq_len = max_seq_len 是缩进错误吧?应该包括在else内,否则seq_len不能表示sequence的真实长度 </issue> <code> [start of PaddleNLP/emotion_detection/utils.py] 1 # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 """ 15 EmoTect utilities. 16 """ 17 from __future__ import absolute_import 18 from __future__ import division 19 from __future__ import print_function 20 21 import io 22 import os 23 import sys 24 import six 25 import random 26 27 import paddle 28 import paddle.fluid as fluid 29 import numpy as np 30 31 32 def init_checkpoint(exe, init_checkpoint_path, main_program): 33 """ 34 Init CheckPoint 35 """ 36 37 fluid.load(main_program, init_checkpoint_path, exe) 38 39 40 def word2id(word_dict, query): 41 """ 42 Convert word sequence into id list 43 """ 44 unk_id = len(word_dict) 45 wids = [ 46 word_dict[w] if w in word_dict else unk_id 47 for w in query.strip().split(" ") 48 ] 49 return wids 50 51 52 def pad_wid(wids, max_seq_len=128, pad_id=0): 53 """ 54 Padding data to max_seq_len 55 """ 56 seq_len = len(wids) 57 if seq_len < max_seq_len: 58 for i in range(max_seq_len - seq_len): 59 wids.append(pad_id) 60 else: 61 wids = wids[:max_seq_len] 62 seq_len = max_seq_len 63 return wids, seq_len 64 65 66 def data_reader(file_path, word_dict, num_examples, phrase, epoch, max_seq_len): 67 """ 68 Data reader, which convert word sequence into id list 69 """ 70 all_data = [] 71 with io.open(file_path, "r", encoding='utf8') as fin: 72 for line in fin: 73 if line.startswith("label"): 74 continue 75 if phrase == "infer": 76 cols = line.strip().split("\t") 77 query = cols[-1] if len(cols) != -1 else cols[0] 78 wids = word2id(word_dict, query) 79 wids, seq_len = pad_wid(wids, max_seq_len) 80 all_data.append((wids, seq_len)) 81 else: 82 cols = line.strip().split("\t") 83 if len(cols) != 2: 84 sys.stderr.write("[NOTICE] Error Format Line!") 85 continue 86 label = int(cols[0]) 87 query = cols[1].strip() 88 wids = word2id(word_dict, query) 89 wids, seq_len = pad_wid(wids, max_seq_len) 90 all_data.append((wids, label, seq_len)) 91 num_examples[phrase] = len(all_data) 92 93 if phrase == "infer": 94 95 def reader(): 96 """ 97 Infer reader function 98 """ 99 for wids, seq_len in all_data: 100 yield wids, seq_len 101 102 return reader 103 104 def reader(): 105 """ 106 Reader function 107 """ 108 for idx in range(epoch): 109 if phrase == "train" and 'ce_mode' not in os.environ: 110 random.shuffle(all_data) 111 for wids, label, seq_len in all_data: 112 yield wids, label, seq_len 113 114 return reader 115 116 117 def load_vocab(file_path): 118 """ 119 load the given vocabulary 120 """ 121 vocab = {} 122 with io.open(file_path, 'r', encoding='utf8') as fin: 123 wid = 0 124 for line in fin: 125 if line.strip() not in vocab: 126 vocab[line.strip()] = wid 127 wid += 1 128 vocab["<unk>"] = len(vocab) 129 return vocab 130 
131 132 def print_arguments(args): 133 """ 134 print arguments 135 """ 136 print('----------- Configuration Arguments -----------') 137 for arg, value in sorted(six.iteritems(vars(args))): 138 print('%s: %s' % (arg, value)) 139 print('------------------------------------------------') 140 141 142 def query2ids(vocab_path, query): 143 """ 144 Convert query to id list according to the given vocab 145 """ 146 vocab = load_vocab(vocab_path) 147 wids = word2id(vocab, query) 148 return wids 149 [end of PaddleNLP/emotion_detection/utils.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/PaddleNLP/emotion_detection/utils.py b/PaddleNLP/emotion_detection/utils.py --- a/PaddleNLP/emotion_detection/utils.py +++ b/PaddleNLP/emotion_detection/utils.py @@ -59,7 +59,7 @@ wids.append(pad_id) else: wids = wids[:max_seq_len] - seq_len = max_seq_len + seq_len = max_seq_len return wids, seq_len
{"golden_diff": "diff --git a/PaddleNLP/emotion_detection/utils.py b/PaddleNLP/emotion_detection/utils.py\n--- a/PaddleNLP/emotion_detection/utils.py\n+++ b/PaddleNLP/emotion_detection/utils.py\n@@ -59,7 +59,7 @@\n wids.append(pad_id)\n else:\n wids = wids[:max_seq_len]\n- seq_len = max_seq_len\n+ seq_len = max_seq_len\n return wids, seq_len\n", "issue": "emotion_detection/utils.py\u4ee3\u7801\u9519\u8bef\nutils.py\u7b2c62\u884c\uff1aseq_len = max_seq_len \u662f\u7f29\u8fdb\u9519\u8bef\u5427\uff1f\u5e94\u8be5\u5305\u62ec\u5728else\u5185\uff0c\u5426\u5219seq_len\u4e0d\u80fd\u8868\u793asequence\u7684\u771f\u5b9e\u957f\u5ea6\n", "before_files": [{"content": "# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nEmoTect utilities.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport io\nimport os\nimport sys\nimport six\nimport random\n\nimport paddle\nimport paddle.fluid as fluid\nimport numpy as np\n\n\ndef init_checkpoint(exe, init_checkpoint_path, main_program):\n \"\"\"\n Init CheckPoint\n \"\"\"\n\n fluid.load(main_program, init_checkpoint_path, exe)\n\n\ndef word2id(word_dict, query):\n \"\"\"\n Convert word sequence into id list\n \"\"\"\n unk_id = len(word_dict)\n wids = [\n word_dict[w] if w in word_dict else unk_id\n for w in query.strip().split(\" \")\n ]\n return wids\n\n\ndef pad_wid(wids, max_seq_len=128, pad_id=0):\n \"\"\"\n Padding data to max_seq_len\n \"\"\"\n seq_len = len(wids)\n if seq_len < max_seq_len:\n for i in range(max_seq_len - seq_len):\n wids.append(pad_id)\n else:\n wids = wids[:max_seq_len]\n seq_len = max_seq_len\n return wids, seq_len\n\n\ndef data_reader(file_path, word_dict, num_examples, phrase, epoch, max_seq_len):\n \"\"\"\n Data reader, which convert word sequence into id list\n \"\"\"\n all_data = []\n with io.open(file_path, \"r\", encoding='utf8') as fin:\n for line in fin:\n if line.startswith(\"label\"):\n continue\n if phrase == \"infer\":\n cols = line.strip().split(\"\\t\")\n query = cols[-1] if len(cols) != -1 else cols[0]\n wids = word2id(word_dict, query)\n wids, seq_len = pad_wid(wids, max_seq_len)\n all_data.append((wids, seq_len))\n else:\n cols = line.strip().split(\"\\t\")\n if len(cols) != 2:\n sys.stderr.write(\"[NOTICE] Error Format Line!\")\n continue\n label = int(cols[0])\n query = cols[1].strip()\n wids = word2id(word_dict, query)\n wids, seq_len = pad_wid(wids, max_seq_len)\n all_data.append((wids, label, seq_len))\n num_examples[phrase] = len(all_data)\n\n if phrase == \"infer\":\n\n def reader():\n \"\"\"\n Infer reader function\n \"\"\"\n for wids, seq_len in all_data:\n yield wids, seq_len\n\n return reader\n\n def reader():\n \"\"\"\n Reader function\n \"\"\"\n for idx in range(epoch):\n if phrase == \"train\" and 'ce_mode' not in os.environ:\n random.shuffle(all_data)\n for wids, label, seq_len in all_data:\n yield wids, label, seq_len\n\n return reader\n\n\ndef 
load_vocab(file_path):\n \"\"\"\n load the given vocabulary\n \"\"\"\n vocab = {}\n with io.open(file_path, 'r', encoding='utf8') as fin:\n wid = 0\n for line in fin:\n if line.strip() not in vocab:\n vocab[line.strip()] = wid\n wid += 1\n vocab[\"<unk>\"] = len(vocab)\n return vocab\n\n\ndef print_arguments(args):\n \"\"\"\n print arguments\n \"\"\"\n print('----------- Configuration Arguments -----------')\n for arg, value in sorted(six.iteritems(vars(args))):\n print('%s: %s' % (arg, value))\n print('------------------------------------------------')\n\n\ndef query2ids(vocab_path, query):\n \"\"\"\n Convert query to id list according to the given vocab\n \"\"\"\n vocab = load_vocab(vocab_path)\n wids = word2id(vocab, query)\n return wids\n", "path": "PaddleNLP/emotion_detection/utils.py"}]}
1911
108