problem_id (stringlengths 18-22) | source (stringclasses 1 value) | task_type (stringclasses 1 value) | in_source_id (stringlengths 13-58) | prompt (stringlengths 1.71k-9.01k) | golden_diff (stringlengths 151-4.94k) | verification_info (stringlengths 465-11.3k) | num_tokens_prompt (int64 557-2.05k) | num_tokens_diff (int64 48-1.02k)
---|---|---|---|---|---|---|---|---|
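Each row that follows conforms to this schema. As a minimal sketch of how the rows could be consumed (assuming this listing corresponds to the `rasdani/github-patches` dataset named in the `source` column, and that `verification_info` is stored as a JSON-encoded string), loading and unpacking one row might look like:

```python
import json

from datasets import load_dataset

# Hypothetical: the dataset id is taken from the `source` column of the rows below.
ds = load_dataset("rasdani/github-patches", split="train")

row = ds[0]
print(row["problem_id"], row["in_source_id"])

# `verification_info` bundles the ground-truth patch, the issue text, and the
# pre-patch file contents; here it is assumed to be a JSON-encoded string.
info = json.loads(row["verification_info"])
print(info["issue"].splitlines()[0])  # issue title
for f in info["before_files"]:
    print(f["path"], len(f["content"]), "chars")
```

The `num_tokens_*` columns are plain integers and need no decoding.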
gh_patches_debug_24491 | rasdani/github-patches | git_diff | safe-global__safe-config-service-6 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bootstrap database with default data for the /safe-apps endpoint
When the database is created and the service is running, we should offer the option/utility to bootstrap it with the default list of safe-apps.
Default database should contain the following data: https://github.com/gnosis/safe-apps-list/blob/development/public/gnosis-default.applist.json
</issue>
<code>
[start of src/safe_apps/models.py]
1 from django.contrib.postgres.fields import ArrayField
2 from django.db import models
3
4
5 class SafeApp(models.Model):
6 url = models.URLField()
7 name = models.CharField(max_length=200)
8 icon_url = models.URLField()
9 description = models.CharField(max_length=200)
10 networks = ArrayField(models.IntegerField())
11
[end of src/safe_apps/models.py]
[start of src/safe_apps/migrations/0001_initial.py]
1 # Generated by Django 3.2 on 2021-04-16 14:52
2
3 import django.contrib.postgres.fields
4 from django.db import migrations, models
5
6
7 class Migration(migrations.Migration):
8
9 initial = True
10
11 dependencies = [
12 ]
13
14 operations = [
15 migrations.CreateModel(
16 name='SafeApp',
17 fields=[
18 ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
19 ('url', models.URLField()),
20 ('name', models.CharField(max_length=200)),
21 ('icon_url', models.URLField()),
22 ('description', models.CharField(max_length=200)),
23 ('networks', django.contrib.postgres.fields.ArrayField(base_field=models.IntegerField(), size=None)),
24 ],
25 ),
26 ]
27
[end of src/safe_apps/migrations/0001_initial.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/safe_apps/migrations/0001_initial.py b/src/safe_apps/migrations/0001_initial.py
--- a/src/safe_apps/migrations/0001_initial.py
+++ b/src/safe_apps/migrations/0001_initial.py
@@ -1,4 +1,4 @@
-# Generated by Django 3.2 on 2021-04-16 14:52
+# Generated by Django 3.2 on 2021-04-28 13:39
import django.contrib.postgres.fields
from django.db import migrations, models
@@ -15,8 +15,7 @@
migrations.CreateModel(
name='SafeApp',
fields=[
- ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
- ('url', models.URLField()),
+ ('url', models.URLField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=200)),
('icon_url', models.URLField()),
('description', models.CharField(max_length=200)),
diff --git a/src/safe_apps/models.py b/src/safe_apps/models.py
--- a/src/safe_apps/models.py
+++ b/src/safe_apps/models.py
@@ -3,7 +3,7 @@
class SafeApp(models.Model):
- url = models.URLField()
+ url = models.URLField(primary_key=True)
name = models.CharField(max_length=200)
icon_url = models.URLField()
description = models.CharField(max_length=200)
| {"golden_diff": "diff --git a/src/safe_apps/migrations/0001_initial.py b/src/safe_apps/migrations/0001_initial.py\n--- a/src/safe_apps/migrations/0001_initial.py\n+++ b/src/safe_apps/migrations/0001_initial.py\n@@ -1,4 +1,4 @@\n-# Generated by Django 3.2 on 2021-04-16 14:52\n+# Generated by Django 3.2 on 2021-04-28 13:39\n \n import django.contrib.postgres.fields\n from django.db import migrations, models\n@@ -15,8 +15,7 @@\n migrations.CreateModel(\n name='SafeApp',\n fields=[\n- ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n- ('url', models.URLField()),\n+ ('url', models.URLField(primary_key=True, serialize=False)),\n ('name', models.CharField(max_length=200)),\n ('icon_url', models.URLField()),\n ('description', models.CharField(max_length=200)),\ndiff --git a/src/safe_apps/models.py b/src/safe_apps/models.py\n--- a/src/safe_apps/models.py\n+++ b/src/safe_apps/models.py\n@@ -3,7 +3,7 @@\n \n \n class SafeApp(models.Model):\n- url = models.URLField()\n+ url = models.URLField(primary_key=True)\n name = models.CharField(max_length=200)\n icon_url = models.URLField()\n description = models.CharField(max_length=200)\n", "issue": "Bootstrap database with default data for the /safe-apps endpoint \nWhen the database is created and the service is running, we should offer the option/utility to bootstrap it with the default list of safe-apps.\r\n\r\nDefault database should contain the following data: https://github.com/gnosis/safe-apps-list/blob/development/public/gnosis-default.applist.json\n", "before_files": [{"content": "from django.contrib.postgres.fields import ArrayField\nfrom django.db import models\n\n\nclass SafeApp(models.Model):\n url = models.URLField()\n name = models.CharField(max_length=200)\n icon_url = models.URLField()\n description = models.CharField(max_length=200)\n networks = ArrayField(models.IntegerField())\n", "path": "src/safe_apps/models.py"}, {"content": "# Generated by Django 3.2 on 2021-04-16 14:52\n\nimport django.contrib.postgres.fields\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='SafeApp',\n fields=[\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('url', models.URLField()),\n ('name', models.CharField(max_length=200)),\n ('icon_url', models.URLField()),\n ('description', models.CharField(max_length=200)),\n ('networks', django.contrib.postgres.fields.ArrayField(base_field=models.IntegerField(), size=None)),\n ],\n ),\n ]\n", "path": "src/safe_apps/migrations/0001_initial.py"}]} | 949 | 359 |
gh_patches_debug_23010 | rasdani/github-patches | git_diff | uccser__cs-unplugged-67 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add django-debug-toolbar for debugging
</issue>
<code>
[start of csunplugged/config/settings.py]
1 """
2 Django settings for csunplugged project.
3
4 Generated by 'django-admin startproject' using Django 1.10.3.
5
6 For more information on this file, see
7 https://docs.djangoproject.com/en/1.10/topics/settings/
8
9 For the full list of settings and their values, see
10 https://docs.djangoproject.com/en/1.10/ref/settings/
11 """
12
13 import os
14 from config.settings_secret import *
15
16 # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
17 BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
18
19 # nasty hard coding
20 SETTINGS_PATH = os.path.dirname(os.path.dirname(__file__))
21
22
23 # Quick-start development settings - unsuitable for production
24 # See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
25
26 # SECURITY WARNING: keep the secret key used in production secret!
27 SECRET_KEY = 'l@@)w&&%&u37+sjz^lsx^+29y_333oid3ygxzucar^8o(axo*f'
28
29 # SECURITY WARNING: don't run with debug turned on in production!
30 DEBUG = True
31
32 ALLOWED_HOSTS = []
33
34
35 # Application definition
36
37 INSTALLED_APPS = [
38 'general.apps.GeneralConfig',
39 'topics.apps.TopicsConfig',
40 'resources.apps.ResourcesConfig',
41 'django.contrib.admin',
42 'django.contrib.auth',
43 'django.contrib.contenttypes',
44 'django.contrib.sessions',
45 'django.contrib.messages',
46 'django.contrib.staticfiles',
47 ]
48
49 MIDDLEWARE = [
50 'django.middleware.security.SecurityMiddleware',
51 'django.contrib.sessions.middleware.SessionMiddleware',
52 'django.middleware.locale.LocaleMiddleware',
53 'django.middleware.common.CommonMiddleware',
54 'django.middleware.csrf.CsrfViewMiddleware',
55 'django.contrib.auth.middleware.AuthenticationMiddleware',
56 'django.contrib.messages.middleware.MessageMiddleware',
57 'django.middleware.clickjacking.XFrameOptionsMiddleware',
58 ]
59
60 ROOT_URLCONF = 'config.urls'
61
62 TEMPLATES = [
63 {
64 'BACKEND': 'django.template.backends.django.DjangoTemplates',
65 'DIRS': [
66 os.path.join(SETTINGS_PATH, 'templates'),
67 os.path.join(SETTINGS_PATH, 'resources/content/')
68 ],
69 'APP_DIRS': True,
70 'OPTIONS': {
71 'context_processors': [
72 'django.template.context_processors.debug',
73 'django.template.context_processors.request',
74 'django.contrib.auth.context_processors.auth',
75 'django.contrib.messages.context_processors.messages',
76 ],
77 },
78 },
79 ]
80
81 WSGI_APPLICATION = 'config.wsgi.application'
82
83
84 # Database
85 # https://docs.djangoproject.com/en/1.10/ref/settings/#databases
86 # Database values are stored in `settings_secret.py`
87 # A template of this file is available as `settings_secret_template.py`
88
89
90 # Password validation
91 # https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
92
93 AUTH_PASSWORD_VALIDATORS = [
94 {
95 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
96 },
97 {
98 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
99 },
100 {
101 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
102 },
103 {
104 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
105 },
106 ]
107
108
109 # Internationalization
110 # https://docs.djangoproject.com/en/1.10/topics/i18n/
111
112 LANGUAGE_CODE = 'en-us'
113
114 TIME_ZONE = 'UTC'
115
116 USE_I18N = True
117
118 USE_L10N = True
119
120 USE_TZ = True
121
122 LOCALE_PATHS = ['locale']
123
124 # Static files (CSS, JavaScript, Images)
125 # https://docs.djangoproject.com/en/1.10/howto/static-files/
126
127 STATIC_URL = '/static/'
128 STATICFILES_DIRS = (
129 os.path.join(BASE_DIR, 'build'),
130 )
131
[end of csunplugged/config/settings.py]
[start of csunplugged/config/urls.py]
1 """csunplugged URL Configuration
2
3 The `urlpatterns` list routes URLs to views. For more information please see:
4 https://docs.djangoproject.com/en/1.10/topics/http/urls/
5 Examples:
6 Function views
7 1. Add an import: from my_app import views
8 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
9 Class-based views
10 1. Add an import: from other_app.views import Home
11 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
12 Including another URLconf
13 1. Import the include() function: from django.conf.urls import url, include
14 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
15 """
16 from django.conf.urls import include, url
17 from django.conf.urls.i18n import i18n_patterns
18 from django.contrib import admin
19 from django.conf import settings
20 from django.conf.urls.static import static
21
22 urlpatterns = i18n_patterns(
23 url(r'', include('general.urls', namespace='general')),
24 url(r'^topics/', include('topics.urls', namespace='topics')),
25 url(r'^resources/', include('resources.urls', namespace='resources')),
26 url(r'^admin/', include(admin.site.urls)),
27 )
28 # ] + static(settings.STATIC_URL, documnet_root=settings.STATIC_ROOT)
29
[end of csunplugged/config/urls.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/csunplugged/config/settings.py b/csunplugged/config/settings.py
--- a/csunplugged/config/settings.py
+++ b/csunplugged/config/settings.py
@@ -44,9 +44,11 @@
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
+ 'debug_toolbar',
]
MIDDLEWARE = [
+ 'debug_toolbar.middleware.DebugToolbarMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
@@ -128,3 +130,7 @@
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'build'),
)
+
+# Internal IPs for Django Debug Toolbar
+# https://docs.djangoproject.com/en/1.10/ref/settings/#internal-ips
+INTERNAL_IPS = ['127.0.0.1']
diff --git a/csunplugged/config/urls.py b/csunplugged/config/urls.py
--- a/csunplugged/config/urls.py
+++ b/csunplugged/config/urls.py
@@ -26,3 +26,9 @@
url(r'^admin/', include(admin.site.urls)),
)
# ] + static(settings.STATIC_URL, documnet_root=settings.STATIC_ROOT)
+
+if settings.DEBUG:
+ import debug_toolbar
+ urlpatterns += [
+ url(r'^__debug__/', include(debug_toolbar.urls)),
+ ]
| {"golden_diff": "diff --git a/csunplugged/config/settings.py b/csunplugged/config/settings.py\n--- a/csunplugged/config/settings.py\n+++ b/csunplugged/config/settings.py\n@@ -44,9 +44,11 @@\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n+ 'debug_toolbar',\n ]\n \n MIDDLEWARE = [\n+ 'debug_toolbar.middleware.DebugToolbarMiddleware',\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n@@ -128,3 +130,7 @@\n STATICFILES_DIRS = (\n os.path.join(BASE_DIR, 'build'),\n )\n+\n+# Internal IPs for Django Debug Toolbar\n+# https://docs.djangoproject.com/en/1.10/ref/settings/#internal-ips\n+INTERNAL_IPS = ['127.0.0.1']\ndiff --git a/csunplugged/config/urls.py b/csunplugged/config/urls.py\n--- a/csunplugged/config/urls.py\n+++ b/csunplugged/config/urls.py\n@@ -26,3 +26,9 @@\n url(r'^admin/', include(admin.site.urls)),\n )\n # ] + static(settings.STATIC_URL, documnet_root=settings.STATIC_ROOT)\n+\n+if settings.DEBUG:\n+ import debug_toolbar\n+ urlpatterns += [\n+ url(r'^__debug__/', include(debug_toolbar.urls)),\n+ ]\n", "issue": "Add django-debug-toolbar for debugging\n\n", "before_files": [{"content": "\"\"\"\nDjango settings for csunplugged project.\n\nGenerated by 'django-admin startproject' using Django 1.10.3.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.10/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.10/ref/settings/\n\"\"\"\n\nimport os\nfrom config.settings_secret import *\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n# nasty hard coding\nSETTINGS_PATH = os.path.dirname(os.path.dirname(__file__))\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = 'l@@)w&&%&u37+sjz^lsx^+29y_333oid3ygxzucar^8o(axo*f'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nALLOWED_HOSTS = []\n\n\n# Application definition\n\nINSTALLED_APPS = [\n 'general.apps.GeneralConfig',\n 'topics.apps.TopicsConfig',\n 'resources.apps.ResourcesConfig',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nROOT_URLCONF = 'config.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [\n os.path.join(SETTINGS_PATH, 'templates'),\n os.path.join(SETTINGS_PATH, 'resources/content/')\n ],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n 
},\n]\n\nWSGI_APPLICATION = 'config.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.10/ref/settings/#databases\n# Database values are stored in `settings_secret.py`\n# A template of this file is available as `settings_secret_template.py`\n\n\n# Password validation\n# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.10/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\nLOCALE_PATHS = ['locale']\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.10/howto/static-files/\n\nSTATIC_URL = '/static/'\nSTATICFILES_DIRS = (\n os.path.join(BASE_DIR, 'build'),\n )\n", "path": "csunplugged/config/settings.py"}, {"content": "\"\"\"csunplugged URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.10/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import include, url\nfrom django.conf.urls.i18n import i18n_patterns\nfrom django.contrib import admin\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\nurlpatterns = i18n_patterns(\n url(r'', include('general.urls', namespace='general')),\n url(r'^topics/', include('topics.urls', namespace='topics')),\n url(r'^resources/', include('resources.urls', namespace='resources')),\n url(r'^admin/', include(admin.site.urls)),\n)\n# ] + static(settings.STATIC_URL, documnet_root=settings.STATIC_ROOT)\n", "path": "csunplugged/config/urls.py"}]} | 2,028 | 316 |
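Each row pairs its `golden_diff` with a `verification_info` blob whose `before_files` list is exactly the pre-patch state of the touched files, so a candidate patch can be checked mechanically with `git apply`. A sketch of that check (a hypothetical helper, not part of the dataset itself):

```python
import json
import pathlib
import subprocess
import tempfile


def patch_applies(verification_info: str, patch_text: str) -> bool:
    """Recreate `before_files` in a scratch git repo and dry-run the patch."""
    info = json.loads(verification_info)
    with tempfile.TemporaryDirectory() as tmp:
        root = pathlib.Path(tmp)
        for f in info["before_files"]:
            target = root / f["path"]
            target.parent.mkdir(parents=True, exist_ok=True)
            target.write_text(f["content"])
        subprocess.run(["git", "init", "-q"], cwd=root, check=True)
        # `--check` validates without modifying the tree; drop it to apply.
        result = subprocess.run(
            ["git", "apply", "--check", "-"],
            cwd=root, input=patch_text, text=True,
        )
        return result.returncode == 0
```

Running the same check with a row's own `golden_diff` should succeed, which makes it a useful sanity test before scoring model output.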
gh_patches_debug_33414 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-6733 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Caffe Nero GB spider using outdated JSON file
The caffe_nero_gb.py spider gets its data from JSON file that the Store Finder page at https://caffenero.com/uk/stores/ uses to display its map. However, it looks like that URL of that JSON file has changed, and ATP is still referencing the old (and no longer updated one).
The ATP code currently has
`allowed_domains = ["caffenero-webassets-production.s3.eu-west-2.amazonaws.com"]`
`start_urls = ["https://caffenero-webassets-production.s3.eu-west-2.amazonaws.com/stores/stores_gb.json"]`
But the URL referenced by https://caffenero.com/uk/stores/ is now
https://caffenerowebsite.blob.core.windows.net/production/data/stores/stores-gb.json
I think the format of the JSON file has remained the same, so it should just be a matter of swapping the URLs over.
To help issues like this be picked up sooner in the future, I wonder if there's a way of checking that the JSON URL used is still included in the https://caffenero.com/uk/stores/ page, and producing a warning to anyone running ATP if not?
</issue>
<code>
[start of locations/spiders/caffe_nero_gb.py]
1 from scrapy import Spider
2 from scrapy.http import JsonRequest
3
4 from locations.categories import Categories, Extras, apply_category, apply_yes_no
5 from locations.dict_parser import DictParser
6 from locations.hours import OpeningHours
7
8
9 class CaffeNeroGBSpider(Spider):
10 name = "caffe_nero_gb"
11 item_attributes = {"brand": "Caffe Nero", "brand_wikidata": "Q675808"}
12 allowed_domains = ["caffenero-webassets-production.s3.eu-west-2.amazonaws.com"]
13 start_urls = ["https://caffenero-webassets-production.s3.eu-west-2.amazonaws.com/stores/stores_gb.json"]
14
15 def start_requests(self):
16 for url in self.start_urls:
17 yield JsonRequest(url=url)
18
19 def parse(self, response):
20 for location in response.json()["features"]:
21 if (
22 not location["properties"]["status"]["open"]
23 or location["properties"]["status"]["opening_soon"]
24 or location["properties"]["status"]["temp_closed"]
25 ):
26 continue
27
28 item = DictParser.parse(location["properties"])
29 item["geometry"] = location["geometry"]
30 if location["properties"]["status"]["express"]:
31 item["brand"] = "Nero Express"
32
33 item["opening_hours"] = OpeningHours()
34 for day_name, day_hours in location["properties"]["hoursRegular"].items():
35 if day_hours["open"] == "closed" or day_hours["close"] == "closed":
36 continue
37 if day_name == "holiday":
38 continue
39 item["opening_hours"].add_range(day_name.title(), day_hours["open"], day_hours["close"])
40
41 apply_yes_no(Extras.TAKEAWAY, item, location["properties"]["status"]["takeaway"], False)
42 apply_yes_no(Extras.DELIVERY, item, location["properties"]["status"]["delivery"], False)
43 apply_yes_no(Extras.WIFI, item, location["properties"]["amenities"]["wifi"], False)
44 apply_yes_no(Extras.TOILETS, item, location["properties"]["amenities"]["toilet"], False)
45 apply_yes_no(Extras.BABY_CHANGING_TABLE, item, location["properties"]["amenities"]["baby_change"], False)
46 apply_yes_no(Extras.SMOKING_AREA, item, location["properties"]["amenities"]["smoking_area"], False)
47 apply_yes_no(Extras.AIR_CONDITIONING, item, location["properties"]["amenities"]["air_conditioned"], False)
48 apply_yes_no(Extras.WHEELCHAIR, item, location["properties"]["amenities"].get("disabled_access"), False)
49 apply_yes_no(Extras.TOILETS_WHEELCHAIR, item, location["properties"]["amenities"]["disabled_toilet"], False)
50 apply_yes_no(Extras.OUTDOOR_SEATING, item, location["properties"]["amenities"]["outside_seating"], False)
51 apply_category(Categories.COFFEE_SHOP, item)
52
53 item["website"] = f'https://caffenero.com/uk/store/{location["properties"]["slug"]}/'
54
55 yield item
56
[end of locations/spiders/caffe_nero_gb.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/locations/spiders/caffe_nero_gb.py b/locations/spiders/caffe_nero_gb.py
--- a/locations/spiders/caffe_nero_gb.py
+++ b/locations/spiders/caffe_nero_gb.py
@@ -1,5 +1,8 @@
+import re
+from typing import Any
+
from scrapy import Spider
-from scrapy.http import JsonRequest
+from scrapy.http import JsonRequest, Response
from locations.categories import Categories, Extras, apply_category, apply_yes_no
from locations.dict_parser import DictParser
@@ -9,14 +12,15 @@
class CaffeNeroGBSpider(Spider):
name = "caffe_nero_gb"
item_attributes = {"brand": "Caffe Nero", "brand_wikidata": "Q675808"}
- allowed_domains = ["caffenero-webassets-production.s3.eu-west-2.amazonaws.com"]
- start_urls = ["https://caffenero-webassets-production.s3.eu-west-2.amazonaws.com/stores/stores_gb.json"]
+ allowed_domains = ["caffenero.com", "caffenerowebsite.blob.core.windows.net"]
+ start_urls = ["https://caffenero.com/uk/stores/"]
- def start_requests(self):
- for url in self.start_urls:
- yield JsonRequest(url=url)
+ def parse(self, response: Response, **kwargs: Any) -> Any:
+ yield JsonRequest(
+ re.search(r"loadGeoJson\(\n\s+'(https://.+)', {", response.text).group(1), callback=self.parse_geojson
+ )
- def parse(self, response):
+ def parse_geojson(self, response: Response, **kwargs: Any) -> Any:
for location in response.json()["features"]:
if (
not location["properties"]["status"]["open"]
@@ -30,6 +34,8 @@
if location["properties"]["status"]["express"]:
item["brand"] = "Nero Express"
+ item["branch"] = item.pop("name")
+
item["opening_hours"] = OpeningHours()
for day_name, day_hours in location["properties"]["hoursRegular"].items():
if day_hours["open"] == "closed" or day_hours["close"] == "closed":
| {"golden_diff": "diff --git a/locations/spiders/caffe_nero_gb.py b/locations/spiders/caffe_nero_gb.py\n--- a/locations/spiders/caffe_nero_gb.py\n+++ b/locations/spiders/caffe_nero_gb.py\n@@ -1,5 +1,8 @@\n+import re\n+from typing import Any\n+\n from scrapy import Spider\n-from scrapy.http import JsonRequest\n+from scrapy.http import JsonRequest, Response\n \n from locations.categories import Categories, Extras, apply_category, apply_yes_no\n from locations.dict_parser import DictParser\n@@ -9,14 +12,15 @@\n class CaffeNeroGBSpider(Spider):\n name = \"caffe_nero_gb\"\n item_attributes = {\"brand\": \"Caffe Nero\", \"brand_wikidata\": \"Q675808\"}\n- allowed_domains = [\"caffenero-webassets-production.s3.eu-west-2.amazonaws.com\"]\n- start_urls = [\"https://caffenero-webassets-production.s3.eu-west-2.amazonaws.com/stores/stores_gb.json\"]\n+ allowed_domains = [\"caffenero.com\", \"caffenerowebsite.blob.core.windows.net\"]\n+ start_urls = [\"https://caffenero.com/uk/stores/\"]\n \n- def start_requests(self):\n- for url in self.start_urls:\n- yield JsonRequest(url=url)\n+ def parse(self, response: Response, **kwargs: Any) -> Any:\n+ yield JsonRequest(\n+ re.search(r\"loadGeoJson\\(\\n\\s+'(https://.+)', {\", response.text).group(1), callback=self.parse_geojson\n+ )\n \n- def parse(self, response):\n+ def parse_geojson(self, response: Response, **kwargs: Any) -> Any:\n for location in response.json()[\"features\"]:\n if (\n not location[\"properties\"][\"status\"][\"open\"]\n@@ -30,6 +34,8 @@\n if location[\"properties\"][\"status\"][\"express\"]:\n item[\"brand\"] = \"Nero Express\"\n \n+ item[\"branch\"] = item.pop(\"name\")\n+\n item[\"opening_hours\"] = OpeningHours()\n for day_name, day_hours in location[\"properties\"][\"hoursRegular\"].items():\n if day_hours[\"open\"] == \"closed\" or day_hours[\"close\"] == \"closed\":\n", "issue": "Caffe Nero GB spider using outdated JSON file\nThe caffe_nero_gb.py spider gets its data from JSON file that the Store Finder page at https://caffenero.com/uk/stores/ uses to display its map. 
However, it looks like that URL of that JSON file has changed, and ATP is still referencing the old (and no longer updated one).\r\n\r\nThe ATP code currently has\r\n`allowed_domains = [\"caffenero-webassets-production.s3.eu-west-2.amazonaws.com\"]`\r\n`start_urls = [\"https://caffenero-webassets-production.s3.eu-west-2.amazonaws.com/stores/stores_gb.json\"]`\r\nBut the URL referenced by https://caffenero.com/uk/stores/ is now\r\nhttps://caffenerowebsite.blob.core.windows.net/production/data/stores/stores-gb.json\r\n\r\nI think the format of the JSON file has remained the same, so it should just be a matter of swapping the URLs over.\r\n\r\nTo help issues like this be picked up sooner in the future, I wonder if there's a way of checking that the JSON URL used is still included in the https://caffenero.com/uk/stores/ page, and producing a warning to anyone running ATP if not?\n", "before_files": [{"content": "from scrapy import Spider\nfrom scrapy.http import JsonRequest\n\nfrom locations.categories import Categories, Extras, apply_category, apply_yes_no\nfrom locations.dict_parser import DictParser\nfrom locations.hours import OpeningHours\n\n\nclass CaffeNeroGBSpider(Spider):\n name = \"caffe_nero_gb\"\n item_attributes = {\"brand\": \"Caffe Nero\", \"brand_wikidata\": \"Q675808\"}\n allowed_domains = [\"caffenero-webassets-production.s3.eu-west-2.amazonaws.com\"]\n start_urls = [\"https://caffenero-webassets-production.s3.eu-west-2.amazonaws.com/stores/stores_gb.json\"]\n\n def start_requests(self):\n for url in self.start_urls:\n yield JsonRequest(url=url)\n\n def parse(self, response):\n for location in response.json()[\"features\"]:\n if (\n not location[\"properties\"][\"status\"][\"open\"]\n or location[\"properties\"][\"status\"][\"opening_soon\"]\n or location[\"properties\"][\"status\"][\"temp_closed\"]\n ):\n continue\n\n item = DictParser.parse(location[\"properties\"])\n item[\"geometry\"] = location[\"geometry\"]\n if location[\"properties\"][\"status\"][\"express\"]:\n item[\"brand\"] = \"Nero Express\"\n\n item[\"opening_hours\"] = OpeningHours()\n for day_name, day_hours in location[\"properties\"][\"hoursRegular\"].items():\n if day_hours[\"open\"] == \"closed\" or day_hours[\"close\"] == \"closed\":\n continue\n if day_name == \"holiday\":\n continue\n item[\"opening_hours\"].add_range(day_name.title(), day_hours[\"open\"], day_hours[\"close\"])\n\n apply_yes_no(Extras.TAKEAWAY, item, location[\"properties\"][\"status\"][\"takeaway\"], False)\n apply_yes_no(Extras.DELIVERY, item, location[\"properties\"][\"status\"][\"delivery\"], False)\n apply_yes_no(Extras.WIFI, item, location[\"properties\"][\"amenities\"][\"wifi\"], False)\n apply_yes_no(Extras.TOILETS, item, location[\"properties\"][\"amenities\"][\"toilet\"], False)\n apply_yes_no(Extras.BABY_CHANGING_TABLE, item, location[\"properties\"][\"amenities\"][\"baby_change\"], False)\n apply_yes_no(Extras.SMOKING_AREA, item, location[\"properties\"][\"amenities\"][\"smoking_area\"], False)\n apply_yes_no(Extras.AIR_CONDITIONING, item, location[\"properties\"][\"amenities\"][\"air_conditioned\"], False)\n apply_yes_no(Extras.WHEELCHAIR, item, location[\"properties\"][\"amenities\"].get(\"disabled_access\"), False)\n apply_yes_no(Extras.TOILETS_WHEELCHAIR, item, location[\"properties\"][\"amenities\"][\"disabled_toilet\"], False)\n apply_yes_no(Extras.OUTDOOR_SEATING, item, location[\"properties\"][\"amenities\"][\"outside_seating\"], False)\n apply_category(Categories.COFFEE_SHOP, item)\n\n item[\"website\"] = 
f'https://caffenero.com/uk/store/{location[\"properties\"][\"slug\"]}/'\n\n yield item\n", "path": "locations/spiders/caffe_nero_gb.py"}]} | 1,545 | 496 |
gh_patches_debug_24105 | rasdani/github-patches | git_diff | deepchecks__deepchecks-372 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
The mean value is not shown in the regression systematic error plot
I would expect that near the plot (or when I hover over the mean line in the plot), I would see the mean error value.

To reproduce:
https://www.kaggle.com/itay94/notebookf8c78e84d7
</issue>
<code>
[start of deepchecks/checks/performance/regression_systematic_error.py]
1 # ----------------------------------------------------------------------------
2 # Copyright (C) 2021 Deepchecks (https://www.deepchecks.com)
3 #
4 # This file is part of Deepchecks.
5 # Deepchecks is distributed under the terms of the GNU Affero General
6 # Public License (version 3 or later).
7 # You should have received a copy of the GNU Affero General Public License
8 # along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.
9 # ----------------------------------------------------------------------------
10 #
11 """The RegressionSystematicError check module."""
12 import plotly.graph_objects as go
13 from sklearn.base import BaseEstimator
14 from sklearn.metrics import mean_squared_error
15
16 from deepchecks import CheckResult, Dataset, SingleDatasetBaseCheck, ConditionResult
17 from deepchecks.utils.metrics import ModelType, task_type_validation
18 from deepchecks.utils.strings import format_number
19
20
21 __all__ = ['RegressionSystematicError']
22
23
24 class RegressionSystematicError(SingleDatasetBaseCheck):
25 """Check the regression systematic error."""
26
27 def run(self, dataset: Dataset, model: BaseEstimator) -> CheckResult:
28 """Run check.
29
30 Arguments:
31 dataset (Dataset): A dataset object.
32 model (BaseEstimator): A scikit-learn-compatible fitted estimator instance
33 Returns:
34 CheckResult:
35 - value is a dict with rmse and mean prediction error.
36 - display is box plot of the prediction errorד.
37 Raises:
38 DeepchecksValueError: If the object is not a Dataset instance with a label
39 """
40 return self._regression_error_distribution(dataset, model)
41
42 def _regression_error_distribution(self, dataset: Dataset, model: BaseEstimator):
43 Dataset.validate_dataset(dataset)
44 dataset.validate_label()
45 task_type_validation(model, dataset, [ModelType.REGRESSION])
46
47 y_test = dataset.label_col
48 y_pred = model.predict(dataset.features_columns)
49
50 rmse = mean_squared_error(dataset.label_col, y_pred, squared=False)
51 diff = y_test - y_pred
52 diff_mean = diff.mean()
53
54 display = [
55 'Non-zero mean of the error distribution indicated the presents '
56 'of systematic error in model predictions',
57 go.Figure()
58 .add_trace(go.Box(
59 x=diff.values,
60 name='Model Prediction Error',
61 boxpoints='suspectedoutliers',
62 marker=dict(outliercolor='red'),
63 hoverinfo='x',
64 hoveron='points'))
65 .update_layout(
66 title_text='Box plot of the model prediction error',
67 width=800, height=500)
68 .add_vline(
69 x=diff_mean + 0.01,
70 line_dash='dash',
71 annotation_text='Mean error')
72 ]
73
74 return CheckResult(value={'rmse': rmse, 'mean_error': diff_mean}, display=display)
75
76 def add_condition_systematic_error_ratio_to_rmse_not_greater_than(self, max_ratio: float = 0.01):
77 """Add condition - require the absolute mean systematic error to be not greater than (max_ratio * RMSE).
78
79 Args:
80 max_ratio (float): Maximum ratio
81 """
82 def max_bias_condition(result: float) -> ConditionResult:
83 rmse = result['rmse']
84 mean_error = result['mean_error']
85 if abs(mean_error) > max_ratio * rmse:
86 return ConditionResult(False,
87 f'mean error: {format_number(mean_error, 5)}, RMSE: {format_number(rmse)}')
88 else:
89 return ConditionResult(True)
90
91 return self.add_condition(f'Bias ratio is not greater than {format_number(max_ratio)}',
92 max_bias_condition)
93
[end of deepchecks/checks/performance/regression_systematic_error.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/deepchecks/checks/performance/regression_systematic_error.py b/deepchecks/checks/performance/regression_systematic_error.py
--- a/deepchecks/checks/performance/regression_systematic_error.py
+++ b/deepchecks/checks/performance/regression_systematic_error.py
@@ -51,24 +51,19 @@
diff = y_test - y_pred
diff_mean = diff.mean()
+ fig = go.Figure()
+ fig.add_trace(go.Box(
+ x=diff,
+ name='Model prediction error',
+ boxmean=True # represent mean
+ )).update_layout(
+ title_text='Box plot of the model prediction error',
+ width=800, height=500)
+
display = [
'Non-zero mean of the error distribution indicated the presents '
'of systematic error in model predictions',
- go.Figure()
- .add_trace(go.Box(
- x=diff.values,
- name='Model Prediction Error',
- boxpoints='suspectedoutliers',
- marker=dict(outliercolor='red'),
- hoverinfo='x',
- hoveron='points'))
- .update_layout(
- title_text='Box plot of the model prediction error',
- width=800, height=500)
- .add_vline(
- x=diff_mean + 0.01,
- line_dash='dash',
- annotation_text='Mean error')
+ fig
]
return CheckResult(value={'rmse': rmse, 'mean_error': diff_mean}, display=display)
| {"golden_diff": "diff --git a/deepchecks/checks/performance/regression_systematic_error.py b/deepchecks/checks/performance/regression_systematic_error.py\n--- a/deepchecks/checks/performance/regression_systematic_error.py\n+++ b/deepchecks/checks/performance/regression_systematic_error.py\n@@ -51,24 +51,19 @@\n diff = y_test - y_pred\n diff_mean = diff.mean()\n \n+ fig = go.Figure()\n+ fig.add_trace(go.Box(\n+ x=diff,\n+ name='Model prediction error',\n+ boxmean=True # represent mean\n+ )).update_layout(\n+ title_text='Box plot of the model prediction error',\n+ width=800, height=500)\n+\n display = [\n 'Non-zero mean of the error distribution indicated the presents '\n 'of systematic error in model predictions',\n- go.Figure()\n- .add_trace(go.Box(\n- x=diff.values,\n- name='Model Prediction Error',\n- boxpoints='suspectedoutliers',\n- marker=dict(outliercolor='red'),\n- hoverinfo='x',\n- hoveron='points'))\n- .update_layout(\n- title_text='Box plot of the model prediction error',\n- width=800, height=500)\n- .add_vline(\n- x=diff_mean + 0.01,\n- line_dash='dash',\n- annotation_text='Mean error')\n+ fig\n ]\n \n return CheckResult(value={'rmse': rmse, 'mean_error': diff_mean}, display=display)\n", "issue": "The mean value is not shown in the regression systematic error plot\nI would expect that near the plot (or when I hover over the mean line in the plot), I would see the mean error value.\r\n\r\n\r\n\r\nTo reproduce:\r\nhttps://www.kaggle.com/itay94/notebookf8c78e84d7\n", "before_files": [{"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. 
If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"The RegressionSystematicError check module.\"\"\"\nimport plotly.graph_objects as go\nfrom sklearn.base import BaseEstimator\nfrom sklearn.metrics import mean_squared_error\n\nfrom deepchecks import CheckResult, Dataset, SingleDatasetBaseCheck, ConditionResult\nfrom deepchecks.utils.metrics import ModelType, task_type_validation\nfrom deepchecks.utils.strings import format_number\n\n\n__all__ = ['RegressionSystematicError']\n\n\nclass RegressionSystematicError(SingleDatasetBaseCheck):\n \"\"\"Check the regression systematic error.\"\"\"\n\n def run(self, dataset: Dataset, model: BaseEstimator) -> CheckResult:\n \"\"\"Run check.\n\n Arguments:\n dataset (Dataset): A dataset object.\n model (BaseEstimator): A scikit-learn-compatible fitted estimator instance\n Returns:\n CheckResult:\n - value is a dict with rmse and mean prediction error.\n - display is box plot of the prediction error\u05d3.\n Raises:\n DeepchecksValueError: If the object is not a Dataset instance with a label\n \"\"\"\n return self._regression_error_distribution(dataset, model)\n\n def _regression_error_distribution(self, dataset: Dataset, model: BaseEstimator):\n Dataset.validate_dataset(dataset)\n dataset.validate_label()\n task_type_validation(model, dataset, [ModelType.REGRESSION])\n\n y_test = dataset.label_col\n y_pred = model.predict(dataset.features_columns)\n\n rmse = mean_squared_error(dataset.label_col, y_pred, squared=False)\n diff = y_test - y_pred\n diff_mean = diff.mean()\n\n display = [\n 'Non-zero mean of the error distribution indicated the presents '\n 'of systematic error in model predictions',\n go.Figure()\n .add_trace(go.Box(\n x=diff.values,\n name='Model Prediction Error',\n boxpoints='suspectedoutliers',\n marker=dict(outliercolor='red'),\n hoverinfo='x',\n hoveron='points'))\n .update_layout(\n title_text='Box plot of the model prediction error',\n width=800, height=500)\n .add_vline(\n x=diff_mean + 0.01,\n line_dash='dash',\n annotation_text='Mean error')\n ]\n\n return CheckResult(value={'rmse': rmse, 'mean_error': diff_mean}, display=display)\n\n def add_condition_systematic_error_ratio_to_rmse_not_greater_than(self, max_ratio: float = 0.01):\n \"\"\"Add condition - require the absolute mean systematic error to be not greater than (max_ratio * RMSE).\n\n Args:\n max_ratio (float): Maximum ratio\n \"\"\"\n def max_bias_condition(result: float) -> ConditionResult:\n rmse = result['rmse']\n mean_error = result['mean_error']\n if abs(mean_error) > max_ratio * rmse:\n return ConditionResult(False,\n f'mean error: {format_number(mean_error, 5)}, RMSE: {format_number(rmse)}')\n else:\n return ConditionResult(True)\n\n return self.add_condition(f'Bias ratio is not greater than {format_number(max_ratio)}',\n max_bias_condition)\n", "path": "deepchecks/checks/performance/regression_systematic_error.py"}]} | 1,624 | 359 |
gh_patches_debug_39617 | rasdani/github-patches | git_diff | googleapis__google-auth-library-python-150 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Expose the full response from the token server
`refresh` on oauth2 Credentials should store the full response from the token server. There is potentially useful data here, like the `id_token`.
</issue>
<code>
[start of google/oauth2/credentials.py]
1 # Copyright 2016 Google Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """OAuth 2.0 Credentials.
16
17 This module provides credentials based on OAuth 2.0 access and refresh tokens.
18 These credentials usually access resources on behalf of a user (resource
19 owner).
20
21 Specifically, this is intended to use access tokens acquired using the
22 `Authorization Code grant`_ and can refresh those tokens using a
23 optional `refresh token`_.
24
25 Obtaining the initial access and refresh token is outside of the scope of this
26 module. Consult `rfc6749 section 4.1`_ for complete details on the
27 Authorization Code grant flow.
28
29 .. _Authorization Code grant: https://tools.ietf.org/html/rfc6749#section-1.3.1
30 .. _refresh token: https://tools.ietf.org/html/rfc6749#section-6
31 .. _rfc6749 section 4.1: https://tools.ietf.org/html/rfc6749#section-4.1
32 """
33
34 from google.auth import _helpers
35 from google.auth import credentials
36 from google.oauth2 import _client
37
38
39 class Credentials(credentials.Scoped, credentials.Credentials):
40 """Credentials using OAuth 2.0 access and refresh tokens."""
41
42 def __init__(self, token, refresh_token=None, token_uri=None,
43 client_id=None, client_secret=None, scopes=None):
44 """
45 Args:
46 token (Optional(str)): The OAuth 2.0 access token. Can be None
47 if refresh information is provided.
48 refresh_token (str): The OAuth 2.0 refresh token. If specified,
49 credentials can be refreshed.
50 token_uri (str): The OAuth 2.0 authorization server's token
51 endpoint URI. Must be specified for refresh, can be left as
52 None if the token can not be refreshed.
53 client_id (str): The OAuth 2.0 client ID. Must be specified for
54 refresh, can be left as None if the token can not be refreshed.
55 client_secret(str): The OAuth 2.0 client secret. Must be specified
56 for refresh, can be left as None if the token can not be
57 refreshed.
58 scopes (Sequence[str]): The scopes that were originally used
59 to obtain authorization. This is a purely informative parameter
60 that can be used by :meth:`has_scopes`. OAuth 2.0 credentials
61 can not request additional scopes after authorization.
62 """
63 super(Credentials, self).__init__()
64 self.token = token
65 self._refresh_token = refresh_token
66 self._scopes = scopes
67 self._token_uri = token_uri
68 self._client_id = client_id
69 self._client_secret = client_secret
70
71 @property
72 def refresh_token(self):
73 """Optional[str]: The OAuth 2.0 refresh token."""
74 return self._refresh_token
75
76 @property
77 def token_uri(self):
78 """Optional[str]: The OAuth 2.0 authorization server's token endpoint
79 URI."""
80 return self._token_uri
81
82 @property
83 def client_id(self):
84 """Optional[str]: The OAuth 2.0 client ID."""
85 return self._client_id
86
87 @property
88 def client_secret(self):
89 """Optional[str]: The OAuth 2.0 client secret."""
90 return self._client_secret
91
92 @property
93 def requires_scopes(self):
94 """False: OAuth 2.0 credentials have their scopes set when
95 the initial token is requested and can not be changed."""
96 return False
97
98 def with_scopes(self, scopes):
99 """Unavailable, OAuth 2.0 credentials can not be re-scoped.
100
101 OAuth 2.0 credentials have their scopes set when the initial token is
102 requested and can not be changed.
103 """
104 raise NotImplementedError(
105 'OAuth 2.0 Credentials can not modify their scopes.')
106
107 @_helpers.copy_docstring(credentials.Credentials)
108 def refresh(self, request):
109 access_token, refresh_token, expiry, _ = _client.refresh_grant(
110 request, self._token_uri, self._refresh_token, self._client_id,
111 self._client_secret)
112
113 self.token = access_token
114 self.expiry = expiry
115 self._refresh_token = refresh_token
116
[end of google/oauth2/credentials.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/google/oauth2/credentials.py b/google/oauth2/credentials.py
--- a/google/oauth2/credentials.py
+++ b/google/oauth2/credentials.py
@@ -39,14 +39,16 @@
class Credentials(credentials.Scoped, credentials.Credentials):
"""Credentials using OAuth 2.0 access and refresh tokens."""
- def __init__(self, token, refresh_token=None, token_uri=None,
- client_id=None, client_secret=None, scopes=None):
+ def __init__(self, token, refresh_token=None, id_token=None,
+ token_uri=None, client_id=None, client_secret=None,
+ scopes=None):
"""
Args:
token (Optional(str)): The OAuth 2.0 access token. Can be None
if refresh information is provided.
refresh_token (str): The OAuth 2.0 refresh token. If specified,
credentials can be refreshed.
+ id_token (str): The Open ID Connect ID Token.
token_uri (str): The OAuth 2.0 authorization server's token
endpoint URI. Must be specified for refresh, can be left as
None if the token can not be refreshed.
@@ -63,6 +65,7 @@
super(Credentials, self).__init__()
self.token = token
self._refresh_token = refresh_token
+ self._id_token = id_token
self._scopes = scopes
self._token_uri = token_uri
self._client_id = client_id
@@ -79,6 +82,17 @@
URI."""
return self._token_uri
+ @property
+ def id_token(self):
+ """Optional[str]: The Open ID Connect ID Token.
+
+ Depending on the authorization server and the scopes requested, this
+ may be populated when credentials are obtained and updated when
+ :meth:`refresh` is called. This token is a JWT. It can be verified
+ and decoded using :func:`google.oauth2.id_token.verify_oauth2_token`.
+ """
+ return self._id_token
+
@property
def client_id(self):
"""Optional[str]: The OAuth 2.0 client ID."""
@@ -106,10 +120,12 @@
@_helpers.copy_docstring(credentials.Credentials)
def refresh(self, request):
- access_token, refresh_token, expiry, _ = _client.refresh_grant(
- request, self._token_uri, self._refresh_token, self._client_id,
- self._client_secret)
+ access_token, refresh_token, expiry, grant_response = (
+ _client.refresh_grant(
+ request, self._token_uri, self._refresh_token, self._client_id,
+ self._client_secret))
self.token = access_token
self.expiry = expiry
self._refresh_token = refresh_token
+ self._id_token = grant_response.get('id_token')
| {"golden_diff": "diff --git a/google/oauth2/credentials.py b/google/oauth2/credentials.py\n--- a/google/oauth2/credentials.py\n+++ b/google/oauth2/credentials.py\n@@ -39,14 +39,16 @@\n class Credentials(credentials.Scoped, credentials.Credentials):\n \"\"\"Credentials using OAuth 2.0 access and refresh tokens.\"\"\"\n \n- def __init__(self, token, refresh_token=None, token_uri=None,\n- client_id=None, client_secret=None, scopes=None):\n+ def __init__(self, token, refresh_token=None, id_token=None,\n+ token_uri=None, client_id=None, client_secret=None,\n+ scopes=None):\n \"\"\"\n Args:\n token (Optional(str)): The OAuth 2.0 access token. Can be None\n if refresh information is provided.\n refresh_token (str): The OAuth 2.0 refresh token. If specified,\n credentials can be refreshed.\n+ id_token (str): The Open ID Connect ID Token.\n token_uri (str): The OAuth 2.0 authorization server's token\n endpoint URI. Must be specified for refresh, can be left as\n None if the token can not be refreshed.\n@@ -63,6 +65,7 @@\n super(Credentials, self).__init__()\n self.token = token\n self._refresh_token = refresh_token\n+ self._id_token = id_token\n self._scopes = scopes\n self._token_uri = token_uri\n self._client_id = client_id\n@@ -79,6 +82,17 @@\n URI.\"\"\"\n return self._token_uri\n \n+ @property\n+ def id_token(self):\n+ \"\"\"Optional[str]: The Open ID Connect ID Token.\n+\n+ Depending on the authorization server and the scopes requested, this\n+ may be populated when credentials are obtained and updated when\n+ :meth:`refresh` is called. This token is a JWT. It can be verified\n+ and decoded using :func:`google.oauth2.id_token.verify_oauth2_token`.\n+ \"\"\"\n+ return self._id_token\n+\n @property\n def client_id(self):\n \"\"\"Optional[str]: The OAuth 2.0 client ID.\"\"\"\n@@ -106,10 +120,12 @@\n \n @_helpers.copy_docstring(credentials.Credentials)\n def refresh(self, request):\n- access_token, refresh_token, expiry, _ = _client.refresh_grant(\n- request, self._token_uri, self._refresh_token, self._client_id,\n- self._client_secret)\n+ access_token, refresh_token, expiry, grant_response = (\n+ _client.refresh_grant(\n+ request, self._token_uri, self._refresh_token, self._client_id,\n+ self._client_secret))\n \n self.token = access_token\n self.expiry = expiry\n self._refresh_token = refresh_token\n+ self._id_token = grant_response.get('id_token')\n", "issue": "Expose the full response from the token server\n`refresh` on oauth2 Credentials should store the full response from the token server. 
There is potentially useful data here, like the `id_token`.\n", "before_files": [{"content": "# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"OAuth 2.0 Credentials.\n\nThis module provides credentials based on OAuth 2.0 access and refresh tokens.\nThese credentials usually access resources on behalf of a user (resource\nowner).\n\nSpecifically, this is intended to use access tokens acquired using the\n`Authorization Code grant`_ and can refresh those tokens using a\noptional `refresh token`_.\n\nObtaining the initial access and refresh token is outside of the scope of this\nmodule. Consult `rfc6749 section 4.1`_ for complete details on the\nAuthorization Code grant flow.\n\n.. _Authorization Code grant: https://tools.ietf.org/html/rfc6749#section-1.3.1\n.. _refresh token: https://tools.ietf.org/html/rfc6749#section-6\n.. _rfc6749 section 4.1: https://tools.ietf.org/html/rfc6749#section-4.1\n\"\"\"\n\nfrom google.auth import _helpers\nfrom google.auth import credentials\nfrom google.oauth2 import _client\n\n\nclass Credentials(credentials.Scoped, credentials.Credentials):\n \"\"\"Credentials using OAuth 2.0 access and refresh tokens.\"\"\"\n\n def __init__(self, token, refresh_token=None, token_uri=None,\n client_id=None, client_secret=None, scopes=None):\n \"\"\"\n Args:\n token (Optional(str)): The OAuth 2.0 access token. Can be None\n if refresh information is provided.\n refresh_token (str): The OAuth 2.0 refresh token. If specified,\n credentials can be refreshed.\n token_uri (str): The OAuth 2.0 authorization server's token\n endpoint URI. Must be specified for refresh, can be left as\n None if the token can not be refreshed.\n client_id (str): The OAuth 2.0 client ID. Must be specified for\n refresh, can be left as None if the token can not be refreshed.\n client_secret(str): The OAuth 2.0 client secret. Must be specified\n for refresh, can be left as None if the token can not be\n refreshed.\n scopes (Sequence[str]): The scopes that were originally used\n to obtain authorization. This is a purely informative parameter\n that can be used by :meth:`has_scopes`. 
OAuth 2.0 credentials\n can not request additional scopes after authorization.\n \"\"\"\n super(Credentials, self).__init__()\n self.token = token\n self._refresh_token = refresh_token\n self._scopes = scopes\n self._token_uri = token_uri\n self._client_id = client_id\n self._client_secret = client_secret\n\n @property\n def refresh_token(self):\n \"\"\"Optional[str]: The OAuth 2.0 refresh token.\"\"\"\n return self._refresh_token\n\n @property\n def token_uri(self):\n \"\"\"Optional[str]: The OAuth 2.0 authorization server's token endpoint\n URI.\"\"\"\n return self._token_uri\n\n @property\n def client_id(self):\n \"\"\"Optional[str]: The OAuth 2.0 client ID.\"\"\"\n return self._client_id\n\n @property\n def client_secret(self):\n \"\"\"Optional[str]: The OAuth 2.0 client secret.\"\"\"\n return self._client_secret\n\n @property\n def requires_scopes(self):\n \"\"\"False: OAuth 2.0 credentials have their scopes set when\n the initial token is requested and can not be changed.\"\"\"\n return False\n\n def with_scopes(self, scopes):\n \"\"\"Unavailable, OAuth 2.0 credentials can not be re-scoped.\n\n OAuth 2.0 credentials have their scopes set when the initial token is\n requested and can not be changed.\n \"\"\"\n raise NotImplementedError(\n 'OAuth 2.0 Credentials can not modify their scopes.')\n\n @_helpers.copy_docstring(credentials.Credentials)\n def refresh(self, request):\n access_token, refresh_token, expiry, _ = _client.refresh_grant(\n request, self._token_uri, self._refresh_token, self._client_id,\n self._client_secret)\n\n self.token = access_token\n self.expiry = expiry\n self._refresh_token = refresh_token\n", "path": "google/oauth2/credentials.py"}]} | 1,848 | 641 |
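
A quick illustration of the behavior the record's patch enables: after `refresh`, the credentials keep the grant response's `id_token`. This is a usage sketch, not part of the record; the endpoint, client ID/secret, and refresh token below are placeholder values.

```python
# Sketch assuming the patched google.oauth2.credentials.Credentials;
# all credential values here are placeholders.
import google.auth.transport.requests
from google.oauth2.credentials import Credentials

creds = Credentials(
    token=None,
    refresh_token="1//placeholder-refresh-token",
    token_uri="https://oauth2.googleapis.com/token",
    client_id="placeholder-client-id",
    client_secret="placeholder-client-secret",
)

request = google.auth.transport.requests.Request()
creds.refresh(request)  # refresh_grant() now also returns the full grant response

# The Open ID Connect ID token (a JWT), if the server returned one:
print(creds.id_token)
```
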
gh_patches_debug_18776 | rasdani/github-patches | git_diff | pwndbg__pwndbg-1239 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`context` sometimes gets printed with the `set` command
`set $rax=0` sometimes causes `context` to be immediately called afterwards. I don't think this is always reproducible, but I will keep investigating it.
</issue>
<code>
[start of pwndbg/gdblib/prompt.py]
1 import re
2
3 import gdb
4
5 import pwndbg.decorators
6 import pwndbg.gdblib.events
7 import pwndbg.gdbutils
8 import pwndbg.lib.memoize
9 from pwndbg.color import disable_colors
10 from pwndbg.color import message
11 from pwndbg.lib.tips import get_tip_of_the_day
12
13 funcs_list_str = ", ".join(
14 message.notice("$" + f.name) for f in pwndbg.gdbutils.functions.functions
15 )
16
17 num_pwndbg_cmds = sum(1 for _ in filter(lambda c: not c.shell, pwndbg.commands.commands))
18 num_shell_cmds = sum(1 for _ in filter(lambda c: c.shell, pwndbg.commands.commands))
19 hint_lines = (
20 "loaded %i pwndbg commands and %i shell commands. Type %s for a list."
21 % (num_pwndbg_cmds, num_shell_cmds, message.notice("pwndbg [--shell | --all] [filter]")),
22 "created %s gdb functions (can be used with print/break)" % funcs_list_str,
23 )
24
25 for line in hint_lines:
26 print(message.prompt("pwndbg: ") + message.system(line))
27
28 # noinspection PyPackageRequirements
29 show_tip = pwndbg.config.Parameter(
30 "show-tips", True, "whether to display the tip of the day on startup"
31 )
32
33 cur = None
34
35
36 def initial_hook(*a):
37 if show_tip and not pwndbg.decorators.first_prompt:
38 colored_tip = re.sub(
39 "`(.*?)`", lambda s: message.warn(s.group()[1:-1]), get_tip_of_the_day()
40 )
41 print(
42 message.prompt("------- tip of the day")
43 + message.system(" (disable with %s)" % message.notice("set show-tips off"))
44 + message.prompt(" -------")
45 )
46 print((colored_tip))
47 pwndbg.decorators.first_prompt = True
48
49 prompt_hook(*a)
50 gdb.prompt_hook = prompt_hook
51
52
53 def prompt_hook(*a):
54 global cur
55
56 new = (gdb.selected_inferior(), gdb.selected_thread())
57
58 if cur != new:
59 pwndbg.gdblib.events.after_reload(start=cur is None)
60 cur = new
61
62 if pwndbg.proc.alive and pwndbg.proc.thread_is_stopped:
63 prompt_hook_on_stop(*a)
64
65
66 @pwndbg.lib.memoize.reset_on_stop
67 def prompt_hook_on_stop(*a):
68 pwndbg.commands.context.context()
69
70
71 @pwndbg.config.Trigger([message.config_prompt_color, disable_colors])
72 def set_prompt():
73 prompt = "pwndbg> "
74
75 if not disable_colors:
76 prompt = "\x02" + prompt + "\x01" # STX + prompt + SOH
77 prompt = message.prompt(prompt)
78 prompt = "\x01" + prompt + "\x02" # SOH + prompt + STX
79
80 gdb.execute("set prompt %s" % prompt)
81
82
83 if pwndbg.gdblib.events.before_prompt_event.is_real_event:
84 gdb.prompt_hook = initial_hook
85
86 else:
87 # Old GDBs doesn't have gdb.events.before_prompt, so we will emulate it using gdb.prompt_hook
88 def extended_prompt_hook(*a):
89 pwndbg.gdblib.events.before_prompt_event.invoke_callbacks()
90 return prompt_hook(*a)
91
92 gdb.prompt_hook = extended_prompt_hook
93
[end of pwndbg/gdblib/prompt.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pwndbg/gdblib/prompt.py b/pwndbg/gdblib/prompt.py
--- a/pwndbg/gdblib/prompt.py
+++ b/pwndbg/gdblib/prompt.py
@@ -50,8 +50,11 @@
gdb.prompt_hook = prompt_hook
+context_shown = False
+
+
def prompt_hook(*a):
- global cur
+ global cur, context_shown
new = (gdb.selected_inferior(), gdb.selected_thread())
@@ -59,13 +62,15 @@
pwndbg.gdblib.events.after_reload(start=cur is None)
cur = new
- if pwndbg.proc.alive and pwndbg.proc.thread_is_stopped:
- prompt_hook_on_stop(*a)
+ if pwndbg.proc.alive and pwndbg.proc.thread_is_stopped and not context_shown:
+ pwndbg.commands.context.context()
+ context_shown = True
[email protected]_on_stop
-def prompt_hook_on_stop(*a):
- pwndbg.commands.context.context()
[email protected]
+def reset_context_shown(*a):
+ global context_shown
+ context_shown = False
@pwndbg.config.Trigger([message.config_prompt_color, disable_colors])
| {"golden_diff": "diff --git a/pwndbg/gdblib/prompt.py b/pwndbg/gdblib/prompt.py\n--- a/pwndbg/gdblib/prompt.py\n+++ b/pwndbg/gdblib/prompt.py\n@@ -50,8 +50,11 @@\n gdb.prompt_hook = prompt_hook\n \n \n+context_shown = False\n+\n+\n def prompt_hook(*a):\n- global cur\n+ global cur, context_shown\n \n new = (gdb.selected_inferior(), gdb.selected_thread())\n \n@@ -59,13 +62,15 @@\n pwndbg.gdblib.events.after_reload(start=cur is None)\n cur = new\n \n- if pwndbg.proc.alive and pwndbg.proc.thread_is_stopped:\n- prompt_hook_on_stop(*a)\n+ if pwndbg.proc.alive and pwndbg.proc.thread_is_stopped and not context_shown:\n+ pwndbg.commands.context.context()\n+ context_shown = True\n \n \[email protected]_on_stop\n-def prompt_hook_on_stop(*a):\n- pwndbg.commands.context.context()\[email protected]\n+def reset_context_shown(*a):\n+ global context_shown\n+ context_shown = False\n \n \n @pwndbg.config.Trigger([message.config_prompt_color, disable_colors])\n", "issue": "`context` sometimes gets printed with the `set` command\n`set $rax=0` sometimes causes `context` to be immediately called afterwards. I think don't think this is always reproducible, but will keep investigating it.\n", "before_files": [{"content": "import re\n\nimport gdb\n\nimport pwndbg.decorators\nimport pwndbg.gdblib.events\nimport pwndbg.gdbutils\nimport pwndbg.lib.memoize\nfrom pwndbg.color import disable_colors\nfrom pwndbg.color import message\nfrom pwndbg.lib.tips import get_tip_of_the_day\n\nfuncs_list_str = \", \".join(\n message.notice(\"$\" + f.name) for f in pwndbg.gdbutils.functions.functions\n)\n\nnum_pwndbg_cmds = sum(1 for _ in filter(lambda c: not c.shell, pwndbg.commands.commands))\nnum_shell_cmds = sum(1 for _ in filter(lambda c: c.shell, pwndbg.commands.commands))\nhint_lines = (\n \"loaded %i pwndbg commands and %i shell commands. 
Type %s for a list.\"\n % (num_pwndbg_cmds, num_shell_cmds, message.notice(\"pwndbg [--shell | --all] [filter]\")),\n \"created %s gdb functions (can be used with print/break)\" % funcs_list_str,\n)\n\nfor line in hint_lines:\n print(message.prompt(\"pwndbg: \") + message.system(line))\n\n# noinspection PyPackageRequirements\nshow_tip = pwndbg.config.Parameter(\n \"show-tips\", True, \"whether to display the tip of the day on startup\"\n)\n\ncur = None\n\n\ndef initial_hook(*a):\n if show_tip and not pwndbg.decorators.first_prompt:\n colored_tip = re.sub(\n \"`(.*?)`\", lambda s: message.warn(s.group()[1:-1]), get_tip_of_the_day()\n )\n print(\n message.prompt(\"------- tip of the day\")\n + message.system(\" (disable with %s)\" % message.notice(\"set show-tips off\"))\n + message.prompt(\" -------\")\n )\n print((colored_tip))\n pwndbg.decorators.first_prompt = True\n\n prompt_hook(*a)\n gdb.prompt_hook = prompt_hook\n\n\ndef prompt_hook(*a):\n global cur\n\n new = (gdb.selected_inferior(), gdb.selected_thread())\n\n if cur != new:\n pwndbg.gdblib.events.after_reload(start=cur is None)\n cur = new\n\n if pwndbg.proc.alive and pwndbg.proc.thread_is_stopped:\n prompt_hook_on_stop(*a)\n\n\[email protected]_on_stop\ndef prompt_hook_on_stop(*a):\n pwndbg.commands.context.context()\n\n\[email protected]([message.config_prompt_color, disable_colors])\ndef set_prompt():\n prompt = \"pwndbg> \"\n\n if not disable_colors:\n prompt = \"\\x02\" + prompt + \"\\x01\" # STX + prompt + SOH\n prompt = message.prompt(prompt)\n prompt = \"\\x01\" + prompt + \"\\x02\" # SOH + prompt + STX\n\n gdb.execute(\"set prompt %s\" % prompt)\n\n\nif pwndbg.gdblib.events.before_prompt_event.is_real_event:\n gdb.prompt_hook = initial_hook\n\nelse:\n # Old GDBs doesn't have gdb.events.before_prompt, so we will emulate it using gdb.prompt_hook\n def extended_prompt_hook(*a):\n pwndbg.gdblib.events.before_prompt_event.invoke_callbacks()\n return prompt_hook(*a)\n\n gdb.prompt_hook = extended_prompt_hook\n", "path": "pwndbg/gdblib/prompt.py"}]} | 1,504 | 302 |
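
The fix above swaps a memoized helper for an explicit flag that is cleared on each stop event, so the context panels render at most once per stop even if commands such as `set` trigger extra prompt hooks. A standalone sketch of that pattern in plain GDB Python follows; the names are illustrative and not pwndbg internals.

```python
# Minimal "show once per stop" sketch using plain GDB Python APIs.
import gdb

context_shown = False

def on_stop(event):
    # Each new stop re-arms the display.
    global context_shown
    context_shown = False

gdb.events.stop.connect(on_stop)

def prompt_hook(current_prompt):
    global context_shown
    if not context_shown:
        print("... context panels would render here ...")
        context_shown = True

gdb.prompt_hook = prompt_hook
```
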
gh_patches_debug_851 | rasdani/github-patches | git_diff | Gallopsled__pwntools-1893 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
'pwn cyclic -o afca' throws a BytesWarning
```
$ pwn cyclic -o afca
/Users/heapcrash/pwntools/pwnlib/commandline/cyclic.py:74: BytesWarning: Text is not bytes; assuming ASCII, no guarantees. See https://docs.pwntools.com/#bytes
pat = flat(pat, bytes=args.length)
506
```
</issue>
<code>
[start of pwnlib/commandline/cyclic.py]
1 #!/usr/bin/env python2
2 from __future__ import absolute_import
3 from __future__ import division
4
5 import argparse
6 import six
7 import string
8 import sys
9
10 import pwnlib.args
11 pwnlib.args.free_form = False
12
13 from pwn import *
14 from pwnlib.commandline import common
15
16 parser = common.parser_commands.add_parser(
17 'cyclic',
18 help = "Cyclic pattern creator/finder",
19 description = "Cyclic pattern creator/finder"
20 )
21
22 parser.add_argument(
23 '-a', '--alphabet',
24 metavar = 'alphabet',
25 default = string.ascii_lowercase.encode(),
26 type = packing._encode,
27 help = 'The alphabet to use in the cyclic pattern (defaults to all lower case letters)',
28 )
29
30 parser.add_argument(
31 '-n', '--length',
32 metavar = 'length',
33 default = 4,
34 type = int,
35 help = 'Size of the unique subsequences (defaults to 4).'
36 )
37
38 parser.add_argument(
39 '-c', '--context',
40 metavar = 'context',
41 action = 'append',
42 type = common.context_arg,
43 choices = common.choices,
44 help = 'The os/architecture/endianness/bits the shellcode will run in (default: linux/i386), choose from: %s' % common.choices,
45 )
46
47 group = parser.add_mutually_exclusive_group(required=False)
48 group.add_argument(
49 '-l', '-o', '--offset', '--lookup',
50 dest = 'lookup',
51 metavar = 'lookup_value',
52 help = 'Do a lookup instead printing the alphabet',
53 )
54
55 group.add_argument(
56 'count',
57 type=int,
58 nargs='?',
59 default=None,
60 help='Number of characters to print'
61 )
62
63 def main(args):
64 alphabet = args.alphabet
65 subsize = args.length
66
67 if args.lookup:
68 pat = args.lookup
69
70 try:
71 pat = int(pat, 0)
72 except ValueError:
73 pass
74 pat = flat(pat, bytes=args.length)
75
76 if len(pat) != subsize:
77 log.critical('Subpattern must be %d bytes' % subsize)
78 sys.exit(1)
79
80 if not all(c in alphabet for c in pat):
81 log.critical('Pattern contains characters not present in the alphabet')
82 sys.exit(1)
83
84 offset = cyclic_find(pat, alphabet, subsize)
85
86 if offset == -1:
87 log.critical('Given pattern does not exist in cyclic pattern')
88 sys.exit(1)
89 else:
90 print(offset)
91 else:
92 want = args.count
93 result = cyclic(want, alphabet, subsize)
94 got = len(result)
95 if want is not None and got < want:
96 log.failure("Alphabet too small (max length = %i)" % got)
97
98 out = getattr(sys.stdout, 'buffer', sys.stdout)
99 out.write(result)
100
101 if out.isatty():
102 out.write(b'\n')
103
104 if __name__ == '__main__':
105 pwnlib.commandline.common.main(__file__)
106
[end of pwnlib/commandline/cyclic.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pwnlib/commandline/cyclic.py b/pwnlib/commandline/cyclic.py
--- a/pwnlib/commandline/cyclic.py
+++ b/pwnlib/commandline/cyclic.py
@@ -67,6 +67,9 @@
if args.lookup:
pat = args.lookup
+ if six.PY3:
+ pat = bytes(pat, encoding='utf-8')
+
try:
pat = int(pat, 0)
except ValueError:
| {"golden_diff": "diff --git a/pwnlib/commandline/cyclic.py b/pwnlib/commandline/cyclic.py\n--- a/pwnlib/commandline/cyclic.py\n+++ b/pwnlib/commandline/cyclic.py\n@@ -67,6 +67,9 @@\n if args.lookup:\n pat = args.lookup\n \n+ if six.PY3:\n+ pat = bytes(pat, encoding='utf-8')\n+\n try:\n pat = int(pat, 0)\n except ValueError:\n", "issue": "'pwn cyclic -o afca' throws a BytesWarning\n\r\n```\r\n$ pwn cyclic -o afca\r\n/Users/heapcrash/pwntools/pwnlib/commandline/cyclic.py:74: BytesWarning: Text is not bytes; assuming ASCII, no guarantees. See https://docs.pwntools.com/#bytes\r\n pat = flat(pat, bytes=args.length)\r\n506\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python2\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport argparse\nimport six\nimport string\nimport sys\n\nimport pwnlib.args\npwnlib.args.free_form = False\n\nfrom pwn import *\nfrom pwnlib.commandline import common\n\nparser = common.parser_commands.add_parser(\n 'cyclic',\n help = \"Cyclic pattern creator/finder\",\n description = \"Cyclic pattern creator/finder\"\n)\n\nparser.add_argument(\n '-a', '--alphabet',\n metavar = 'alphabet',\n default = string.ascii_lowercase.encode(),\n type = packing._encode,\n help = 'The alphabet to use in the cyclic pattern (defaults to all lower case letters)',\n)\n\nparser.add_argument(\n '-n', '--length',\n metavar = 'length',\n default = 4,\n type = int,\n help = 'Size of the unique subsequences (defaults to 4).'\n)\n\nparser.add_argument(\n '-c', '--context',\n metavar = 'context',\n action = 'append',\n type = common.context_arg,\n choices = common.choices,\n help = 'The os/architecture/endianness/bits the shellcode will run in (default: linux/i386), choose from: %s' % common.choices,\n)\n\ngroup = parser.add_mutually_exclusive_group(required=False)\ngroup.add_argument(\n '-l', '-o', '--offset', '--lookup',\n dest = 'lookup',\n metavar = 'lookup_value',\n help = 'Do a lookup instead printing the alphabet',\n)\n\ngroup.add_argument(\n 'count',\n type=int,\n nargs='?',\n default=None,\n help='Number of characters to print'\n)\n\ndef main(args):\n alphabet = args.alphabet\n subsize = args.length\n\n if args.lookup:\n pat = args.lookup\n\n try:\n pat = int(pat, 0)\n except ValueError:\n pass\n pat = flat(pat, bytes=args.length)\n\n if len(pat) != subsize:\n log.critical('Subpattern must be %d bytes' % subsize)\n sys.exit(1)\n\n if not all(c in alphabet for c in pat):\n log.critical('Pattern contains characters not present in the alphabet')\n sys.exit(1)\n\n offset = cyclic_find(pat, alphabet, subsize)\n\n if offset == -1:\n log.critical('Given pattern does not exist in cyclic pattern')\n sys.exit(1)\n else:\n print(offset)\n else:\n want = args.count\n result = cyclic(want, alphabet, subsize)\n got = len(result)\n if want is not None and got < want:\n log.failure(\"Alphabet too small (max length = %i)\" % got)\n\n out = getattr(sys.stdout, 'buffer', sys.stdout)\n out.write(result)\n\n if out.isatty():\n out.write(b'\\n')\n\nif __name__ == '__main__':\n pwnlib.commandline.common.main(__file__)\n", "path": "pwnlib/commandline/cyclic.py"}]} | 1,497 | 106 |
gh_patches_debug_14472 | rasdani/github-patches | git_diff | mytardis__mytardis-1507 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
NPM_FILE_PATTERNS - collectstatic failing on Windows
MyTardis currently uses `django-npm` to collect static content which has been npm installed into the `node_modules/` folder.
Because we don't necessarily want to copy everything from `node_modules/` when running `collectstatic`, MyTardis uses django-npm's `NPM_FILE_PATTERNS` setting in `tardis/default_settings/static_files.py`
See: https://github.com/kevin1024/django-npm#configuration
It can be used like this:
```
NPM_FILE_PATTERNS = {
'jquery': ['*'],
'jquery-migrate': ['*'],
}
```
to copy everything within `node_modules/jquery/` and everything within `node_modules/jquery-migrate/` into the static folder written to by `collectstatic`.
If you only want `collectstatic` to copy a subset of the files in the node_modules folder, `django-npm` provides the ability to use glob patterns like this:
```
NPM_FILE_PATTERNS = {
'bootstrap': ['dist/*'],
'font-awesome': ['css/*', 'fonts/*'],
}
```
However, these glob patterns don't seem to work on Windows, i.e. nothing is copied from the `node_modules` folders which have glob patterns more complex than `['*']`, see: https://github.com/kevin1024/django-npm/issues/15
A workaround (when running MyTardis's `collectstatic` on Windows) is to redefine `NPM_FILE_PATTERNS` in your `tardis/settings.py`, and just use the `['*']` pattern for every node module you want to be copied by `collectstatic`.
</issue>
<code>
[start of tardis/default_settings/static_files.py]
1 from os import path
2 from .storage import DEFAULT_STORAGE_BASE_DIR
3
4 # Absolute path to the directory that holds media.
5 # Example: "/home/media/media.lawrence.com/"
6 MEDIA_ROOT = DEFAULT_STORAGE_BASE_DIR
7
8 # Used by "django collectstatic"
9 STATIC_ROOT = path.abspath(path.join(path.dirname(__file__), '../..', 'static'))
10
11 # Use cachable copies of static files
12 STATICFILES_STORAGE = \
13 'django.contrib.staticfiles.storage.CachedStaticFilesStorage'
14
15 STATICFILES_FINDERS = (
16 'django.contrib.staticfiles.finders.FileSystemFinder',
17 'django.contrib.staticfiles.finders.AppDirectoriesFinder',
18 'npm.finders.NpmFinder',
19 )
20
21 # django-npm settings:
22 NPM_ROOT_PATH = path.abspath(path.join(path.dirname(__file__), '../..'))
23
24 # If you have run "npm install", rather than "npm install --production",
25 # you will get a lot of devDependencies installed in node_modules/ which
26 # are only needed for development/testing (e.g. "npm test") and don't
27 # need to be copied when running collectstatic. NPM_FILE_PATTERNS
28 # specifies the folders within node_modules/ which do need to be copied:
29 NPM_FILE_PATTERNS = {
30 'angular': ['*'],
31 'angular-resource': ['*'],
32 'backbone': ['*'],
33 'backbone-forms': ['*'],
34 'blueimp-file-upload': ['*'],
35 'bootstrap': ['dist/*'],
36 'bootstrap-3-typeahead': ['*'],
37 'clipboard': ['*'],
38 'font-awesome': ['css/*', 'fonts/*'],
39 'jquery': ['*'],
40 'jquery-migrate': ['*'],
41 'jquery-ui-dist': ['jquery-ui.min.js'],
42 'mustache': ['mustache.min.js'],
43 'ng-dialog': ['*'],
44 'sprintf-js': ['dist/*'],
45 'underscore': ['*'],
46 'underscore.string': ['dist/*']
47 }
48
[end of tardis/default_settings/static_files.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/tardis/default_settings/static_files.py b/tardis/default_settings/static_files.py
--- a/tardis/default_settings/static_files.py
+++ b/tardis/default_settings/static_files.py
@@ -32,16 +32,16 @@
'backbone': ['*'],
'backbone-forms': ['*'],
'blueimp-file-upload': ['*'],
- 'bootstrap': ['dist/*'],
+ 'bootstrap': ['*'],
'bootstrap-3-typeahead': ['*'],
'clipboard': ['*'],
- 'font-awesome': ['css/*', 'fonts/*'],
+ 'font-awesome': ['*'],
'jquery': ['*'],
'jquery-migrate': ['*'],
'jquery-ui-dist': ['jquery-ui.min.js'],
'mustache': ['mustache.min.js'],
'ng-dialog': ['*'],
- 'sprintf-js': ['dist/*'],
+ 'sprintf-js': ['*'],
'underscore': ['*'],
- 'underscore.string': ['dist/*']
+ 'underscore.string': ['*']
}
| {"golden_diff": "diff --git a/tardis/default_settings/static_files.py b/tardis/default_settings/static_files.py\n--- a/tardis/default_settings/static_files.py\n+++ b/tardis/default_settings/static_files.py\n@@ -32,16 +32,16 @@\n 'backbone': ['*'],\n 'backbone-forms': ['*'],\n 'blueimp-file-upload': ['*'],\n- 'bootstrap': ['dist/*'],\n+ 'bootstrap': ['*'],\n 'bootstrap-3-typeahead': ['*'],\n 'clipboard': ['*'],\n- 'font-awesome': ['css/*', 'fonts/*'],\n+ 'font-awesome': ['*'],\n 'jquery': ['*'],\n 'jquery-migrate': ['*'],\n 'jquery-ui-dist': ['jquery-ui.min.js'],\n 'mustache': ['mustache.min.js'],\n 'ng-dialog': ['*'],\n- 'sprintf-js': ['dist/*'],\n+ 'sprintf-js': ['*'],\n 'underscore': ['*'],\n- 'underscore.string': ['dist/*']\n+ 'underscore.string': ['*']\n }\n", "issue": "NPM_FILE_PATTERNS - collectstatic failing on Windows\nMyTardis currently uses `django-npm` to collect static content which has been npm installed into the `node_modules/` folder.\r\n\r\nBecause we don't necessarily want to copy everything from `node_modules/` when running `collectstatic`, MyTardis uses django-npm's `NPM_FILE_PATTERNS` setting in `tardis/default_settings/static_files.py`\r\n\r\nSee: https://github.com/kevin1024/django-npm#configuration\r\n\r\nIt can be used like this:\r\n\r\n```\r\nNPM_FILE_PATTERNS = {\r\n 'jquery': ['*'],\r\n 'jquery-migrate': ['*'],\r\n}\r\n```\r\n\r\nto copy everything within `node_modules/jquery/` and everything within `node_modules/jquery-migrate/` into the static folder written to by `collectstatic`.\r\n\r\nIf you only want `collectstatic` to copy a subset of the files in the node_modules folder, `django-npm` provides the ability to use glob patterns like this:\r\n\r\n```\r\nNPM_FILE_PATTERNS = {\r\n 'bootstrap': ['dist/*'],\r\n 'font-awesome': ['css/*', 'fonts/*'],\r\n}\r\n```\r\n\r\nHowever, these glob patterns don't seem to work on Windows, i.e. nothing is copied from the `node_modules` folders which have glob patterns more complex than `['*']`, see: https://github.com/kevin1024/django-npm/issues/15\r\n\r\nA workaround (when running MyTardis's `collectstatic` on Windows) is to redefine `NPM_FILE_PATTERNS` in your `tardis/settings.py`, and just use the `['*']` pattern for every node module you want to be copied by `collectstatic`.\n", "before_files": [{"content": "from os import path\nfrom .storage import DEFAULT_STORAGE_BASE_DIR\n\n# Absolute path to the directory that holds media.\n# Example: \"/home/media/media.lawrence.com/\"\nMEDIA_ROOT = DEFAULT_STORAGE_BASE_DIR\n\n# Used by \"django collectstatic\"\nSTATIC_ROOT = path.abspath(path.join(path.dirname(__file__), '../..', 'static'))\n\n# Use cachable copies of static files\nSTATICFILES_STORAGE = \\\n 'django.contrib.staticfiles.storage.CachedStaticFilesStorage'\n\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n 'npm.finders.NpmFinder',\n)\n\n# django-npm settings:\nNPM_ROOT_PATH = path.abspath(path.join(path.dirname(__file__), '../..'))\n\n# If you have run \"npm install\", rather than \"npm install --production\",\n# you will get a lot of devDependencies installed in node_modules/ which\n# are only needed for development/testing (e.g. \"npm test\") and don't\n# need to be copied when running collectstatic. 
NPM_FILE_PATTERNS\n# specifies the folders within node_modules/ which do need to be copied:\nNPM_FILE_PATTERNS = {\n 'angular': ['*'],\n 'angular-resource': ['*'],\n 'backbone': ['*'],\n 'backbone-forms': ['*'],\n 'blueimp-file-upload': ['*'],\n 'bootstrap': ['dist/*'],\n 'bootstrap-3-typeahead': ['*'],\n 'clipboard': ['*'],\n 'font-awesome': ['css/*', 'fonts/*'],\n 'jquery': ['*'],\n 'jquery-migrate': ['*'],\n 'jquery-ui-dist': ['jquery-ui.min.js'],\n 'mustache': ['mustache.min.js'],\n 'ng-dialog': ['*'],\n 'sprintf-js': ['dist/*'],\n 'underscore': ['*'],\n 'underscore.string': ['dist/*']\n}\n", "path": "tardis/default_settings/static_files.py"}]} | 1,413 | 238 |
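
The Windows workaround described in the issue, spelled out: override the glob-based patterns in `tardis/settings.py` with catch-all entries. This sketch mirrors the package list from the record above; treat it as illustrative rather than a recommended configuration.

```python
# tardis/settings.py (sketch): every pattern reduced to ['*'] so that
# django-npm's collectstatic copies on Windows, at the cost of copying
# more files than the original dist/css/fonts globs would.
NPM_FILE_PATTERNS = {
    'angular': ['*'],
    'angular-resource': ['*'],
    'backbone': ['*'],
    'backbone-forms': ['*'],
    'blueimp-file-upload': ['*'],
    'bootstrap': ['*'],          # was ['dist/*']
    'bootstrap-3-typeahead': ['*'],
    'clipboard': ['*'],
    'font-awesome': ['*'],       # was ['css/*', 'fonts/*']
    'jquery': ['*'],
    'jquery-migrate': ['*'],
    'jquery-ui-dist': ['*'],
    'mustache': ['*'],
    'ng-dialog': ['*'],
    'sprintf-js': ['*'],         # was ['dist/*']
    'underscore': ['*'],
    'underscore.string': ['*'],  # was ['dist/*']
}
```
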
gh_patches_debug_13235 | rasdani/github-patches | git_diff | kivy__python-for-android-618 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Jpeg recipe is broken
It is missing /home/brussee/.local/lib/python2.7/site-packages/pythonforandroid/recipes/jpeg/Application.mk
Perhaps just the path is incorrect?
```
[INFO]: Prebuilding jpeg for armeabi
[INFO]: jpeg has no prebuild_armeabi, skipping
[DEBUG]: -> running cp /home/brussee/.local/lib/python2.7/site-packages/pythonforandroid/recipes/jpeg/Application.mk /home/brussee/.local/share/python-for-android/build/other_builds/jpeg/armeabi/jpeg/Application.mk
[DEBUG]: /bin/cp: cannot stat ‘/home/brussee/.local/lib/python2.7/site-packages/pythonforandroid/recipes/jpeg/Application.mk’: No such file or directory
Traceback (most recent call last):
File "/home/brussee/.local/bin/p4a", line 9, in <module>
load_entry_point('python-for-android==0.3', 'console_scripts', 'p4a')()
File "/home/brussee/.local/lib/python2.7/site-packages/pythonforandroid/toolchain.py", line 708, in main
ToolchainCL()
File "/home/brussee/.local/lib/python2.7/site-packages/pythonforandroid/toolchain.py", line 323, in __init__
getattr(self, args.command)(unknown)
File "/home/brussee/.local/lib/python2.7/site-packages/pythonforandroid/toolchain.py", line 105, in wrapper_func
build_dist_from_args(ctx, dist, dist_args)
File "/home/brussee/.local/lib/python2.7/site-packages/pythonforandroid/toolchain.py", line 142, in build_dist_from_args
build_recipes(build_order, python_modules, ctx)
File "/home/brussee/.local/lib/python2.7/site-packages/pythonforandroid/build.py", line 543, in build_recipes
recipe.prebuild_arch(arch)
File "/home/brussee/.local/lib/python2.7/site-packages/pythonforandroid/recipes/jpeg/__init__.py", line 22, in prebuild_arch
shprint(sh.cp, join(self.get_recipe_dir(), 'Application.mk'), app_mk)
File "/home/brussee/.local/lib/python2.7/site-packages/pythonforandroid/logger.py", line 160, in shprint
for line in output:
File "/home/brussee/.local/lib/python2.7/site-packages/sh.py", line 565, in next
self.wait()
File "/home/brussee/.local/lib/python2.7/site-packages/sh.py", line 500, in wait
self.handle_command_exit_code(exit_code)
File "/home/brussee/.local/lib/python2.7/site-packages/sh.py", line 516, in handle_command_exit_code
raise exc(self.ran, self.process.stdout, self.process.stderr)
sh.ErrorReturnCode_1
```
</issue>
<code>
[start of setup.py]
1
2 from setuptools import setup, find_packages
3 from os import walk
4 from os.path import join, dirname, sep
5 import os
6 import glob
7
8 # NOTE: All package data should also be set in MANIFEST.in
9
10 packages = find_packages()
11
12 package_data = {'': ['*.tmpl',
13 '*.patch', ], }
14
15 data_files = []
16
17 # By specifying every file manually, package_data will be able to
18 # include them in binary distributions. Note that we have to add
19 # everything as a 'pythonforandroid' rule, using '' apparently doesn't
20 # work.
21 def recursively_include(results, directory, patterns):
22 for root, subfolders, files in walk(directory):
23 for fn in files:
24 if not any([glob.fnmatch.fnmatch(fn, pattern) for pattern in patterns]):
25 continue
26 filename = join(root, fn)
27 directory = 'pythonforandroid'
28 if directory not in results:
29 results[directory] = []
30 results[directory].append(join(*filename.split(sep)[1:]))
31
32 recursively_include(package_data, 'pythonforandroid/recipes',
33 ['*.patch', 'Setup*', '*.pyx', '*.py', '*.c', '*.h', ])
34 recursively_include(package_data, 'pythonforandroid/bootstraps',
35 ['*.properties', '*.xml', '*.java', '*.tmpl', '*.txt', '*.png',
36 '*.mk', '*.c', '*.h', '*.py', '*.sh', '*.jpg', '*.aidl', ])
37 recursively_include(package_data, 'pythonforandroid/bootstraps',
38 ['sdl-config', ])
39 recursively_include(package_data, 'pythonforandroid',
40 ['liblink', 'biglink', 'liblink.sh'])
41
42 setup(name='python-for-android',
43 version='0.3',
44 description='Android APK packager for Python scripts and apps',
45 author='The Kivy team',
46 author_email='[email protected]',
47 url='https://github.com/kivy/python-for-android',
48 license='MIT',
49 install_requires=['appdirs', 'colorama>0.3', 'sh', 'jinja2', 'argparse',
50 'six'],
51 entry_points={
52 'console_scripts': [
53 'python-for-android = pythonforandroid.toolchain:main',
54 'p4a = pythonforandroid.toolchain:main',
55 ],
56 'distutils.commands': [
57 'bdist_apk = pythonforandroid.bdist_apk:BdistAPK',
58 ],
59 },
60 classifiers = [
61 'Development Status :: 3 - Alpha',
62 'Intended Audience :: Developers',
63 'License :: OSI Approved :: MIT License',
64 'Operating System :: Microsoft :: Windows',
65 'Operating System :: OS Independent',
66 'Operating System :: POSIX :: Linux',
67 'Operating System :: MacOS :: MacOS X',
68 'Programming Language :: C',
69 'Programming Language :: Python :: 2',
70 'Programming Language :: Python :: 3',
71 'Topic :: Software Development',
72 'Topic :: Utilities',
73 ],
74 packages=packages,
75 package_data=package_data,
76 )
77
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -30,7 +30,8 @@
results[directory].append(join(*filename.split(sep)[1:]))
recursively_include(package_data, 'pythonforandroid/recipes',
- ['*.patch', 'Setup*', '*.pyx', '*.py', '*.c', '*.h', ])
+ ['*.patch', 'Setup*', '*.pyx', '*.py', '*.c', '*.h',
+ '*.mk', ])
recursively_include(package_data, 'pythonforandroid/bootstraps',
['*.properties', '*.xml', '*.java', '*.tmpl', '*.txt', '*.png',
'*.mk', '*.c', '*.h', '*.py', '*.sh', '*.jpg', '*.aidl', ])
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -30,7 +30,8 @@\n results[directory].append(join(*filename.split(sep)[1:]))\n \n recursively_include(package_data, 'pythonforandroid/recipes',\n- ['*.patch', 'Setup*', '*.pyx', '*.py', '*.c', '*.h', ])\n+ ['*.patch', 'Setup*', '*.pyx', '*.py', '*.c', '*.h',\n+ '*.mk', ])\n recursively_include(package_data, 'pythonforandroid/bootstraps',\n ['*.properties', '*.xml', '*.java', '*.tmpl', '*.txt', '*.png',\n '*.mk', '*.c', '*.h', '*.py', '*.sh', '*.jpg', '*.aidl', ])\n", "issue": "Jpeg recipe is broken\nIt is missing /home/brussee/.local/lib/python2.7/site-packages/pythonforandroid/recipes/jpeg/Application.mk\nPerpaps just the path is incorrect?\n\n```\n[INFO]: Prebuilding jpeg for armeabi\n[INFO]: jpeg has no prebuild_armeabi, skipping\n[DEBUG]: -> running cp /home/brussee/.local/lib/python2.7/site-packages/pythonforandroid/recipes/jpeg/Application.mk /home/brussee/.local/share/python-for-android/build/other_builds/jpeg/armeabi/jpeg/Application.mk\n[DEBUG]: /bin/cp: cannot stat \u2018/home/brussee/.local/lib/python2.7/site-packages/pythonforandroid/recipes/jpeg/Application.mk\u2019: No such file or directory\nTraceback (most recent call last):\n File \"/home/brussee/.local/bin/p4a\", line 9, in <module>\n load_entry_point('python-for-android==0.3', 'console_scripts', 'p4a')()\n File \"/home/brussee/.local/lib/python2.7/site-packages/pythonforandroid/toolchain.py\", line 708, in main\n ToolchainCL()\n File \"/home/brussee/.local/lib/python2.7/site-packages/pythonforandroid/toolchain.py\", line 323, in __init__\n getattr(self, args.command)(unknown)\n File \"/home/brussee/.local/lib/python2.7/site-packages/pythonforandroid/toolchain.py\", line 105, in wrapper_func\n build_dist_from_args(ctx, dist, dist_args)\n File \"/home/brussee/.local/lib/python2.7/site-packages/pythonforandroid/toolchain.py\", line 142, in build_dist_from_args\n build_recipes(build_order, python_modules, ctx)\n File \"/home/brussee/.local/lib/python2.7/site-packages/pythonforandroid/build.py\", line 543, in build_recipes\n recipe.prebuild_arch(arch)\n File \"/home/brussee/.local/lib/python2.7/site-packages/pythonforandroid/recipes/jpeg/__init__.py\", line 22, in prebuild_arch\n shprint(sh.cp, join(self.get_recipe_dir(), 'Application.mk'), app_mk)\n File \"/home/brussee/.local/lib/python2.7/site-packages/pythonforandroid/logger.py\", line 160, in shprint\n for line in output:\n File \"/home/brussee/.local/lib/python2.7/site-packages/sh.py\", line 565, in next\n self.wait()\n File \"/home/brussee/.local/lib/python2.7/site-packages/sh.py\", line 500, in wait\n self.handle_command_exit_code(exit_code)\n File \"/home/brussee/.local/lib/python2.7/site-packages/sh.py\", line 516, in handle_command_exit_code\n raise exc(self.ran, self.process.stdout, self.process.stderr)\nsh.ErrorReturnCode_1\n```\n\n", "before_files": [{"content": "\nfrom setuptools import setup, find_packages\nfrom os import walk\nfrom os.path import join, dirname, sep\nimport os\nimport glob\n\n# NOTE: All package data should also be set in MANIFEST.in\n\npackages = find_packages()\n\npackage_data = {'': ['*.tmpl',\n '*.patch', ], }\n\ndata_files = []\n\n# By specifying every file manually, package_data will be able to\n# include them in binary distributions. 
Note that we have to add\n# everything as a 'pythonforandroid' rule, using '' apparently doesn't\n# work.\ndef recursively_include(results, directory, patterns):\n for root, subfolders, files in walk(directory):\n for fn in files:\n if not any([glob.fnmatch.fnmatch(fn, pattern) for pattern in patterns]):\n continue\n filename = join(root, fn)\n directory = 'pythonforandroid'\n if directory not in results:\n results[directory] = []\n results[directory].append(join(*filename.split(sep)[1:]))\n\nrecursively_include(package_data, 'pythonforandroid/recipes',\n ['*.patch', 'Setup*', '*.pyx', '*.py', '*.c', '*.h', ])\nrecursively_include(package_data, 'pythonforandroid/bootstraps',\n ['*.properties', '*.xml', '*.java', '*.tmpl', '*.txt', '*.png',\n '*.mk', '*.c', '*.h', '*.py', '*.sh', '*.jpg', '*.aidl', ])\nrecursively_include(package_data, 'pythonforandroid/bootstraps',\n ['sdl-config', ])\nrecursively_include(package_data, 'pythonforandroid',\n ['liblink', 'biglink', 'liblink.sh'])\n\nsetup(name='python-for-android',\n version='0.3',\n description='Android APK packager for Python scripts and apps',\n author='The Kivy team',\n author_email='[email protected]',\n url='https://github.com/kivy/python-for-android', \n license='MIT', \n install_requires=['appdirs', 'colorama>0.3', 'sh', 'jinja2', 'argparse',\n 'six'],\n entry_points={\n 'console_scripts': [\n 'python-for-android = pythonforandroid.toolchain:main',\n 'p4a = pythonforandroid.toolchain:main',\n ],\n 'distutils.commands': [\n 'bdist_apk = pythonforandroid.bdist_apk:BdistAPK',\n ],\n },\n classifiers = [\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: OS Independent',\n 'Operating System :: POSIX :: Linux',\n 'Operating System :: MacOS :: MacOS X',\n 'Programming Language :: C',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 3',\n 'Topic :: Software Development',\n 'Topic :: Utilities',\n ],\n packages=packages,\n package_data=package_data,\n )\n", "path": "setup.py"}]} | 1,983 | 168 |
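
For clarity on why adding `'*.mk'` fixes the jpeg recipe: `recursively_include` keeps only files whose names match one of the fnmatch patterns, so `Application.mk` was silently dropped from the installed package. A tiny demonstration with made-up file names:

```python
# Illustration of the fnmatch filtering used by recursively_include above.
import fnmatch

patterns = ['*.patch', 'Setup*', '*.pyx', '*.py', '*.c', '*.h', '*.mk']
candidates = ['Application.mk', '__init__.py', 'README.rst']

for fn in candidates:
    keep = any(fnmatch.fnmatch(fn, pattern) for pattern in patterns)
    print(fn, '->', 'copied' if keep else 'skipped')
# With '*.mk' present, Application.mk ships with the package and the
# jpeg recipe's prebuild cp can find it; README.rst is still skipped.
```
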
gh_patches_debug_5623 | rasdani/github-patches | git_diff | spack__spack-3415 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
bison doesn't find m4 at run time
While building `flex`, I had `bison` fail because it could not execute `m4`. The reason was that I had uninstalled the `m4` package (via Spack) which `bison` installed as its build dependency. Then, `bison` failed since this `m4` executable did not exist any more.
I think `m4` needs to be a run-time dependency of `bison` as well.
</issue>
<code>
[start of var/spack/repos/builtin/packages/autoconf/package.py]
1 ##############################################################################
2 # Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
3 # Produced at the Lawrence Livermore National Laboratory.
4 #
5 # This file is part of Spack.
6 # Created by Todd Gamblin, [email protected], All rights reserved.
7 # LLNL-CODE-647188
8 #
9 # For details, see https://github.com/llnl/spack
10 # Please also see the LICENSE file for our notice and the LGPL.
11 #
12 # This program is free software; you can redistribute it and/or modify
13 # it under the terms of the GNU Lesser General Public License (as
14 # published by the Free Software Foundation) version 2.1, February 1999.
15 #
16 # This program is distributed in the hope that it will be useful, but
17 # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
18 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
19 # conditions of the GNU Lesser General Public License for more details.
20 #
21 # You should have received a copy of the GNU Lesser General Public
22 # License along with this program; if not, write to the Free Software
23 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 ##############################################################################
25 from spack import *
26
27
28 class Autoconf(AutotoolsPackage):
29 """Autoconf -- system configuration part of autotools"""
30
31 homepage = 'https://www.gnu.org/software/autoconf/'
32 url = 'http://ftp.gnu.org/gnu/autoconf/autoconf-2.69.tar.gz'
33
34 version('2.69', '82d05e03b93e45f5a39b828dc9c6c29b')
35 version('2.62', '6c1f3b3734999035d77da5024aab4fbd')
36 version('2.59', 'd4d45eaa1769d45e59dcb131a4af17a0')
37 version('2.13', '9de56d4a161a723228220b0f425dc711')
38
39 depends_on('[email protected]:', type='build')
40
41 build_directory = 'spack-build'
42
43 def _make_executable(self, name):
44 return Executable(join_path(self.prefix.bin, name))
45
46 def setup_dependent_package(self, module, dependent_spec):
47 # Autoconf is very likely to be a build dependency,
48 # so we add the tools it provides to the dependent module
49 executables = ['autoconf',
50 'autoheader',
51 'autom4te',
52 'autoreconf',
53 'autoscan',
54 'autoupdate',
55 'ifnames']
56 for name in executables:
57 setattr(module, name, self._make_executable(name))
58
[end of var/spack/repos/builtin/packages/autoconf/package.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/var/spack/repos/builtin/packages/autoconf/package.py b/var/spack/repos/builtin/packages/autoconf/package.py
--- a/var/spack/repos/builtin/packages/autoconf/package.py
+++ b/var/spack/repos/builtin/packages/autoconf/package.py
@@ -36,7 +36,9 @@
version('2.59', 'd4d45eaa1769d45e59dcb131a4af17a0')
version('2.13', '9de56d4a161a723228220b0f425dc711')
- depends_on('[email protected]:', type='build')
+ # Note: m4 is not a pure build-time dependency of autoconf. m4 is
+ # needed when autoconf runs, not only when autoconf is built.
+ depends_on('[email protected]:', type=('build', 'run'))
build_directory = 'spack-build'
| {"golden_diff": "diff --git a/var/spack/repos/builtin/packages/autoconf/package.py b/var/spack/repos/builtin/packages/autoconf/package.py\n--- a/var/spack/repos/builtin/packages/autoconf/package.py\n+++ b/var/spack/repos/builtin/packages/autoconf/package.py\n@@ -36,7 +36,9 @@\n version('2.59', 'd4d45eaa1769d45e59dcb131a4af17a0')\n version('2.13', '9de56d4a161a723228220b0f425dc711')\n \n- depends_on('[email protected]:', type='build')\n+ # Note: m4 is not a pure build-time dependency of autoconf. m4 is\n+ # needed when autoconf runs, not only when autoconf is built.\n+ depends_on('[email protected]:', type=('build', 'run'))\n \n build_directory = 'spack-build'\n", "issue": "bison doesn't find m4 at run time\nWhile building `flex`, I had `bison` fail because it could not execute `m4`. The reason was that I had uninstalled the `m4` package (via Spack) which `bison` installed as its build dependency. Then, `bison` failed since this `m4` executable did not exist any more.\r\n\r\nI think `m4` needs to be a run-time dependency of `bison` as well.\n", "before_files": [{"content": "##############################################################################\n# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.\n# Produced at the Lawrence Livermore National Laboratory.\n#\n# This file is part of Spack.\n# Created by Todd Gamblin, [email protected], All rights reserved.\n# LLNL-CODE-647188\n#\n# For details, see https://github.com/llnl/spack\n# Please also see the LICENSE file for our notice and the LGPL.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License (as\n# published by the Free Software Foundation) version 2.1, February 1999.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and\n# conditions of the GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n##############################################################################\nfrom spack import *\n\n\nclass Autoconf(AutotoolsPackage):\n \"\"\"Autoconf -- system configuration part of autotools\"\"\"\n\n homepage = 'https://www.gnu.org/software/autoconf/'\n url = 'http://ftp.gnu.org/gnu/autoconf/autoconf-2.69.tar.gz'\n\n version('2.69', '82d05e03b93e45f5a39b828dc9c6c29b')\n version('2.62', '6c1f3b3734999035d77da5024aab4fbd')\n version('2.59', 'd4d45eaa1769d45e59dcb131a4af17a0')\n version('2.13', '9de56d4a161a723228220b0f425dc711')\n\n depends_on('[email protected]:', type='build')\n\n build_directory = 'spack-build'\n\n def _make_executable(self, name):\n return Executable(join_path(self.prefix.bin, name))\n\n def setup_dependent_package(self, module, dependent_spec):\n # Autoconf is very likely to be a build dependency,\n # so we add the tools it provides to the dependent module\n executables = ['autoconf',\n 'autoheader',\n 'autom4te',\n 'autoreconf',\n 'autoscan',\n 'autoupdate',\n 'ifnames']\n for name in executables:\n setattr(module, name, self._make_executable(name))\n", "path": "var/spack/repos/builtin/packages/autoconf/package.py"}]} | 1,421 | 238 |
gh_patches_debug_13432 | rasdani/github-patches | git_diff | nilearn__nilearn-936 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Plots won't show up
Hi everyone,
Using nilearn on OS X El Capitan, no plots show up when executing example scripts such as plot_demo_glass_brain.py. pip and IPython are linked to the same folders, and matplotlib alone does show plots. All dependencies are installed and up to date. I don't really know how to fix this.
</issue>
<code>
[start of nilearn/plotting/__init__.py]
1 """
2 Plotting code for nilearn
3 """
4 # Authors: Chris Filo Gorgolewski, Gael Varoquaux
5
6 ###############################################################################
7 # Make sure that we don't get DISPLAY problems when running without X on
8 # unices
9 def _set_mpl_backend():
10 try:
11 # We are doing local imports here to avoid poluting our namespace
12 import matplotlib
13 import os
14 # Set the backend to a non-interactive one for unices without X
15 if os.name == 'posix' and 'DISPLAY' not in os.environ:
16 matplotlib.use('Agg')
17 except ImportError:
18 from .._utils.testing import skip_if_running_nose
19 # No need to fail when running tests
20 skip_if_running_nose('matplotlib not installed')
21 raise
22 else:
23 from ..version import (_import_module_with_version_check,
24 OPTIONAL_MATPLOTLIB_MIN_VERSION)
25 # When matplotlib was successfully imported we need to check
26 # that the version is greater that the minimum required one
27 _import_module_with_version_check('matplotlib',
28 OPTIONAL_MATPLOTLIB_MIN_VERSION)
29
30 _set_mpl_backend()
31
32 ###############################################################################
33
34 from . import cm
35 from .img_plotting import plot_img, plot_anat, plot_epi, \
36 plot_roi, plot_stat_map, plot_glass_brain, plot_connectome, \
37 plot_prob_atlas, show
38 from .find_cuts import find_xyz_cut_coords, find_cut_slices
39
40 __all__ = ['cm', 'plot_img', 'plot_anat', 'plot_epi',
41 'plot_roi', 'plot_stat_map', 'plot_glass_brain',
42 'plot_connectome', 'plot_prob_atlas',
43 'find_xyz_cut_coords', 'find_cut_slices',
44 'show']
45
[end of nilearn/plotting/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/nilearn/plotting/__init__.py b/nilearn/plotting/__init__.py
--- a/nilearn/plotting/__init__.py
+++ b/nilearn/plotting/__init__.py
@@ -11,8 +11,12 @@
# We are doing local imports here to avoid poluting our namespace
import matplotlib
import os
+ import sys
# Set the backend to a non-interactive one for unices without X
- if os.name == 'posix' and 'DISPLAY' not in os.environ:
+ if (os.name == 'posix' and 'DISPLAY' not in os.environ
+ and not (sys.platform == 'darwin'
+ and matplotlib.get_backend() == 'MacOSX'
+ )):
matplotlib.use('Agg')
except ImportError:
from .._utils.testing import skip_if_running_nose
| {"golden_diff": "diff --git a/nilearn/plotting/__init__.py b/nilearn/plotting/__init__.py\n--- a/nilearn/plotting/__init__.py\n+++ b/nilearn/plotting/__init__.py\n@@ -11,8 +11,12 @@\n # We are doing local imports here to avoid poluting our namespace\n import matplotlib\n import os\n+ import sys\n # Set the backend to a non-interactive one for unices without X\n- if os.name == 'posix' and 'DISPLAY' not in os.environ:\n+ if (os.name == 'posix' and 'DISPLAY' not in os.environ\n+ and not (sys.platform == 'darwin'\n+ and matplotlib.get_backend() == 'MacOSX'\n+ )):\n matplotlib.use('Agg')\n except ImportError:\n from .._utils.testing import skip_if_running_nose\n", "issue": "Plots won't show up\nHi everyone,\n\nusing nilearn on OSX El Capitan, when executing the example scripts like plot_demo_glass_brain.py no plots will show up. PiP and iPython linked to the same folders, matplotlib alone does show plots. All dependencies are up-to-date and installed. Don't really know how to fix this.\n\n", "before_files": [{"content": "\"\"\"\nPlotting code for nilearn\n\"\"\"\n# Authors: Chris Filo Gorgolewski, Gael Varoquaux\n\n###############################################################################\n# Make sure that we don't get DISPLAY problems when running without X on\n# unices\ndef _set_mpl_backend():\n try:\n # We are doing local imports here to avoid poluting our namespace\n import matplotlib\n import os\n # Set the backend to a non-interactive one for unices without X\n if os.name == 'posix' and 'DISPLAY' not in os.environ:\n matplotlib.use('Agg')\n except ImportError:\n from .._utils.testing import skip_if_running_nose\n # No need to fail when running tests\n skip_if_running_nose('matplotlib not installed')\n raise\n else:\n from ..version import (_import_module_with_version_check,\n OPTIONAL_MATPLOTLIB_MIN_VERSION)\n # When matplotlib was successfully imported we need to check\n # that the version is greater that the minimum required one\n _import_module_with_version_check('matplotlib',\n OPTIONAL_MATPLOTLIB_MIN_VERSION)\n\n_set_mpl_backend()\n\n###############################################################################\n\nfrom . import cm\nfrom .img_plotting import plot_img, plot_anat, plot_epi, \\\n plot_roi, plot_stat_map, plot_glass_brain, plot_connectome, \\\n plot_prob_atlas, show\nfrom .find_cuts import find_xyz_cut_coords, find_cut_slices\n\n__all__ = ['cm', 'plot_img', 'plot_anat', 'plot_epi',\n 'plot_roi', 'plot_stat_map', 'plot_glass_brain',\n 'plot_connectome', 'plot_prob_atlas',\n 'find_xyz_cut_coords', 'find_cut_slices',\n 'show']\n", "path": "nilearn/plotting/__init__.py"}]} | 1,078 | 200 |
gh_patches_debug_5579 | rasdani/github-patches | git_diff | mitmproxy__mitmproxy-6373 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Outfile -w cannot be loaded
#### Problem Description
When the dump file gets bigger, at about 100 MB, it is not loaded anymore.
#### Steps to reproduce the behavior:
Make a big outfile and try to open it with a new instance of mitmweb.
#### System Information
Mitmweb Windows 10 6.0.2
mitmweb Not loading my saved flow
So I recorded some actions with mitmweb and saved the flow.
Then I closed mitmweb and reopened it. When I went to open the saved flow file (which is 100 megabytes), the requests and responses did not appear.
</issue>
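
To make the reproduction step concrete, one way to synthesize a large dump is with mitmproxy's bundled test helpers. This is a hedged sketch that assumes `mitmproxy.test.tflow` and `mitmproxy.io.FlowWriter` behave as they do in mitmproxy's own test suite.

```python
# Sketch: write synthetic flows until the file passes ~100 MB, then try
# loading it with a fresh mitmweb instance (e.g. `mitmweb --rfile big.flows`).
from mitmproxy import io
from mitmproxy.test import tflow

with open("big.flows", "wb") as fp:
    writer = io.FlowWriter(fp)
    while fp.tell() < 100 * 1024 * 1024:
        writer.add(tflow.tflow(resp=True))
```
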
<code>
[start of mitmproxy/tools/web/master.py]
1 import errno
2 import logging
3
4 import tornado.httpserver
5 import tornado.ioloop
6
7 from mitmproxy import addons
8 from mitmproxy import flow
9 from mitmproxy import log
10 from mitmproxy import master
11 from mitmproxy import options
12 from mitmproxy import optmanager
13 from mitmproxy.addons import errorcheck
14 from mitmproxy.addons import eventstore
15 from mitmproxy.addons import intercept
16 from mitmproxy.addons import readfile
17 from mitmproxy.addons import termlog
18 from mitmproxy.addons import view
19 from mitmproxy.addons.proxyserver import Proxyserver
20 from mitmproxy.tools.web import app
21 from mitmproxy.tools.web import static_viewer
22 from mitmproxy.tools.web import webaddons
23
24 logger = logging.getLogger(__name__)
25
26
27 class WebMaster(master.Master):
28 def __init__(self, opts: options.Options, with_termlog: bool = True):
29 super().__init__(opts)
30 self.view = view.View()
31 self.view.sig_view_add.connect(self._sig_view_add)
32 self.view.sig_view_remove.connect(self._sig_view_remove)
33 self.view.sig_view_update.connect(self._sig_view_update)
34 self.view.sig_view_refresh.connect(self._sig_view_refresh)
35
36 self.events = eventstore.EventStore()
37 self.events.sig_add.connect(self._sig_events_add)
38 self.events.sig_refresh.connect(self._sig_events_refresh)
39
40 self.options.changed.connect(self._sig_options_update)
41
42 if with_termlog:
43 self.addons.add(termlog.TermLog())
44 self.addons.add(*addons.default_addons())
45 self.addons.add(
46 webaddons.WebAddon(),
47 intercept.Intercept(),
48 readfile.ReadFile(),
49 static_viewer.StaticViewer(),
50 self.view,
51 self.events,
52 errorcheck.ErrorCheck(),
53 )
54 self.app = app.Application(self, self.options.web_debug)
55 self.proxyserver: Proxyserver = self.addons.get("proxyserver")
56 self.proxyserver.servers.changed.connect(self._sig_servers_changed)
57
58 def _sig_view_add(self, flow: flow.Flow) -> None:
59 app.ClientConnection.broadcast(
60 resource="flows", cmd="add", data=app.flow_to_json(flow)
61 )
62
63 def _sig_view_update(self, flow: flow.Flow) -> None:
64 app.ClientConnection.broadcast(
65 resource="flows", cmd="update", data=app.flow_to_json(flow)
66 )
67
68 def _sig_view_remove(self, flow: flow.Flow, index: int) -> None:
69 app.ClientConnection.broadcast(resource="flows", cmd="remove", data=flow.id)
70
71 def _sig_view_refresh(self) -> None:
72 app.ClientConnection.broadcast(resource="flows", cmd="reset")
73
74 def _sig_events_add(self, entry: log.LogEntry) -> None:
75 app.ClientConnection.broadcast(
76 resource="events", cmd="add", data=app.logentry_to_json(entry)
77 )
78
79 def _sig_events_refresh(self) -> None:
80 app.ClientConnection.broadcast(resource="events", cmd="reset")
81
82 def _sig_options_update(self, updated: set[str]) -> None:
83 options_dict = optmanager.dump_dicts(self.options, updated)
84 app.ClientConnection.broadcast(
85 resource="options", cmd="update", data=options_dict
86 )
87
88 def _sig_servers_changed(self) -> None:
89 app.ClientConnection.broadcast(
90 resource="state",
91 cmd="update",
92 data={"servers": [s.to_json() for s in self.proxyserver.servers]},
93 )
94
95 async def running(self):
96 # Register tornado with the current event loop
97 tornado.ioloop.IOLoop.current()
98
99 # Add our web app.
100 http_server = tornado.httpserver.HTTPServer(self.app)
101 try:
102 http_server.listen(self.options.web_port, self.options.web_host)
103 except OSError as e:
104 message = f"Web server failed to listen on {self.options.web_host or '*'}:{self.options.web_port} with {e}"
105 if e.errno == errno.EADDRINUSE:
106 message += f"\nTry specifying a different port by using `--set web_port={self.options.web_port + 2}`."
107 raise OSError(e.errno, message, e.filename) from e
108
109 logger.info(
110 f"Web server listening at http://{self.options.web_host}:{self.options.web_port}/",
111 )
112
113 return await super().running()
114
[end of mitmproxy/tools/web/master.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mitmproxy/tools/web/master.py b/mitmproxy/tools/web/master.py
--- a/mitmproxy/tools/web/master.py
+++ b/mitmproxy/tools/web/master.py
@@ -97,7 +97,9 @@
tornado.ioloop.IOLoop.current()
# Add our web app.
- http_server = tornado.httpserver.HTTPServer(self.app)
+ http_server = tornado.httpserver.HTTPServer(
+ self.app, max_buffer_size=2**32
+ ) # 4GB
try:
http_server.listen(self.options.web_port, self.options.web_host)
except OSError as e:
| {"golden_diff": "diff --git a/mitmproxy/tools/web/master.py b/mitmproxy/tools/web/master.py\n--- a/mitmproxy/tools/web/master.py\n+++ b/mitmproxy/tools/web/master.py\n@@ -97,7 +97,9 @@\n tornado.ioloop.IOLoop.current()\n \n # Add our web app.\n- http_server = tornado.httpserver.HTTPServer(self.app)\n+ http_server = tornado.httpserver.HTTPServer(\n+ self.app, max_buffer_size=2**32\n+ ) # 4GB\n try:\n http_server.listen(self.options.web_port, self.options.web_host)\n except OSError as e:\n", "issue": "Outfile -w cannot be loaded\n#### Problem Description\nWhen the dump file is getting bigger, about 100mb it's not loaded anymore.\n\n#### Steps to reproduce the behavior:\nMake a big outfile and try to open it with an new instance of mitmweb.\n\n#### System Information\nMitmweb Windows 10 6.0.2\nmitmweb Not loading my saved flow\nSo I recorded some actions with mitmweb and saved the flow.\r\n\r\nThen I closed mitmweb, and reopened it. Then I went to open the saved flow file (which is 100 megabytes). But when I open it, the requests and responses do not appear?\r\n\n", "before_files": [{"content": "import errno\nimport logging\n\nimport tornado.httpserver\nimport tornado.ioloop\n\nfrom mitmproxy import addons\nfrom mitmproxy import flow\nfrom mitmproxy import log\nfrom mitmproxy import master\nfrom mitmproxy import options\nfrom mitmproxy import optmanager\nfrom mitmproxy.addons import errorcheck\nfrom mitmproxy.addons import eventstore\nfrom mitmproxy.addons import intercept\nfrom mitmproxy.addons import readfile\nfrom mitmproxy.addons import termlog\nfrom mitmproxy.addons import view\nfrom mitmproxy.addons.proxyserver import Proxyserver\nfrom mitmproxy.tools.web import app\nfrom mitmproxy.tools.web import static_viewer\nfrom mitmproxy.tools.web import webaddons\n\nlogger = logging.getLogger(__name__)\n\n\nclass WebMaster(master.Master):\n def __init__(self, opts: options.Options, with_termlog: bool = True):\n super().__init__(opts)\n self.view = view.View()\n self.view.sig_view_add.connect(self._sig_view_add)\n self.view.sig_view_remove.connect(self._sig_view_remove)\n self.view.sig_view_update.connect(self._sig_view_update)\n self.view.sig_view_refresh.connect(self._sig_view_refresh)\n\n self.events = eventstore.EventStore()\n self.events.sig_add.connect(self._sig_events_add)\n self.events.sig_refresh.connect(self._sig_events_refresh)\n\n self.options.changed.connect(self._sig_options_update)\n\n if with_termlog:\n self.addons.add(termlog.TermLog())\n self.addons.add(*addons.default_addons())\n self.addons.add(\n webaddons.WebAddon(),\n intercept.Intercept(),\n readfile.ReadFile(),\n static_viewer.StaticViewer(),\n self.view,\n self.events,\n errorcheck.ErrorCheck(),\n )\n self.app = app.Application(self, self.options.web_debug)\n self.proxyserver: Proxyserver = self.addons.get(\"proxyserver\")\n self.proxyserver.servers.changed.connect(self._sig_servers_changed)\n\n def _sig_view_add(self, flow: flow.Flow) -> None:\n app.ClientConnection.broadcast(\n resource=\"flows\", cmd=\"add\", data=app.flow_to_json(flow)\n )\n\n def _sig_view_update(self, flow: flow.Flow) -> None:\n app.ClientConnection.broadcast(\n resource=\"flows\", cmd=\"update\", data=app.flow_to_json(flow)\n )\n\n def _sig_view_remove(self, flow: flow.Flow, index: int) -> None:\n app.ClientConnection.broadcast(resource=\"flows\", cmd=\"remove\", data=flow.id)\n\n def _sig_view_refresh(self) -> None:\n app.ClientConnection.broadcast(resource=\"flows\", cmd=\"reset\")\n\n def _sig_events_add(self, entry: log.LogEntry) -> None:\n 
app.ClientConnection.broadcast(\n resource=\"events\", cmd=\"add\", data=app.logentry_to_json(entry)\n )\n\n def _sig_events_refresh(self) -> None:\n app.ClientConnection.broadcast(resource=\"events\", cmd=\"reset\")\n\n def _sig_options_update(self, updated: set[str]) -> None:\n options_dict = optmanager.dump_dicts(self.options, updated)\n app.ClientConnection.broadcast(\n resource=\"options\", cmd=\"update\", data=options_dict\n )\n\n def _sig_servers_changed(self) -> None:\n app.ClientConnection.broadcast(\n resource=\"state\",\n cmd=\"update\",\n data={\"servers\": [s.to_json() for s in self.proxyserver.servers]},\n )\n\n async def running(self):\n # Register tornado with the current event loop\n tornado.ioloop.IOLoop.current()\n\n # Add our web app.\n http_server = tornado.httpserver.HTTPServer(self.app)\n try:\n http_server.listen(self.options.web_port, self.options.web_host)\n except OSError as e:\n message = f\"Web server failed to listen on {self.options.web_host or '*'}:{self.options.web_port} with {e}\"\n if e.errno == errno.EADDRINUSE:\n message += f\"\\nTry specifying a different port by using `--set web_port={self.options.web_port + 2}`.\"\n raise OSError(e.errno, message, e.filename) from e\n\n logger.info(\n f\"Web server listening at http://{self.options.web_host}:{self.options.web_port}/\",\n )\n\n return await super().running()\n", "path": "mitmproxy/tools/web/master.py"}]} | 1,848 | 141 |
gh_patches_debug_54195 | rasdani/github-patches | git_diff | vyperlang__vyper-1275 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
State leakage across test runs when using parallelization
### What is wrong.
The tests at `tests/examples/safe_remote_purchase/test_safe_remote_purchase.py` fail when run using `pytest-xdist` to parallelize test runs.
```
def test_abort(w3, assert_tx_failed, check_balance, get_contract, contract_code):
a0, a1, a2 = w3.eth.accounts[:3]
c = get_contract(contract_code, value=2)
# Only sender can trigger refund
assert_tx_failed(lambda: c.abort(transact={'from': a2}))
# Refund works correctly
c.abort(transact={'from': a0, 'gasPrice': 0})
> assert check_balance() == (INIT_BAL_a0 - w3.toWei(2, 'ether'), INIT_BAL_a1)
E assert (100000000000...0000000000000) == (9999980000000...0000000000000)
E At index 0 diff: 1000000000000000000000000 != 999998000000000000000000
E Use -v to get the full diff
tests/examples/safe_remote_purchase/test_safe_remote_purchase.py:62: AssertionError
```
Replicate by installing `pytest-xdist` and running with
```
pytest tests/examples/safe_remote_purchase/test_safe_remote_purchase.py -n 2
```
It's likely this isn't deterministic and you may need to run the full suite.
### How can it be fixed.
Figure out where statefulness is leaking across test runs and fix it.
</issue>
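One classic culprit with this stack is a chain fixture scoped wider than a single test, so account balances carry over between tests that xdist distributes differently. Below is a minimal sketch of the isolation pattern, assuming eth-tester and web3 as pinned above; the fixture body is illustrative, not the project's actual conftest.

```python
import pytest
from eth_tester import EthereumTester
from web3 import Web3


@pytest.fixture  # function-scoped: every test starts from a pristine chain
def w3():
    # Widening this to module or session scope reintroduces exactly the
    # balance leakage seen in test_abort above.
    return Web3(Web3.EthereumTesterProvider(EthereumTester()))
```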
<code>
[start of setup.py]
1 # -*- coding: utf-8 -*-
2
3 from setuptools import setup, find_packages
4
5
6 test_deps = [
7 'pytest',
8 'pytest-cov',
9 'py-evm==0.2.0a34',
10 'eth-tester==0.1.0b33',
11 'web3==4.8.2',
12 ]
13
14
15 extras = {
16 'test': test_deps
17 }
18
19
20 setup(
21 name='vyper',
22 # *IMPORTANT*: Don't manually change the version here. Use the 'bumpversion' utility.
23 version='0.1.0-beta.8',
24 description='Vyper Programming Language for Ethereum',
25 long_description_markdown_filename='README.md',
26 author='Vitalik Buterin',
27 author_email='',
28 url='https://github.com/ethereum/vyper',
29 license="MIT",
30 keywords='ethereum',
31 include_package_data=True,
32 packages=find_packages(exclude=('tests', 'docs')),
33 python_requires='>=3.6',
34 py_modules=['vyper'],
35 install_requires=[
36 'pycryptodome>=3.5.1,<4',
37 ],
38 setup_requires=[
39 'pytest-runner',
40 'setuptools-markdown'
41 ],
42 tests_require=test_deps,
43 extras_require=extras,
44 scripts=[
45 'bin/vyper',
46 'bin/vyper-serve',
47 'bin/vyper-lll'
48 ],
49 classifiers=[
50 'Intended Audience :: Developers',
51 'License :: OSI Approved :: MIT License',
52 'Programming Language :: Python :: 3.6',
53 ]
54 )
55
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -4,11 +4,12 @@
test_deps = [
- 'pytest',
- 'pytest-cov',
- 'py-evm==0.2.0a34',
- 'eth-tester==0.1.0b33',
- 'web3==4.8.2',
+ 'pytest>=3.6',
+ 'pytest-cov==2.4.0',
+ 'pytest-xdist==1.18.1',
+ 'py-evm==0.2.0a39',
+ 'eth-tester==0.1.0b37',
+ 'web3==5.0.0a6'
]
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -4,11 +4,12 @@\n \n \n test_deps = [\n- 'pytest',\n- 'pytest-cov',\n- 'py-evm==0.2.0a34',\n- 'eth-tester==0.1.0b33',\n- 'web3==4.8.2',\n+ 'pytest>=3.6',\n+ 'pytest-cov==2.4.0',\n+ 'pytest-xdist==1.18.1',\n+ 'py-evm==0.2.0a39',\n+ 'eth-tester==0.1.0b37',\n+ 'web3==5.0.0a6'\n ]\n", "issue": "State leakage across test runs when using parrellization\n### What is wrong.\r\n\r\nThe tests at `tests/examples/safe_remote_purchase/test_safe_remote_purchase.py` fail when run using `pytest-xdist` to parallelize test runs.\r\n\r\n```\r\n def test_abort(w3, assert_tx_failed, check_balance, get_contract, contract_code):\r\n a0, a1, a2 = w3.eth.accounts[:3]\r\n c = get_contract(contract_code, value=2)\r\n # Only sender can trigger refund\r\n assert_tx_failed(lambda: c.abort(transact={'from': a2}))\r\n # Refund works correctly\r\n c.abort(transact={'from': a0, 'gasPrice': 0})\r\n> assert check_balance() == (INIT_BAL_a0 - w3.toWei(2, 'ether'), INIT_BAL_a1)\r\nE assert (100000000000...0000000000000) == (9999980000000...0000000000000)\r\nE At index 0 diff: 1000000000000000000000000 != 999998000000000000000000\r\nE Use -v to get the full diff\r\n\r\ntests/examples/safe_remote_purchase/test_safe_remote_purchase.py:62: AssertionError\r\n```\r\n\r\nreplicate by installing `pytest-xdist` and running with\r\n\r\n```\r\npytest tests/examples/safe_remote_purchase/test_safe_remote_purchase.py -n 2\r\n```\r\n\r\nIt's likely this isn't deterministic and you may need to run the full suite.\r\n\r\n### How can it be fixed.\r\n\r\nFigure out where statefulness is leaking across test runs and fix it.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom setuptools import setup, find_packages\n\n\ntest_deps = [\n 'pytest',\n 'pytest-cov',\n 'py-evm==0.2.0a34',\n 'eth-tester==0.1.0b33',\n 'web3==4.8.2',\n]\n\n\nextras = {\n 'test': test_deps\n}\n\n\nsetup(\n name='vyper',\n # *IMPORTANT*: Don't manually change the version here. Use the 'bumpversion' utility.\n version='0.1.0-beta.8',\n description='Vyper Programming Language for Ethereum',\n long_description_markdown_filename='README.md',\n author='Vitalik Buterin',\n author_email='',\n url='https://github.com/ethereum/vyper',\n license=\"MIT\",\n keywords='ethereum',\n include_package_data=True,\n packages=find_packages(exclude=('tests', 'docs')),\n python_requires='>=3.6',\n py_modules=['vyper'],\n install_requires=[\n 'pycryptodome>=3.5.1,<4',\n ],\n setup_requires=[\n 'pytest-runner',\n 'setuptools-markdown'\n ],\n tests_require=test_deps,\n extras_require=extras,\n scripts=[\n 'bin/vyper',\n 'bin/vyper-serve',\n 'bin/vyper-lll'\n ],\n classifiers=[\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3.6',\n ]\n)\n", "path": "setup.py"}]} | 1,367 | 177 |
gh_patches_debug_217 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-3701 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
testing 4293: can't edit polls somebody else created even if I have the rights
**URL:** https://meinberlin-dev.liqd.net/dashboard/modules/umfrage-24-4/poll/
**user:** group member
**expected behaviour:** I can edit polls somebody else created if I have the right to do so
**behaviour:** cannot save, getting a red alert
**important screensize:**
**device & browser:**
**Comment/Question:** also true for new polls whose rights have been given to me. For polls I started myself it is fine.

Screenshot?
</issue>
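One plausible direction for the fix, assuming adhocracy4 ships an `is_project_admin` predicate alongside the predicates already imported: replace the initiator-or-moderator check with a predicate that also covers group members holding project rights.

```python
import rules

from adhocracy4.modules import predicates as module_predicates

# Broader than initiator-or-moderator: anyone administering the project
# (including group members granted the rights) may change the poll.
rules.set_perm(
    'a4polls.change_poll',
    module_predicates.is_project_admin,
)
```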
<code>
[start of meinberlin/apps/polls/rules.py]
1 import rules
2
3 from adhocracy4.modules import predicates as module_predicates
4
5 rules.set_perm(
6 'a4polls.change_poll',
7 module_predicates.is_context_initiator |
8 module_predicates.is_context_moderator
9 )
10
[end of meinberlin/apps/polls/rules.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/meinberlin/apps/polls/rules.py b/meinberlin/apps/polls/rules.py
--- a/meinberlin/apps/polls/rules.py
+++ b/meinberlin/apps/polls/rules.py
@@ -4,6 +4,5 @@
rules.set_perm(
'a4polls.change_poll',
- module_predicates.is_context_initiator |
- module_predicates.is_context_moderator
+ module_predicates.is_project_admin
)
| {"golden_diff": "diff --git a/meinberlin/apps/polls/rules.py b/meinberlin/apps/polls/rules.py\n--- a/meinberlin/apps/polls/rules.py\n+++ b/meinberlin/apps/polls/rules.py\n@@ -4,6 +4,5 @@\n \n rules.set_perm(\n 'a4polls.change_poll',\n- module_predicates.is_context_initiator |\n- module_predicates.is_context_moderator\n+ module_predicates.is_project_admin\n )\n", "issue": "testing 4293: can't edit polls somebody else created even if I have the rights\n**URL:** https://meinberlin-dev.liqd.net/dashboard/modules/umfrage-24-4/poll/\r\n**user:** group member\r\n**expected behaviour:** I can edit polls somebody else created if I have the right to do so\r\n**behaviour:** cannot save, getting an red altert\r\n**important screensize:**\r\n**device & browser:** \r\n **Comment/Question:** also true for new polls whose rights have been given to me. for polls I started myself it is fine.\r\n\r\n\r\n\r\nScreenshot?\r\n\n", "before_files": [{"content": "import rules\n\nfrom adhocracy4.modules import predicates as module_predicates\n\nrules.set_perm(\n 'a4polls.change_poll',\n module_predicates.is_context_initiator |\n module_predicates.is_context_moderator\n)\n", "path": "meinberlin/apps/polls/rules.py"}]} | 820 | 104 |
gh_patches_debug_19101 | rasdani/github-patches | git_diff | aio-libs-abandoned__aioredis-py-1075 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[2.0] Update setup.py Trove classifiers
The [classifiers](https://github.com/aio-libs/aioredis-py/blob/5a713fff3717094cca63e4a5f4b1cb7d6894a08f/setup.py#L25-L30) currently only list Python 3.6 and 3.7. This should be updated to include all versions that are tested. Or my personal preference is just to delete classifiers for minor versions, since probably no one will remember to update them when new versions of Python are released.
It also indicates the status as 4 - Beta. That should probably change when we release the final 2.0.0.
</issue>
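For concreteness, here is a sketch of what the classifier block could look like once aligned with the tested versions and a stable release; the exact version list is assumed from the CI matrix, not taken from the repository.

```python
classifiers = [
    "License :: OSI Approved :: MIT License",
    "Development Status :: 5 - Production/Stable",  # replaces "4 - Beta"
    "Programming Language :: Python",
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3.6",
    "Programming Language :: Python :: 3.7",
    "Programming Language :: Python :: 3.8",
    "Programming Language :: Python :: 3.9",
    "Programming Language :: Python :: 3 :: Only",
]
```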
<code>
[start of setup.py]
1 import os.path
2 import re
3
4 from setuptools import find_packages, setup
5
6
7 def read(*parts):
8 with open(os.path.join(*parts)) as f:
9 return f.read().strip()
10
11
12 def read_version():
13 regexp = re.compile(r"^__version__\W*=\W*\"([\d.abrc]+)\"")
14 init_py = os.path.join(os.path.dirname(__file__), "aioredis", "__init__.py")
15 with open(init_py) as f:
16 for line in f:
17 match = regexp.match(line)
18 if match is not None:
19 return match.group(1)
20 raise RuntimeError(f"Cannot find version in {init_py}")
21
22
23 classifiers = [
24 "License :: OSI Approved :: MIT License",
25 "Development Status :: 4 - Beta",
26 "Programming Language :: Python",
27 "Programming Language :: Python :: 3",
28 "Programming Language :: Python :: 3.6",
29 "Programming Language :: Python :: 3.7",
30 "Programming Language :: Python :: 3 :: Only",
31 "Operating System :: POSIX",
32 "Environment :: Web Environment",
33 "Intended Audience :: Developers",
34 "Topic :: Software Development",
35 "Topic :: Software Development :: Libraries",
36 "Framework :: AsyncIO",
37 ]
38
39 setup(
40 name="aioredis",
41 version=read_version(),
42 description="asyncio (PEP 3156) Redis support",
43 long_description="\n\n".join((read("README.md"), read("CHANGELOG.md"))),
44 long_description_content_type="text/markdown",
45 classifiers=classifiers,
46 platforms=["POSIX"],
47 url="https://github.com/aio-libs/aioredis",
48 license="MIT",
49 packages=find_packages(exclude=["tests"]),
50 install_requires=[
51 "async-timeout",
52 "typing-extensions",
53 ],
54 extras_require={
55 "hiredis": 'hiredis>=1.0; implementation_name=="cpython"',
56 },
57 package_data={"aioredis": ["py.typed"]},
58 python_requires=">=3.6",
59 include_package_data=True,
60 )
61
[end of setup.py]
[start of aioredis/__init__.py]
1 from aioredis.client import Redis, StrictRedis
2 from aioredis.connection import (
3 BlockingConnectionPool,
4 Connection,
5 ConnectionPool,
6 SSLConnection,
7 UnixDomainSocketConnection,
8 )
9 from aioredis.exceptions import (
10 AuthenticationError,
11 AuthenticationWrongNumberOfArgsError,
12 BusyLoadingError,
13 ChildDeadlockedError,
14 ConnectionError,
15 DataError,
16 InvalidResponse,
17 PubSubError,
18 ReadOnlyError,
19 RedisError,
20 ResponseError,
21 TimeoutError,
22 WatchError,
23 )
24 from aioredis.utils import from_url
25
26
27 def int_or_str(value):
28 try:
29 return int(value)
30 except ValueError:
31 return value
32
33
34 __version__ = "2.0.0b1"
35 VERSION = tuple(map(int_or_str, __version__.split(".")))
36
37 __all__ = [
38 "AuthenticationError",
39 "AuthenticationWrongNumberOfArgsError",
40 "BlockingConnectionPool",
41 "BusyLoadingError",
42 "ChildDeadlockedError",
43 "Connection",
44 "ConnectionError",
45 "ConnectionPool",
46 "DataError",
47 "from_url",
48 "InvalidResponse",
49 "PubSubError",
50 "ReadOnlyError",
51 "Redis",
52 "RedisError",
53 "ResponseError",
54 "SSLConnection",
55 "StrictRedis",
56 "TimeoutError",
57 "UnixDomainSocketConnection",
58 "WatchError",
59 ]
60
[end of aioredis/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/aioredis/__init__.py b/aioredis/__init__.py
--- a/aioredis/__init__.py
+++ b/aioredis/__init__.py
@@ -31,7 +31,7 @@
return value
-__version__ = "2.0.0b1"
+__version__ = "2.0.0"
VERSION = tuple(map(int_or_str, __version__.split(".")))
__all__ = [
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -22,11 +22,13 @@
classifiers = [
"License :: OSI Approved :: MIT License",
- "Development Status :: 4 - Beta",
+ "Development Status :: 5 - Production/Stable",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
+ "Programming Language :: Python :: 3.8",
+ "Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3 :: Only",
"Operating System :: POSIX",
"Environment :: Web Environment",
| {"golden_diff": "diff --git a/aioredis/__init__.py b/aioredis/__init__.py\n--- a/aioredis/__init__.py\n+++ b/aioredis/__init__.py\n@@ -31,7 +31,7 @@\n return value\n \n \n-__version__ = \"2.0.0b1\"\n+__version__ = \"2.0.0\"\n VERSION = tuple(map(int_or_str, __version__.split(\".\")))\n \n __all__ = [\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -22,11 +22,13 @@\n \n classifiers = [\n \"License :: OSI Approved :: MIT License\",\n- \"Development Status :: 4 - Beta\",\n+ \"Development Status :: 5 - Production/Stable\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n+ \"Programming Language :: Python :: 3.8\",\n+ \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Operating System :: POSIX\",\n \"Environment :: Web Environment\",\n", "issue": "[2.0] Update setup.py Trove classifiers\nThe [classifiers](https://github.com/aio-libs/aioredis-py/blob/5a713fff3717094cca63e4a5f4b1cb7d6894a08f/setup.py#L25-L30) currently only list Python 3.6 and 3.7. This should be updated to include all versions that are tested. Or my personal preference is just to delete classifiers for minor versions, since probably no-one will remember to update them when new versions of Python are released.\r\n\r\nIt also indicates the status as 4 - Beta. That should probably change when we release the final 2.0.0.\n", "before_files": [{"content": "import os.path\nimport re\n\nfrom setuptools import find_packages, setup\n\n\ndef read(*parts):\n with open(os.path.join(*parts)) as f:\n return f.read().strip()\n\n\ndef read_version():\n regexp = re.compile(r\"^__version__\\W*=\\W*\\\"([\\d.abrc]+)\\\"\")\n init_py = os.path.join(os.path.dirname(__file__), \"aioredis\", \"__init__.py\")\n with open(init_py) as f:\n for line in f:\n match = regexp.match(line)\n if match is not None:\n return match.group(1)\n raise RuntimeError(f\"Cannot find version in {init_py}\")\n\n\nclassifiers = [\n \"License :: OSI Approved :: MIT License\",\n \"Development Status :: 4 - Beta\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Operating System :: POSIX\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"Topic :: Software Development\",\n \"Topic :: Software Development :: Libraries\",\n \"Framework :: AsyncIO\",\n]\n\nsetup(\n name=\"aioredis\",\n version=read_version(),\n description=\"asyncio (PEP 3156) Redis support\",\n long_description=\"\\n\\n\".join((read(\"README.md\"), read(\"CHANGELOG.md\"))),\n long_description_content_type=\"text/markdown\",\n classifiers=classifiers,\n platforms=[\"POSIX\"],\n url=\"https://github.com/aio-libs/aioredis\",\n license=\"MIT\",\n packages=find_packages(exclude=[\"tests\"]),\n install_requires=[\n \"async-timeout\",\n \"typing-extensions\",\n ],\n extras_require={\n \"hiredis\": 'hiredis>=1.0; implementation_name==\"cpython\"',\n },\n package_data={\"aioredis\": [\"py.typed\"]},\n python_requires=\">=3.6\",\n include_package_data=True,\n)\n", "path": "setup.py"}, {"content": "from aioredis.client import Redis, StrictRedis\nfrom aioredis.connection import (\n BlockingConnectionPool,\n Connection,\n ConnectionPool,\n SSLConnection,\n UnixDomainSocketConnection,\n)\nfrom aioredis.exceptions import (\n 
AuthenticationError,\n AuthenticationWrongNumberOfArgsError,\n BusyLoadingError,\n ChildDeadlockedError,\n ConnectionError,\n DataError,\n InvalidResponse,\n PubSubError,\n ReadOnlyError,\n RedisError,\n ResponseError,\n TimeoutError,\n WatchError,\n)\nfrom aioredis.utils import from_url\n\n\ndef int_or_str(value):\n try:\n return int(value)\n except ValueError:\n return value\n\n\n__version__ = \"2.0.0b1\"\nVERSION = tuple(map(int_or_str, __version__.split(\".\")))\n\n__all__ = [\n \"AuthenticationError\",\n \"AuthenticationWrongNumberOfArgsError\",\n \"BlockingConnectionPool\",\n \"BusyLoadingError\",\n \"ChildDeadlockedError\",\n \"Connection\",\n \"ConnectionError\",\n \"ConnectionPool\",\n \"DataError\",\n \"from_url\",\n \"InvalidResponse\",\n \"PubSubError\",\n \"ReadOnlyError\",\n \"Redis\",\n \"RedisError\",\n \"ResponseError\",\n \"SSLConnection\",\n \"StrictRedis\",\n \"TimeoutError\",\n \"UnixDomainSocketConnection\",\n \"WatchError\",\n]\n", "path": "aioredis/__init__.py"}]} | 1,675 | 273 |
gh_patches_debug_8811 | rasdani/github-patches | git_diff | psf__black-2816 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add test for `A᧚ = 0`
Black v19.10b0 fails to parse certain assignments involving unicode identifiers - [playground link here](https://black.now.sh/?version=stable&state=_Td6WFoAAATm1rRGAgAhARYAAAB0L-Wj4ABLADtdAD2IimZxl1N_WjMy7A7oUimP5kl9tNitfjGTgMhZej2xgomiikPHniF7YMrqeF7JYab2JGKtxYQLJtMAAACEQNE3-XEpLQABV0wDcxaqH7bzfQEAAAAABFla).
```python
A᧚
A፩
```
This code is in fact valid Python, as you can confirm by pasting it into a repl or with `compile("A\u19da = 0")`.
Found, as with most of my bugs, via [Hypothesmith](https://github.com/Zac-HD/hypothesmith). Given that this applies to multiple unicode digit characters, it might be due to unicode digits in identifiers?
</issue>
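The characters involved are not ordinary decimal digits: U+19DA and U+1369 have Unicode category "No", but the Other_ID_Continue property makes them legal in Python identifiers, which is why `compile()` accepts them. A small check, runnable on any CPython:

```python
import unicodedata

for ch in ("\u19da", "\u1369"):
    name = "A" + ch
    print(hex(ord(ch)), unicodedata.category(ch), name.isidentifier())
    # e.g. 0x19da No True

compile("A\u19da = 0", "<string>", "exec")  # parses fine, as the issue notes
```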
<code>
[start of fuzz.py]
1 """Property-based tests for Black.
2
3 By Zac Hatfield-Dodds, based on my Hypothesmith tool for source code
4 generation. You can run this file with `python`, `pytest`, or (soon)
5 a coverage-guided fuzzer I'm working on.
6 """
7
8 import re
9
10 import hypothesmith
11 from hypothesis import HealthCheck, given, settings, strategies as st
12
13 import black
14 from blib2to3.pgen2.tokenize import TokenError
15
16
17 # This test uses the Hypothesis and Hypothesmith libraries to generate random
18 # syntatically-valid Python source code and run Black in odd modes.
19 @settings(
20 max_examples=1000, # roughly 1k tests/minute, or half that under coverage
21 derandomize=True, # deterministic mode to avoid CI flakiness
22 deadline=None, # ignore Hypothesis' health checks; we already know that
23 suppress_health_check=HealthCheck.all(), # this is slow and filter-heavy.
24 )
25 @given(
26 # Note that while Hypothesmith might generate code unlike that written by
27 # humans, it's a general test that should pass for any *valid* source code.
28 # (so e.g. running it against code scraped of the internet might also help)
29 src_contents=hypothesmith.from_grammar() | hypothesmith.from_node(),
30 # Using randomly-varied modes helps us to exercise less common code paths.
31 mode=st.builds(
32 black.FileMode,
33 line_length=st.just(88) | st.integers(0, 200),
34 string_normalization=st.booleans(),
35 preview=st.booleans(),
36 is_pyi=st.booleans(),
37 magic_trailing_comma=st.booleans(),
38 ),
39 )
40 def test_idempotent_any_syntatically_valid_python(
41 src_contents: str, mode: black.FileMode
42 ) -> None:
43 # Before starting, let's confirm that the input string is valid Python:
44 compile(src_contents, "<string>", "exec") # else the bug is in hypothesmith
45
46 # Then format the code...
47 try:
48 dst_contents = black.format_str(src_contents, mode=mode)
49 except black.InvalidInput:
50 # This is a bug - if it's valid Python code, as above, Black should be
51 # able to cope with it. See issues #970, #1012, #1358, and #1557.
52 # TODO: remove this try-except block when issues are resolved.
53 return
54 except TokenError as e:
55 if ( # Special-case logic for backslashes followed by newlines or end-of-input
56 e.args[0] == "EOF in multi-line statement"
57 and re.search(r"\\($|\r?\n)", src_contents) is not None
58 ):
59 # This is a bug - if it's valid Python code, as above, Black should be
60 # able to cope with it. See issue #1012.
61 # TODO: remove this block when the issue is resolved.
62 return
63 raise
64
65 # And check that we got equivalent and stable output.
66 black.assert_equivalent(src_contents, dst_contents)
67 black.assert_stable(src_contents, dst_contents, mode=mode)
68
69 # Future test: check that pure-python and mypyc versions of black
70 # give identical output for identical input?
71
72
73 if __name__ == "__main__":
74 # Run tests, including shrinking and reporting any known failures.
75 test_idempotent_any_syntatically_valid_python()
76
77 # If Atheris is available, run coverage-guided fuzzing.
78 # (if you want only bounded fuzzing, just use `pytest fuzz.py`)
79 try:
80 import sys
81 import atheris
82 except ImportError:
83 pass
84 else:
85 test = test_idempotent_any_syntatically_valid_python
86 atheris.Setup(sys.argv, test.hypothesis.fuzz_one_input)
87 atheris.Fuzz()
88
[end of fuzz.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/fuzz.py b/fuzz.py
--- a/fuzz.py
+++ b/fuzz.py
@@ -48,7 +48,7 @@
dst_contents = black.format_str(src_contents, mode=mode)
except black.InvalidInput:
# This is a bug - if it's valid Python code, as above, Black should be
- # able to cope with it. See issues #970, #1012, #1358, and #1557.
+ # able to cope with it. See issues #970, #1012
# TODO: remove this try-except block when issues are resolved.
return
except TokenError as e:
| {"golden_diff": "diff --git a/fuzz.py b/fuzz.py\n--- a/fuzz.py\n+++ b/fuzz.py\n@@ -48,7 +48,7 @@\n dst_contents = black.format_str(src_contents, mode=mode)\n except black.InvalidInput:\n # This is a bug - if it's valid Python code, as above, Black should be\n- # able to cope with it. See issues #970, #1012, #1358, and #1557.\n+ # able to cope with it. See issues #970, #1012\n # TODO: remove this try-except block when issues are resolved.\n return\n except TokenError as e:\n", "issue": "Add test for `A\u19da = 0`\nBlack v19.10b0 fails to parse certain assignments involving unicode identifiers - [playground link here](https://black.now.sh/?version=stable&state=_Td6WFoAAATm1rRGAgAhARYAAAB0L-Wj4ABLADtdAD2IimZxl1N_WjMy7A7oUimP5kl9tNitfjGTgMhZej2xgomiikPHniF7YMrqeF7JYab2JGKtxYQLJtMAAACEQNE3-XEpLQABV0wDcxaqH7bzfQEAAAAABFla).\r\n\r\n```python\r\nA\u19da\r\nA\u1369\r\n```\r\n\r\nThis code is in fact valid Python, as you can confirm by pasting it into a repl or with `compile(\"A\\u19da = 0\")`.\r\n\r\nFound, as with most of my bugs, via [Hypothesmith](https://github.com/Zac-HD/hypothesmith). Given that this applies to multiple unicode digit characters, it might be due to unicode digits in identifiers?\n", "before_files": [{"content": "\"\"\"Property-based tests for Black.\n\nBy Zac Hatfield-Dodds, based on my Hypothesmith tool for source code\ngeneration. You can run this file with `python`, `pytest`, or (soon)\na coverage-guided fuzzer I'm working on.\n\"\"\"\n\nimport re\n\nimport hypothesmith\nfrom hypothesis import HealthCheck, given, settings, strategies as st\n\nimport black\nfrom blib2to3.pgen2.tokenize import TokenError\n\n\n# This test uses the Hypothesis and Hypothesmith libraries to generate random\n# syntatically-valid Python source code and run Black in odd modes.\n@settings(\n max_examples=1000, # roughly 1k tests/minute, or half that under coverage\n derandomize=True, # deterministic mode to avoid CI flakiness\n deadline=None, # ignore Hypothesis' health checks; we already know that\n suppress_health_check=HealthCheck.all(), # this is slow and filter-heavy.\n)\n@given(\n # Note that while Hypothesmith might generate code unlike that written by\n # humans, it's a general test that should pass for any *valid* source code.\n # (so e.g. running it against code scraped of the internet might also help)\n src_contents=hypothesmith.from_grammar() | hypothesmith.from_node(),\n # Using randomly-varied modes helps us to exercise less common code paths.\n mode=st.builds(\n black.FileMode,\n line_length=st.just(88) | st.integers(0, 200),\n string_normalization=st.booleans(),\n preview=st.booleans(),\n is_pyi=st.booleans(),\n magic_trailing_comma=st.booleans(),\n ),\n)\ndef test_idempotent_any_syntatically_valid_python(\n src_contents: str, mode: black.FileMode\n) -> None:\n # Before starting, let's confirm that the input string is valid Python:\n compile(src_contents, \"<string>\", \"exec\") # else the bug is in hypothesmith\n\n # Then format the code...\n try:\n dst_contents = black.format_str(src_contents, mode=mode)\n except black.InvalidInput:\n # This is a bug - if it's valid Python code, as above, Black should be\n # able to cope with it. 
See issues #970, #1012, #1358, and #1557.\n # TODO: remove this try-except block when issues are resolved.\n return\n except TokenError as e:\n if ( # Special-case logic for backslashes followed by newlines or end-of-input\n e.args[0] == \"EOF in multi-line statement\"\n and re.search(r\"\\\\($|\\r?\\n)\", src_contents) is not None\n ):\n # This is a bug - if it's valid Python code, as above, Black should be\n # able to cope with it. See issue #1012.\n # TODO: remove this block when the issue is resolved.\n return\n raise\n\n # And check that we got equivalent and stable output.\n black.assert_equivalent(src_contents, dst_contents)\n black.assert_stable(src_contents, dst_contents, mode=mode)\n\n # Future test: check that pure-python and mypyc versions of black\n # give identical output for identical input?\n\n\nif __name__ == \"__main__\":\n # Run tests, including shrinking and reporting any known failures.\n test_idempotent_any_syntatically_valid_python()\n\n # If Atheris is available, run coverage-guided fuzzing.\n # (if you want only bounded fuzzing, just use `pytest fuzz.py`)\n try:\n import sys\n import atheris\n except ImportError:\n pass\n else:\n test = test_idempotent_any_syntatically_valid_python\n atheris.Setup(sys.argv, test.hypothesis.fuzz_one_input)\n atheris.Fuzz()\n", "path": "fuzz.py"}]} | 1,825 | 162 |
gh_patches_debug_52 | rasdani/github-patches | git_diff | Anselmoo__spectrafit-655 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Docs]: Using the built-in release drafter
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Current Missing Information in the Docs
https://docs.github.com/en/repositories/releasing-projects-on-github/automatically-generated-release-notes
### Anything else?
_No response_
### Code of Conduct
- [X] I agree to follow this project's Code of Conduct
</issue>
<code>
[start of spectrafit/__init__.py]
1 """SpectraFit, fast command line tool for fitting data."""
2 __version__ = "0.16.4"
3
[end of spectrafit/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/spectrafit/__init__.py b/spectrafit/__init__.py
--- a/spectrafit/__init__.py
+++ b/spectrafit/__init__.py
@@ -1,2 +1,2 @@
"""SpectraFit, fast command line tool for fitting data."""
-__version__ = "0.16.4"
+__version__ = "1.0.0a0"
| {"golden_diff": "diff --git a/spectrafit/__init__.py b/spectrafit/__init__.py\n--- a/spectrafit/__init__.py\n+++ b/spectrafit/__init__.py\n@@ -1,2 +1,2 @@\n \"\"\"SpectraFit, fast command line tool for fitting data.\"\"\"\n-__version__ = \"0.16.4\"\n+__version__ = \"1.0.0a0\"\n", "issue": "[Docs]: Using builtin release drafter\n### Is there an existing issue for this?\n\n- [X] I have searched the existing issues\n\n### Current Missing Information in the Docs\n\nhttps://docs.github.com/en/repositories/releasing-projects-on-github/automatically-generated-release-notes\n\n### Anything else?\n\n_No response_\n\n### Code of Conduct\n\n- [X] I agree to follow this project's Code of Conduct\n", "before_files": [{"content": "\"\"\"SpectraFit, fast command line tool for fitting data.\"\"\"\n__version__ = \"0.16.4\"\n", "path": "spectrafit/__init__.py"}]} | 653 | 96 |
gh_patches_debug_45047 | rasdani/github-patches | git_diff | e-valuation__EvaP-750 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
collapse contributors with no answers in course detail pages
Contributors who didn't get any answers should be collapsed on the results pages, so that the empty answer lines are not shown.
This should also happen if there are answers in the database, but none of them can be seen by the current user.

</issue>
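A sketch of the collapse condition the report implies, counting the answers that survive the visibility filtering in `course_detail`; `RatingResult` is assumed importable from `evap.evaluation.tools` next to the `TextResult` already used there.

```python
from evap.evaluation.tools import RatingResult


def contributor_has_visible_answers(sections) -> bool:
    # RatingResult carries a vote count; anything else (e.g. a TextResult
    # that survived filtering) counts as one visible answer.
    total = sum(
        result.total_count if isinstance(result, RatingResult) else 1
        for section in sections
        for result in section.results
    )
    return total > 0  # collapse the contributor when this is False
```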
<code>
[start of evap/results/views.py]
1 from django.core.exceptions import PermissionDenied
2 from django.shortcuts import get_object_or_404, render
3 from django.contrib.auth.decorators import login_required
4
5 from evap.evaluation.models import Semester, Degree, Contribution
6 from evap.evaluation.tools import calculate_results, calculate_average_grades_and_deviation, TextResult
7
8
9 from collections import OrderedDict, namedtuple
10
11
12 @login_required
13 def index(request):
14 semesters = Semester.get_all_with_published_courses()
15
16 return render(request, "results_index.html", dict(semesters=semesters))
17
18
19 @login_required
20 def semester_detail(request, semester_id):
21 semester = get_object_or_404(Semester, id=semester_id)
22 courses = list(semester.course_set.filter(state="published").prefetch_related("degrees"))
23
24 # annotate each course object with its grades
25 for course in courses:
26 course.avg_grade, course.avg_deviation = calculate_average_grades_and_deviation(course)
27
28 CourseTuple = namedtuple('CourseTuple', ('courses', 'single_results'))
29
30 courses_by_degree = OrderedDict()
31 for degree in Degree.objects.all():
32 courses_by_degree[degree] = CourseTuple([], [])
33 for course in courses:
34 if course.is_single_result():
35 for degree in course.degrees.all():
36 section = calculate_results(course)[0]
37 result = section.results[0]
38 courses_by_degree[degree].single_results.append((course, result))
39 else:
40 for degree in course.degrees.all():
41 courses_by_degree[degree].courses.append(course)
42
43 template_data = dict(semester=semester, courses_by_degree=courses_by_degree, staff=request.user.is_staff)
44 return render(request, "results_semester_detail.html", template_data)
45
46
47 @login_required
48 def course_detail(request, semester_id, course_id):
49 semester = get_object_or_404(Semester, id=semester_id)
50 course = get_object_or_404(semester.course_set, id=course_id)
51
52 if not course.can_user_see_results(request.user):
53 raise PermissionDenied
54
55 sections = calculate_results(course)
56
57 public_view = request.GET.get('public_view', 'false') # default: show own view
58 public_view = {'true': True, 'false': False}.get(public_view.lower()) # convert parameter to boolean
59
60 represented_users = list(request.user.represented_users.all())
61 represented_users.append(request.user)
62
63 for section in sections:
64 results = []
65 for result in section.results:
66 if isinstance(result, TextResult):
67 answers = [answer for answer in result.answers if user_can_see_text_answer(request.user, represented_users, answer, public_view)]
68 if answers:
69 results.append(TextResult(question=result.question, answers=answers))
70 else:
71 results.append(result)
72 section.results[:] = results
73
74 # filter empty sections and group by contributor
75 course_sections = []
76 contributor_sections = OrderedDict()
77 for section in sections:
78 if not section.results:
79 continue
80 if section.contributor is None:
81 course_sections.append(section)
82 else:
83 contributor_sections.setdefault(section.contributor, []).append(section)
84
85 # show a warning if course is still in evaluation (for staff preview)
86 evaluation_warning = course.state != 'published'
87
88 # results for a course might not be visible because there are not enough answers
89 # but it can still be "published" e.g. to show the comment results to contributors.
90 # users who can open the results page see a warning message in this case
91 sufficient_votes_warning = not course.can_publish_grades
92
93 show_grades = request.user.is_staff or course.can_publish_grades
94
95 course.avg_grade, course.avg_deviation = calculate_average_grades_and_deviation(course)
96
97 template_data = dict(
98 course=course,
99 course_sections=course_sections,
100 contributor_sections=contributor_sections,
101 evaluation_warning=evaluation_warning,
102 sufficient_votes_warning=sufficient_votes_warning,
103 show_grades=show_grades,
104 staff=request.user.is_staff,
105 contributor=course.is_user_contributor_or_delegate(request.user),
106 can_download_grades=request.user.can_download_grades,
107 public_view=public_view)
108 return render(request, "results_course_detail.html", template_data)
109
110 def user_can_see_text_answer(user, represented_users, text_answer, public_view=False):
111 if public_view:
112 return False
113 if user.is_staff:
114 return True
115 contributor = text_answer.contribution.contributor
116 if text_answer.is_private:
117 return contributor == user
118 if text_answer.is_published:
119 if contributor in represented_users:
120 return True
121 if text_answer.contribution.course.contributions.filter(contributor__in=represented_users, comment_visibility=Contribution.ALL_COMMENTS).exists():
122 return True
123 if text_answer.contribution.is_general and \
124 text_answer.contribution.course.contributions.filter(contributor__in=represented_users, comment_visibility=Contribution.COURSE_COMMENTS).exists():
125 return True
126
127 return False
128
[end of evap/results/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/evap/results/views.py b/evap/results/views.py
--- a/evap/results/views.py
+++ b/evap/results/views.py
@@ -3,8 +3,7 @@
from django.contrib.auth.decorators import login_required
from evap.evaluation.models import Semester, Degree, Contribution
-from evap.evaluation.tools import calculate_results, calculate_average_grades_and_deviation, TextResult
-
+from evap.evaluation.tools import calculate_results, calculate_average_grades_and_deviation, TextResult, RatingResult
from collections import OrderedDict, namedtuple
@@ -21,7 +20,7 @@
semester = get_object_or_404(Semester, id=semester_id)
courses = list(semester.course_set.filter(state="published").prefetch_related("degrees"))
- # annotate each course object with its grades
+ # Annotate each course object with its grades.
for course in courses:
course.avg_grade, course.avg_deviation = calculate_average_grades_and_deviation(course)
@@ -54,8 +53,8 @@
sections = calculate_results(course)
- public_view = request.GET.get('public_view', 'false') # default: show own view
- public_view = {'true': True, 'false': False}.get(public_view.lower()) # convert parameter to boolean
+ public_view = request.GET.get('public_view', 'false') # Default: show own view.
+ public_view = {'true': True, 'false': False}.get(public_view.lower()) # Convert parameter to boolean.
represented_users = list(request.user.represented_users.all())
represented_users.append(request.user)
@@ -71,7 +70,7 @@
results.append(result)
section.results[:] = results
- # filter empty sections and group by contributor
+ # Filter empty sections and group by contributor.
course_sections = []
contributor_sections = OrderedDict()
for section in sections:
@@ -80,14 +79,21 @@
if section.contributor is None:
course_sections.append(section)
else:
- contributor_sections.setdefault(section.contributor, []).append(section)
+ contributor_sections.setdefault(section.contributor,
+ {'total_votes': 0, 'sections': []})['sections'].append(section)
+
+ # Sum up all Sections for this contributor.
+ # If section is not a RatingResult:
+ # Add 1 as we assume it is a TextResult or something similar that should be displayed.
+ contributor_sections[section.contributor]['total_votes'] +=\
+ sum([s.total_count if isinstance(s, RatingResult) else 1 for s in section.results])
- # show a warning if course is still in evaluation (for staff preview)
+ # Show a warning if course is still in evaluation (for staff preview).
evaluation_warning = course.state != 'published'
- # results for a course might not be visible because there are not enough answers
+ # Results for a course might not be visible because there are not enough answers
# but it can still be "published" e.g. to show the comment results to contributors.
- # users who can open the results page see a warning message in this case
+ # Users who can open the results page see a warning message in this case.
sufficient_votes_warning = not course.can_publish_grades
show_grades = request.user.is_staff or course.can_publish_grades
@@ -107,6 +113,7 @@
public_view=public_view)
return render(request, "results_course_detail.html", template_data)
+
def user_can_see_text_answer(user, represented_users, text_answer, public_view=False):
if public_view:
return False
@@ -118,10 +125,11 @@
if text_answer.is_published:
if contributor in represented_users:
return True
- if text_answer.contribution.course.contributions.filter(contributor__in=represented_users, comment_visibility=Contribution.ALL_COMMENTS).exists():
+ if text_answer.contribution.course.contributions.filter(
+ contributor__in=represented_users, comment_visibility=Contribution.ALL_COMMENTS).exists():
return True
- if text_answer.contribution.is_general and \
- text_answer.contribution.course.contributions.filter(contributor__in=represented_users, comment_visibility=Contribution.COURSE_COMMENTS).exists():
+ if text_answer.contribution.is_general and text_answer.contribution.course.contributions.filter(
+ contributor__in=represented_users, comment_visibility=Contribution.COURSE_COMMENTS).exists():
return True
return False
| {"golden_diff": "diff --git a/evap/results/views.py b/evap/results/views.py\n--- a/evap/results/views.py\n+++ b/evap/results/views.py\n@@ -3,8 +3,7 @@\n from django.contrib.auth.decorators import login_required\n \n from evap.evaluation.models import Semester, Degree, Contribution\n-from evap.evaluation.tools import calculate_results, calculate_average_grades_and_deviation, TextResult\n-\n+from evap.evaluation.tools import calculate_results, calculate_average_grades_and_deviation, TextResult, RatingResult\n \n from collections import OrderedDict, namedtuple\n \n@@ -21,7 +20,7 @@\n semester = get_object_or_404(Semester, id=semester_id)\n courses = list(semester.course_set.filter(state=\"published\").prefetch_related(\"degrees\"))\n \n- # annotate each course object with its grades\n+ # Annotate each course object with its grades.\n for course in courses:\n course.avg_grade, course.avg_deviation = calculate_average_grades_and_deviation(course)\n \n@@ -54,8 +53,8 @@\n \n sections = calculate_results(course)\n \n- public_view = request.GET.get('public_view', 'false') # default: show own view\n- public_view = {'true': True, 'false': False}.get(public_view.lower()) # convert parameter to boolean\n+ public_view = request.GET.get('public_view', 'false') # Default: show own view.\n+ public_view = {'true': True, 'false': False}.get(public_view.lower()) # Convert parameter to boolean.\n \n represented_users = list(request.user.represented_users.all())\n represented_users.append(request.user)\n@@ -71,7 +70,7 @@\n results.append(result)\n section.results[:] = results\n \n- # filter empty sections and group by contributor\n+ # Filter empty sections and group by contributor.\n course_sections = []\n contributor_sections = OrderedDict()\n for section in sections:\n@@ -80,14 +79,21 @@\n if section.contributor is None:\n course_sections.append(section)\n else:\n- contributor_sections.setdefault(section.contributor, []).append(section)\n+ contributor_sections.setdefault(section.contributor,\n+ {'total_votes': 0, 'sections': []})['sections'].append(section)\n+\n+ # Sum up all Sections for this contributor.\n+ # If section is not a RatingResult:\n+ # Add 1 as we assume it is a TextResult or something similar that should be displayed.\n+ contributor_sections[section.contributor]['total_votes'] +=\\\n+ sum([s.total_count if isinstance(s, RatingResult) else 1 for s in section.results])\n \n- # show a warning if course is still in evaluation (for staff preview)\n+ # Show a warning if course is still in evaluation (for staff preview).\n evaluation_warning = course.state != 'published'\n \n- # results for a course might not be visible because there are not enough answers\n+ # Results for a course might not be visible because there are not enough answers\n # but it can still be \"published\" e.g. 
to show the comment results to contributors.\n- # users who can open the results page see a warning message in this case\n+ # Users who can open the results page see a warning message in this case.\n sufficient_votes_warning = not course.can_publish_grades\n \n show_grades = request.user.is_staff or course.can_publish_grades\n@@ -107,6 +113,7 @@\n public_view=public_view)\n return render(request, \"results_course_detail.html\", template_data)\n \n+\n def user_can_see_text_answer(user, represented_users, text_answer, public_view=False):\n if public_view:\n return False\n@@ -118,10 +125,11 @@\n if text_answer.is_published:\n if contributor in represented_users:\n return True\n- if text_answer.contribution.course.contributions.filter(contributor__in=represented_users, comment_visibility=Contribution.ALL_COMMENTS).exists():\n+ if text_answer.contribution.course.contributions.filter(\n+ contributor__in=represented_users, comment_visibility=Contribution.ALL_COMMENTS).exists():\n return True\n- if text_answer.contribution.is_general and \\\n- text_answer.contribution.course.contributions.filter(contributor__in=represented_users, comment_visibility=Contribution.COURSE_COMMENTS).exists():\n+ if text_answer.contribution.is_general and text_answer.contribution.course.contributions.filter(\n+ contributor__in=represented_users, comment_visibility=Contribution.COURSE_COMMENTS).exists():\n return True\n \n return False\n", "issue": "collapse contributors with no answers in course detail pages\nContributors who didn't get any answers should be collapsed on the results pages, so that the empty answer lines are not shown.\nThis should also happen if there are answers in the database, but none of them can be seen by the current user.\n\n\n\n", "before_files": [{"content": "from django.core.exceptions import PermissionDenied\nfrom django.shortcuts import get_object_or_404, render\nfrom django.contrib.auth.decorators import login_required\n\nfrom evap.evaluation.models import Semester, Degree, Contribution\nfrom evap.evaluation.tools import calculate_results, calculate_average_grades_and_deviation, TextResult\n\n\nfrom collections import OrderedDict, namedtuple\n\n\n@login_required\ndef index(request):\n semesters = Semester.get_all_with_published_courses()\n\n return render(request, \"results_index.html\", dict(semesters=semesters))\n\n\n@login_required\ndef semester_detail(request, semester_id):\n semester = get_object_or_404(Semester, id=semester_id)\n courses = list(semester.course_set.filter(state=\"published\").prefetch_related(\"degrees\"))\n\n # annotate each course object with its grades\n for course in courses:\n course.avg_grade, course.avg_deviation = calculate_average_grades_and_deviation(course)\n\n CourseTuple = namedtuple('CourseTuple', ('courses', 'single_results'))\n\n courses_by_degree = OrderedDict()\n for degree in Degree.objects.all():\n courses_by_degree[degree] = CourseTuple([], [])\n for course in courses:\n if course.is_single_result():\n for degree in course.degrees.all():\n section = calculate_results(course)[0]\n result = section.results[0]\n courses_by_degree[degree].single_results.append((course, result))\n else:\n for degree in course.degrees.all():\n courses_by_degree[degree].courses.append(course)\n\n template_data = dict(semester=semester, courses_by_degree=courses_by_degree, staff=request.user.is_staff)\n return render(request, \"results_semester_detail.html\", template_data)\n\n\n@login_required\ndef course_detail(request, semester_id, course_id):\n semester = 
get_object_or_404(Semester, id=semester_id)\n course = get_object_or_404(semester.course_set, id=course_id)\n\n if not course.can_user_see_results(request.user):\n raise PermissionDenied\n\n sections = calculate_results(course)\n\n public_view = request.GET.get('public_view', 'false') # default: show own view\n public_view = {'true': True, 'false': False}.get(public_view.lower()) # convert parameter to boolean\n\n represented_users = list(request.user.represented_users.all())\n represented_users.append(request.user)\n\n for section in sections:\n results = []\n for result in section.results:\n if isinstance(result, TextResult):\n answers = [answer for answer in result.answers if user_can_see_text_answer(request.user, represented_users, answer, public_view)]\n if answers:\n results.append(TextResult(question=result.question, answers=answers))\n else:\n results.append(result)\n section.results[:] = results\n\n # filter empty sections and group by contributor\n course_sections = []\n contributor_sections = OrderedDict()\n for section in sections:\n if not section.results:\n continue\n if section.contributor is None:\n course_sections.append(section)\n else:\n contributor_sections.setdefault(section.contributor, []).append(section)\n\n # show a warning if course is still in evaluation (for staff preview)\n evaluation_warning = course.state != 'published'\n\n # results for a course might not be visible because there are not enough answers\n # but it can still be \"published\" e.g. to show the comment results to contributors.\n # users who can open the results page see a warning message in this case\n sufficient_votes_warning = not course.can_publish_grades\n\n show_grades = request.user.is_staff or course.can_publish_grades\n\n course.avg_grade, course.avg_deviation = calculate_average_grades_and_deviation(course)\n\n template_data = dict(\n course=course,\n course_sections=course_sections,\n contributor_sections=contributor_sections,\n evaluation_warning=evaluation_warning,\n sufficient_votes_warning=sufficient_votes_warning,\n show_grades=show_grades,\n staff=request.user.is_staff,\n contributor=course.is_user_contributor_or_delegate(request.user),\n can_download_grades=request.user.can_download_grades,\n public_view=public_view)\n return render(request, \"results_course_detail.html\", template_data)\n\ndef user_can_see_text_answer(user, represented_users, text_answer, public_view=False):\n if public_view:\n return False\n if user.is_staff:\n return True\n contributor = text_answer.contribution.contributor\n if text_answer.is_private:\n return contributor == user\n if text_answer.is_published:\n if contributor in represented_users:\n return True\n if text_answer.contribution.course.contributions.filter(contributor__in=represented_users, comment_visibility=Contribution.ALL_COMMENTS).exists():\n return True\n if text_answer.contribution.is_general and \\\n text_answer.contribution.course.contributions.filter(contributor__in=represented_users, comment_visibility=Contribution.COURSE_COMMENTS).exists():\n return True\n\n return False\n", "path": "evap/results/views.py"}]} | 2,024 | 1,004 |
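A minimal sketch of the counting rule the golden diff above introduces, pulled out of Django for illustration. RatingResult and TextResult are stand-ins for evap's result classes and the sample counts are made up:

# Sketch of the total_votes accumulation from the evap fix.
class RatingResult:
    def __init__(self, total_count):
        self.total_count = total_count  # votes behind this rating question

class TextResult:
    pass  # carries no vote count but should still be displayed

section_results = [RatingResult(12), TextResult(), RatingResult(3)]

# A RatingResult contributes its total_count; anything else counts as 1,
# mirroring the sum(...) expression added in the diff.
total_votes = sum(r.total_count if isinstance(r, RatingResult) else 1
                  for r in section_results)
assert total_votes == 16

A contributor whose accumulated total_votes stays at 0 is exactly the case the issue asks the template to collapse.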
gh_patches_debug_631 | rasdani/github-patches | git_diff | pex-tool__pex-2042 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release 2.1.121
On the docket:
+ [x] Building Pex with requirements.txt that includes local directory + Python version specifier fails #2037
+ [x] Failed to resolve compatible distributions when building Pex from .whl with local dependencies #2038
</issue>
<code>
[start of pex/version.py]
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.120"
5
[end of pex/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.120"
+__version__ = "2.1.121"
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.120\"\n+__version__ = \"2.1.121\"\n", "issue": "Release 2.1.121\nOn the docket:\r\n+ [x] Building Pex with requirements.txt that includes local directory + Python version specifier fails #2037 \r\n+ [x] Failed to resolve compatible distributions when building Pex from .whl with local dependencies #2038 \r\n\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.120\"\n", "path": "pex/version.py"}]} | 652 | 99 |
gh_patches_debug_25893 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-551 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Use predicates to prevent access to items if no phase has started yet
With topicprio and polls items are created before any phase has been started.
On the project_detail page a according message and no content is shown.
This is checked via the `PhaseDispatchMixin`.
As for items it is not possible to use the `PhaseDispatchMixin` a new
predicate has been introduced which tests if a phase has been started
yet. Be aware, that this does not imply an active phase.
If no phase has been started yet access to the items should be prevented.
But if participation did finish the items should still be available.
See 362a8ce6a79888b2cc1dbdeb789107a156c5d15f for reference.
</issue>
<code>
[start of apps/polls/rules.py]
1 import rules
2 from rules.predicates import is_superuser
3
4 from adhocracy4.modules import predicates as module_predicates
5
6 from . import models
7
8 rules.add_perm(
9 'meinberlin_polls.change_poll',
10 is_superuser | module_predicates.is_context_initiator
11 )
12
13 rules.add_perm(
14 'meinberlin_polls.view_poll',
15 module_predicates.is_public_context
16 )
17
18 rules.add_perm(
19 'meinberlin_polls.comment_poll',
20 module_predicates.is_allowed_comment_item
21 )
22
23 rules.add_perm(
24 'meinberlin_polls.add_vote',
25 module_predicates.is_allowed_add_item(models.Vote)
26 )
27
28 rules.add_perm(
29 'meinberlin_polls.change_vote',
30 module_predicates.is_allowed_add_item(models.Vote)
31 )
32
[end of apps/polls/rules.py]
[start of apps/topicprio/rules.py]
1 import rules
2
3 from adhocracy4.modules import predicates as module_predicates
4
5 rules.add_perm(
6 'meinberlin_topicprio.add_topic',
7 module_predicates.is_project_admin
8 )
9
10 rules.add_perm(
11 'meinberlin_topicprio.change_topic',
12 module_predicates.is_project_admin
13 )
14
15 rules.add_perm(
16 'meinberlin_topicprio.rate_topic',
17 module_predicates.is_allowed_rate_item
18 )
19
20 rules.add_perm(
21 'meinberlin_topicprio.comment_topic',
22 module_predicates.is_allowed_comment_item
23 )
24
[end of apps/topicprio/rules.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/apps/polls/rules.py b/apps/polls/rules.py
--- a/apps/polls/rules.py
+++ b/apps/polls/rules.py
@@ -2,6 +2,7 @@
from rules.predicates import is_superuser
from adhocracy4.modules import predicates as module_predicates
+from apps.contrib import predicates as contrib_predicates
from . import models
@@ -12,7 +13,9 @@
rules.add_perm(
'meinberlin_polls.view_poll',
- module_predicates.is_public_context
+ (module_predicates.is_project_admin |
+ (module_predicates.is_allowed_view_item &
+ contrib_predicates.has_context_started))
)
rules.add_perm(
diff --git a/apps/topicprio/rules.py b/apps/topicprio/rules.py
--- a/apps/topicprio/rules.py
+++ b/apps/topicprio/rules.py
@@ -1,6 +1,7 @@
import rules
from adhocracy4.modules import predicates as module_predicates
+from apps.contrib import predicates as contrib_predicates
rules.add_perm(
'meinberlin_topicprio.add_topic',
@@ -12,6 +13,13 @@
module_predicates.is_project_admin
)
+rules.add_perm(
+ 'meinberlin_topicprio.view_topic',
+ (module_predicates.is_project_admin |
+ (module_predicates.is_allowed_view_item &
+ contrib_predicates.has_context_started))
+)
+
rules.add_perm(
'meinberlin_topicprio.rate_topic',
module_predicates.is_allowed_rate_item
| {"golden_diff": "diff --git a/apps/polls/rules.py b/apps/polls/rules.py\n--- a/apps/polls/rules.py\n+++ b/apps/polls/rules.py\n@@ -2,6 +2,7 @@\n from rules.predicates import is_superuser\n \n from adhocracy4.modules import predicates as module_predicates\n+from apps.contrib import predicates as contrib_predicates\n \n from . import models\n \n@@ -12,7 +13,9 @@\n \n rules.add_perm(\n 'meinberlin_polls.view_poll',\n- module_predicates.is_public_context\n+ (module_predicates.is_project_admin |\n+ (module_predicates.is_allowed_view_item &\n+ contrib_predicates.has_context_started))\n )\n \n rules.add_perm(\ndiff --git a/apps/topicprio/rules.py b/apps/topicprio/rules.py\n--- a/apps/topicprio/rules.py\n+++ b/apps/topicprio/rules.py\n@@ -1,6 +1,7 @@\n import rules\n \n from adhocracy4.modules import predicates as module_predicates\n+from apps.contrib import predicates as contrib_predicates\n \n rules.add_perm(\n 'meinberlin_topicprio.add_topic',\n@@ -12,6 +13,13 @@\n module_predicates.is_project_admin\n )\n \n+rules.add_perm(\n+ 'meinberlin_topicprio.view_topic',\n+ (module_predicates.is_project_admin |\n+ (module_predicates.is_allowed_view_item &\n+ contrib_predicates.has_context_started))\n+)\n+\n rules.add_perm(\n 'meinberlin_topicprio.rate_topic',\n module_predicates.is_allowed_rate_item\n", "issue": "Use predicates to prevent access to items if no phase has started yet\nWith topicprio and polls items are created before any phase has been started.\r\nOn the project_detail page a according message and no content is shown.\r\nThis is checked via the `PhaseDispatchMixin`.\r\nAs for items it is not possible to use the `PhaseDispatchMixin` a new\r\npredicate has been introduced which tests if a phase has been started\r\nyet. Be aware, that this does not imply an active phase.\r\nIf no phase has been started yet access to the items should be prevented. \r\nBut if participation did finish the items should still be available.\r\nSee 362a8ce6a79888b2cc1dbdeb789107a156c5d15f for reference. \n", "before_files": [{"content": "import rules\nfrom rules.predicates import is_superuser\n\nfrom adhocracy4.modules import predicates as module_predicates\n\nfrom . import models\n\nrules.add_perm(\n 'meinberlin_polls.change_poll',\n is_superuser | module_predicates.is_context_initiator\n)\n\nrules.add_perm(\n 'meinberlin_polls.view_poll',\n module_predicates.is_public_context\n)\n\nrules.add_perm(\n 'meinberlin_polls.comment_poll',\n module_predicates.is_allowed_comment_item\n)\n\nrules.add_perm(\n 'meinberlin_polls.add_vote',\n module_predicates.is_allowed_add_item(models.Vote)\n)\n\nrules.add_perm(\n 'meinberlin_polls.change_vote',\n module_predicates.is_allowed_add_item(models.Vote)\n)\n", "path": "apps/polls/rules.py"}, {"content": "import rules\n\nfrom adhocracy4.modules import predicates as module_predicates\n\nrules.add_perm(\n 'meinberlin_topicprio.add_topic',\n module_predicates.is_project_admin\n)\n\nrules.add_perm(\n 'meinberlin_topicprio.change_topic',\n module_predicates.is_project_admin\n)\n\nrules.add_perm(\n 'meinberlin_topicprio.rate_topic',\n module_predicates.is_allowed_rate_item\n)\n\nrules.add_perm(\n 'meinberlin_topicprio.comment_topic',\n module_predicates.is_allowed_comment_item\n)\n", "path": "apps/topicprio/rules.py"}]} | 1,110 | 341 |
gh_patches_debug_27110 | rasdani/github-patches | git_diff | RedHatInsights__insights-core-3225 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Need to add the Yum updates datasource to the documentation
The [PR](https://github.com/RedHatInsights/insights-core/pull/2993/files#diff-22151ef794ba196097984a47bf24b6759c261de6dc062ac541da099084e5c50a) adding this datasource did not add the datasource to the documentation [here](https://github.com/RedHatInsights/insights-core/blob/master/docs/custom_datasources_index.rst).
</issue>
<code>
[start of insights/specs/datasources/yum_updates.py]
1 """
2 Custom datasource for collecting yum updates
3 """
4 import json
5 import time
6
7 from insights import datasource, HostContext, SkipComponent
8 from insights.components.rhel_version import IsRhel7
9 from insights.core.spec_factory import DatasourceProvider
10
11 sorted_cmp = None
12 try:
13 # cmp_to_key is not available in python 2.6, but it has sorted function which accepts cmp function
14 def sorted_cmp(it, cmp):
15 from functools import cmp_to_key
16 return sorted(it, key=cmp_to_key(cmp))
17 except ImportError:
18 sorted_cmp = sorted
19
20
21 class UpdatesManager:
22 """ Performs package resolution on yum based systems """
23 def __init__(self):
24 import yum
25
26 self.base = yum.YumBase()
27 self.base.doGenericSetup(cache=1)
28 self.releasever = self.base.conf.yumvar['releasever']
29 self.basearch = self.base.conf.yumvar['basearch']
30 self.packages = []
31 self.repos = []
32 self.updict = {}
33
34 def __enter__(self):
35 return self
36
37 def __exit__(self, *args):
38 pass
39
40 @staticmethod
41 def pkg_cmp(a, b):
42 vercmp = a.verCMP(b)
43 if vercmp != 0:
44 return vercmp
45 if a.repoid != b.repoid:
46 return -1 if a.repoid < b.repoid else 1
47 return 0
48
49 def sorted_pkgs(self, pkgs):
50 return sorted_cmp(pkgs, self.pkg_cmp)
51
52 def load(self):
53 self.base.doRepoSetup()
54 self.base.doSackSetup()
55 self.packages = self.base.pkgSack.returnPackages()
56 self.repos = self.base.repos.repos
57 self._build_updict()
58
59 def _build_updict(self):
60 self.updict = {}
61 for pkg in self.packages:
62 self.updict.setdefault(pkg.na, []).append(pkg)
63
64 def enabled_repos(self):
65 return [repo.id for repo in self.base.repos.listEnabled()]
66
67 def installed_packages(self):
68 return self.base.rpmdb.returnPackages()
69
70 def updates(self, pkg):
71 nevra = pkg.nevra
72 updates_list = []
73 for upg in self.updict[pkg.na]:
74 if upg.verGT(pkg):
75 updates_list.append(upg)
76 return nevra, updates_list
77
78 @staticmethod
79 def pkg_nevra(pkg):
80 return "{}-{}:{}-{}.{}".format(pkg.name, pkg.epoch, pkg.version, pkg.release, pkg.arch)
81
82 @staticmethod
83 def pkg_repo(pkg):
84 return pkg.repoid
85
86 def advisory(self, pkg):
87 adv = self.base.upinfo.get_notice(pkg.nvr)
88 if adv:
89 return adv.get_metadata()['update_id']
90 return None
91
92 @staticmethod
93 def last_update():
94 return 0
95
96
97 @datasource(HostContext, [IsRhel7])
98 def yum_updates(_broker):
99 """
100 This datasource provides a list of available updates on the system.
101 It uses the yum python library installed locally, and collects list of
102 available package updates, along with advisory info where applicable.
103 """
104
105 if not _broker.get(IsRhel7):
106 raise SkipComponent("Yum updates currently only works on RHEL 7")
107
108 with UpdatesManager() as umgr:
109 umgr.load()
110
111 response = {
112 "releasever": umgr.releasever,
113 "basearch": umgr.basearch,
114 "update_list": {},
115 }
116
117 data = {'package_list': umgr.installed_packages()}
118 updates = {}
119 for pkg in data["package_list"]:
120 (nevra, updates_list) = umgr.updates(pkg)
121 updates[nevra] = updates_list
122 for (nevra, update_list) in updates.items():
123 if update_list:
124 out_list = []
125 for pkg in umgr.sorted_pkgs(update_list):
126 pkg_dict = {
127 "package": umgr.pkg_nevra(pkg),
128 "repository": umgr.pkg_repo(pkg),
129 "basearch": response["basearch"],
130 "releasever": response["releasever"],
131 }
132 erratum = umgr.advisory(pkg)
133 if erratum:
134 pkg_dict["erratum"] = erratum
135 out_list.append(pkg_dict)
136 response["update_list"][nevra] = {"available_updates": out_list}
137
138 ts = umgr.last_update()
139 if ts:
140 response["metadata_time"] = time.strftime("%FT%TZ", time.gmtime(ts))
141 return DatasourceProvider(content=json.dumps(response), relative_path='insights_commands/yum_updates_list')
142
[end of insights/specs/datasources/yum_updates.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/insights/specs/datasources/yum_updates.py b/insights/specs/datasources/yum_updates.py
--- a/insights/specs/datasources/yum_updates.py
+++ b/insights/specs/datasources/yum_updates.py
@@ -70,7 +70,7 @@
def updates(self, pkg):
nevra = pkg.nevra
updates_list = []
- for upg in self.updict[pkg.na]:
+ for upg in self.updict.get(pkg.na, []):
if upg.verGT(pkg):
updates_list.append(upg)
return nevra, updates_list
@@ -100,6 +100,32 @@
This datasource provides a list of available updates on the system.
It uses the yum python library installed locally, and collects list of
available package updates, along with advisory info where applicable.
+
+ Sample data returned::
+
+ {
+ "releasever": "8",
+ "basearch": "x86_64",
+ "update_list": {
+ "NetworkManager-1:1.22.8-4.el8.x86_64": {
+ "available_updates": [
+ {
+ "package": "NetworkManager-1:1.22.8-5.el8_2.x86_64",
+ "repository": "rhel-8-for-x86_64-baseos-rpms",
+ "basearch": "x86_64",
+ "releasever": "8",
+ "erratum": "RHSA-2020:3011"
+ }
+ ]
+ }
+ },
+ "metadata_time": "2021-01-01T09:39:45Z"
+ }
+
+ Returns:
+ list: List of available updates
+ Raises:
+ SkipComponent: Raised on systems different than RHEL 7
"""
if not _broker.get(IsRhel7):
| {"golden_diff": "diff --git a/insights/specs/datasources/yum_updates.py b/insights/specs/datasources/yum_updates.py\n--- a/insights/specs/datasources/yum_updates.py\n+++ b/insights/specs/datasources/yum_updates.py\n@@ -70,7 +70,7 @@\n def updates(self, pkg):\n nevra = pkg.nevra\n updates_list = []\n- for upg in self.updict[pkg.na]:\n+ for upg in self.updict.get(pkg.na, []):\n if upg.verGT(pkg):\n updates_list.append(upg)\n return nevra, updates_list\n@@ -100,6 +100,32 @@\n This datasource provides a list of available updates on the system.\n It uses the yum python library installed locally, and collects list of\n available package updates, along with advisory info where applicable.\n+\n+ Sample data returned::\n+\n+ {\n+ \"releasever\": \"8\",\n+ \"basearch\": \"x86_64\",\n+ \"update_list\": {\n+ \"NetworkManager-1:1.22.8-4.el8.x86_64\": {\n+ \"available_updates\": [\n+ {\n+ \"package\": \"NetworkManager-1:1.22.8-5.el8_2.x86_64\",\n+ \"repository\": \"rhel-8-for-x86_64-baseos-rpms\",\n+ \"basearch\": \"x86_64\",\n+ \"releasever\": \"8\",\n+ \"erratum\": \"RHSA-2020:3011\"\n+ }\n+ ]\n+ }\n+ },\n+ \"metadata_time\": \"2021-01-01T09:39:45Z\"\n+ }\n+\n+ Returns:\n+ list: List of available updates\n+ Raises:\n+ SkipComponent: Raised on systems different than RHEL 7\n \"\"\"\n \n if not _broker.get(IsRhel7):\n", "issue": "Need to add the Yum updates datasource to the documentation\nThe [PR](https://github.com/RedHatInsights/insights-core/pull/2993/files#diff-22151ef794ba196097984a47bf24b6759c261de6dc062ac541da099084e5c50a) adding this datasource did not add the datasource to the documentation [here](https://github.com/RedHatInsights/insights-core/blob/master/docs/custom_datasources_index.rst).\n", "before_files": [{"content": "\"\"\"\nCustom datasource for collecting yum updates\n\"\"\"\nimport json\nimport time\n\nfrom insights import datasource, HostContext, SkipComponent\nfrom insights.components.rhel_version import IsRhel7\nfrom insights.core.spec_factory import DatasourceProvider\n\nsorted_cmp = None\ntry:\n # cmp_to_key is not available in python 2.6, but it has sorted function which accepts cmp function\n def sorted_cmp(it, cmp):\n from functools import cmp_to_key\n return sorted(it, key=cmp_to_key(cmp))\nexcept ImportError:\n sorted_cmp = sorted\n\n\nclass UpdatesManager:\n \"\"\" Performs package resolution on yum based systems \"\"\"\n def __init__(self):\n import yum\n\n self.base = yum.YumBase()\n self.base.doGenericSetup(cache=1)\n self.releasever = self.base.conf.yumvar['releasever']\n self.basearch = self.base.conf.yumvar['basearch']\n self.packages = []\n self.repos = []\n self.updict = {}\n\n def __enter__(self):\n return self\n\n def __exit__(self, *args):\n pass\n\n @staticmethod\n def pkg_cmp(a, b):\n vercmp = a.verCMP(b)\n if vercmp != 0:\n return vercmp\n if a.repoid != b.repoid:\n return -1 if a.repoid < b.repoid else 1\n return 0\n\n def sorted_pkgs(self, pkgs):\n return sorted_cmp(pkgs, self.pkg_cmp)\n\n def load(self):\n self.base.doRepoSetup()\n self.base.doSackSetup()\n self.packages = self.base.pkgSack.returnPackages()\n self.repos = self.base.repos.repos\n self._build_updict()\n\n def _build_updict(self):\n self.updict = {}\n for pkg in self.packages:\n self.updict.setdefault(pkg.na, []).append(pkg)\n\n def enabled_repos(self):\n return [repo.id for repo in self.base.repos.listEnabled()]\n\n def installed_packages(self):\n return self.base.rpmdb.returnPackages()\n\n def updates(self, pkg):\n nevra = pkg.nevra\n updates_list = []\n for upg in self.updict[pkg.na]:\n 
if upg.verGT(pkg):\n updates_list.append(upg)\n return nevra, updates_list\n\n @staticmethod\n def pkg_nevra(pkg):\n return \"{}-{}:{}-{}.{}\".format(pkg.name, pkg.epoch, pkg.version, pkg.release, pkg.arch)\n\n @staticmethod\n def pkg_repo(pkg):\n return pkg.repoid\n\n def advisory(self, pkg):\n adv = self.base.upinfo.get_notice(pkg.nvr)\n if adv:\n return adv.get_metadata()['update_id']\n return None\n\n @staticmethod\n def last_update():\n return 0\n\n\n@datasource(HostContext, [IsRhel7])\ndef yum_updates(_broker):\n \"\"\"\n This datasource provides a list of available updates on the system.\n It uses the yum python library installed locally, and collects list of\n available package updates, along with advisory info where applicable.\n \"\"\"\n\n if not _broker.get(IsRhel7):\n raise SkipComponent(\"Yum updates currently only works on RHEL 7\")\n\n with UpdatesManager() as umgr:\n umgr.load()\n\n response = {\n \"releasever\": umgr.releasever,\n \"basearch\": umgr.basearch,\n \"update_list\": {},\n }\n\n data = {'package_list': umgr.installed_packages()}\n updates = {}\n for pkg in data[\"package_list\"]:\n (nevra, updates_list) = umgr.updates(pkg)\n updates[nevra] = updates_list\n for (nevra, update_list) in updates.items():\n if update_list:\n out_list = []\n for pkg in umgr.sorted_pkgs(update_list):\n pkg_dict = {\n \"package\": umgr.pkg_nevra(pkg),\n \"repository\": umgr.pkg_repo(pkg),\n \"basearch\": response[\"basearch\"],\n \"releasever\": response[\"releasever\"],\n }\n erratum = umgr.advisory(pkg)\n if erratum:\n pkg_dict[\"erratum\"] = erratum\n out_list.append(pkg_dict)\n response[\"update_list\"][nevra] = {\"available_updates\": out_list}\n\n ts = umgr.last_update()\n if ts:\n response[\"metadata_time\"] = time.strftime(\"%FT%TZ\", time.gmtime(ts))\n return DatasourceProvider(content=json.dumps(response), relative_path='insights_commands/yum_updates_list')\n", "path": "insights/specs/datasources/yum_updates.py"}]} | 2,007 | 467 |
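The docstring added by the golden diff documents the JSON shape the datasource emits. A small sketch of a consumer walking that structure; the literal below is the diff's own sample data, only the loop around it is new:

import json

sample = json.loads('''{
  "releasever": "8",
  "basearch": "x86_64",
  "update_list": {
    "NetworkManager-1:1.22.8-4.el8.x86_64": {
      "available_updates": [
        {"package": "NetworkManager-1:1.22.8-5.el8_2.x86_64",
         "repository": "rhel-8-for-x86_64-baseos-rpms",
         "basearch": "x86_64",
         "releasever": "8",
         "erratum": "RHSA-2020:3011"}
      ]
    }
  },
  "metadata_time": "2021-01-01T09:39:45Z"
}''')

for nevra, info in sample["update_list"].items():
    for upd in info["available_updates"]:
        # "erratum" is optional in the schema, hence .get()
        print(nevra, "->", upd["package"], upd.get("erratum", "(no advisory)"))

Note that the first hunk of the diff also fixes a latent bug beyond the documentation request: self.updict.get(pkg.na, []) instead of self.updict[pkg.na], so installed packages with no update candidates no longer raise KeyError.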
gh_patches_debug_4347 | rasdani/github-patches | git_diff | ipython__ipython-5395 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Converting notebooks with spaces in their names to RST gives broken images
I am using `ipython nbconvert --to rst example1.ipynb` to convert my example notebooks into reStructuredText, for incorporation into my package's Sphinx documentation. This works quite well, unless the filename has a space in it. In this case, any image files from my notebooks are lost when I run Sphinx's `make html`.
My guess is that the problem seems is in the generated rst file, where the `.. image` command may need to be escaped or quoted somehow to work with spaces in the filename.
I note that a similar issue was reported and resolved for latex output in issue #3774, however the solution was specific to latex.
</issue>
<code>
[start of IPython/nbconvert/filters/markdown.py]
1 """Markdown filters
2 This file contains a collection of utility filters for dealing with
3 markdown within Jinja templates.
4 """
5 #-----------------------------------------------------------------------------
6 # Copyright (c) 2013, the IPython Development Team.
7 #
8 # Distributed under the terms of the Modified BSD License.
9 #
10 # The full license is in the file COPYING.txt, distributed with this software.
11 #-----------------------------------------------------------------------------
12
13 #-----------------------------------------------------------------------------
14 # Imports
15 #-----------------------------------------------------------------------------
16 from __future__ import print_function
17
18 # Stdlib imports
19 import os
20 import subprocess
21 import warnings
22 from io import TextIOWrapper, BytesIO
23
24 # IPython imports
25 from IPython.nbconvert.utils.pandoc import pandoc
26 from IPython.nbconvert.utils.exceptions import ConversionException
27 from IPython.utils.process import get_output_error_code
28 from IPython.utils.py3compat import cast_bytes
29 from IPython.utils.version import check_version
30
31 #-----------------------------------------------------------------------------
32 # Functions
33 #-----------------------------------------------------------------------------
34 marked = os.path.join(os.path.dirname(__file__), "marked.js")
35 _node = None
36
37 __all__ = [
38 'markdown2html',
39 'markdown2html_pandoc',
40 'markdown2html_marked',
41 'markdown2latex',
42 'markdown2rst',
43 ]
44
45 class NodeJSMissing(ConversionException):
46 """Exception raised when node.js is missing."""
47 pass
48
49 def markdown2latex(source):
50 """Convert a markdown string to LaTeX via pandoc.
51
52 This function will raise an error if pandoc is not installed.
53 Any error messages generated by pandoc are printed to stderr.
54
55 Parameters
56 ----------
57 source : string
58 Input string, assumed to be valid markdown.
59
60 Returns
61 -------
62 out : string
63 Output as returned by pandoc.
64 """
65 return pandoc(source, 'markdown', 'latex')
66
67 def markdown2html(source):
68 """Convert a markdown string to HTML"""
69 global _node
70 if _node is None:
71 # prefer md2html via marked if node.js >= 0.9.12 is available
72 # node is called nodejs on debian, so try that first
73 _node = 'nodejs'
74 if not _verify_node(_node):
75 _node = 'node'
76 if not _verify_node(_node):
77 warnings.warn( "Node.js 0.9.12 or later wasn't found.\n" +
78 "Nbconvert will try to use Pandoc instead.")
79 _node = False
80 if _node:
81 return markdown2html_marked(source)
82 else:
83 return markdown2html_pandoc(source)
84
85 def markdown2html_pandoc(source):
86 """Convert a markdown string to HTML via pandoc"""
87 return pandoc(source, 'markdown', 'html', extra_args=['--mathjax'])
88
89 def markdown2html_marked(source, encoding='utf-8'):
90 """Convert a markdown string to HTML via marked"""
91 command = [_node, marked]
92 try:
93 p = subprocess.Popen(command,
94 stdin=subprocess.PIPE, stdout=subprocess.PIPE
95 )
96 except OSError as e:
97 raise NodeJSMissing(
98 "The command '%s' returned an error: %s.\n" % (" ".join(command), e) +
99 "Please check that Node.js is installed."
100 )
101 out, _ = p.communicate(cast_bytes(source, encoding))
102 out = TextIOWrapper(BytesIO(out), encoding, 'replace').read()
103 return out.rstrip('\n')
104
105 def markdown2rst(source):
106 """Convert a markdown string to LaTeX via pandoc.
107
108 This function will raise an error if pandoc is not installed.
109 Any error messages generated by pandoc are printed to stderr.
110
111 Parameters
112 ----------
113 source : string
114 Input string, assumed to be valid markdown.
115
116 Returns
117 -------
118 out : string
119 Output as returned by pandoc.
120 """
121 return pandoc(source, 'markdown', 'rst')
122
123 def _verify_node(cmd):
124 """Verify that the node command exists and is at least the minimum supported
125 version of node.
126
127 Parameters
128 ----------
129 cmd : string
130 Node command to verify (i.e 'node')."""
131 try:
132 out, err, return_code = get_output_error_code([cmd, '--version'])
133 except OSError:
134 # Command not found
135 return False
136 if return_code:
137 # Command error
138 return False
139 return check_version(out.lstrip('v'), '0.9.12')
140
[end of IPython/nbconvert/filters/markdown.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/IPython/nbconvert/filters/markdown.py b/IPython/nbconvert/filters/markdown.py
--- a/IPython/nbconvert/filters/markdown.py
+++ b/IPython/nbconvert/filters/markdown.py
@@ -103,7 +103,7 @@
return out.rstrip('\n')
def markdown2rst(source):
- """Convert a markdown string to LaTeX via pandoc.
+ """Convert a markdown string to ReST via pandoc.
This function will raise an error if pandoc is not installed.
Any error messages generated by pandoc are printed to stderr.
| {"golden_diff": "diff --git a/IPython/nbconvert/filters/markdown.py b/IPython/nbconvert/filters/markdown.py\n--- a/IPython/nbconvert/filters/markdown.py\n+++ b/IPython/nbconvert/filters/markdown.py\n@@ -103,7 +103,7 @@\n return out.rstrip('\\n')\n \n def markdown2rst(source):\n- \"\"\"Convert a markdown string to LaTeX via pandoc.\n+ \"\"\"Convert a markdown string to ReST via pandoc.\n \n This function will raise an error if pandoc is not installed.\n Any error messages generated by pandoc are printed to stderr.\n", "issue": "Converting notebooks with spaces in their names to RST gives broken images\nI am using `ipython nbconvert --to rst example1.ipynb` to convert my example notebooks into reStructuredText, for incorporation into my package's Sphinx documentation. This works quite well, unless the filename has a space in it. In this case, any image files from my notebooks are lost when I run Sphinx's `make html`. \n\nMy guess is that the problem seems is in the generated rst file, where the `.. image` command may need to be escaped or quoted somehow to work with spaces in the filename.\n\nI note that a similar issue was reported and resolved for latex output in issue #3774, however the solution was specific to latex.\n\n", "before_files": [{"content": "\"\"\"Markdown filters\nThis file contains a collection of utility filters for dealing with \nmarkdown within Jinja templates.\n\"\"\"\n#-----------------------------------------------------------------------------\n# Copyright (c) 2013, the IPython Development Team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\nfrom __future__ import print_function\n\n# Stdlib imports\nimport os\nimport subprocess\nimport warnings\nfrom io import TextIOWrapper, BytesIO\n\n# IPython imports\nfrom IPython.nbconvert.utils.pandoc import pandoc\nfrom IPython.nbconvert.utils.exceptions import ConversionException\nfrom IPython.utils.process import get_output_error_code\nfrom IPython.utils.py3compat import cast_bytes\nfrom IPython.utils.version import check_version\n\n#-----------------------------------------------------------------------------\n# Functions\n#-----------------------------------------------------------------------------\nmarked = os.path.join(os.path.dirname(__file__), \"marked.js\")\n_node = None\n\n__all__ = [\n 'markdown2html',\n 'markdown2html_pandoc',\n 'markdown2html_marked',\n 'markdown2latex',\n 'markdown2rst',\n]\n\nclass NodeJSMissing(ConversionException):\n \"\"\"Exception raised when node.js is missing.\"\"\"\n pass\n\ndef markdown2latex(source):\n \"\"\"Convert a markdown string to LaTeX via pandoc.\n\n This function will raise an error if pandoc is not installed.\n Any error messages generated by pandoc are printed to stderr.\n\n Parameters\n ----------\n source : string\n Input string, assumed to be valid markdown.\n\n Returns\n -------\n out : string\n Output as returned by pandoc.\n \"\"\"\n return pandoc(source, 'markdown', 'latex')\n\ndef markdown2html(source):\n \"\"\"Convert a markdown string to HTML\"\"\"\n global _node\n if _node is None:\n # prefer md2html via marked if node.js >= 0.9.12 is available\n # node is called nodejs on debian, so try that first\n _node = 
'nodejs'\n if not _verify_node(_node):\n _node = 'node'\n if not _verify_node(_node):\n warnings.warn( \"Node.js 0.9.12 or later wasn't found.\\n\" +\n \"Nbconvert will try to use Pandoc instead.\")\n _node = False\n if _node:\n return markdown2html_marked(source)\n else:\n return markdown2html_pandoc(source)\n\ndef markdown2html_pandoc(source):\n \"\"\"Convert a markdown string to HTML via pandoc\"\"\"\n return pandoc(source, 'markdown', 'html', extra_args=['--mathjax'])\n\ndef markdown2html_marked(source, encoding='utf-8'):\n \"\"\"Convert a markdown string to HTML via marked\"\"\"\n command = [_node, marked]\n try:\n p = subprocess.Popen(command,\n stdin=subprocess.PIPE, stdout=subprocess.PIPE\n )\n except OSError as e:\n raise NodeJSMissing(\n \"The command '%s' returned an error: %s.\\n\" % (\" \".join(command), e) +\n \"Please check that Node.js is installed.\"\n )\n out, _ = p.communicate(cast_bytes(source, encoding))\n out = TextIOWrapper(BytesIO(out), encoding, 'replace').read()\n return out.rstrip('\\n')\n\ndef markdown2rst(source):\n \"\"\"Convert a markdown string to LaTeX via pandoc.\n\n This function will raise an error if pandoc is not installed.\n Any error messages generated by pandoc are printed to stderr.\n\n Parameters\n ----------\n source : string\n Input string, assumed to be valid markdown.\n\n Returns\n -------\n out : string\n Output as returned by pandoc.\n \"\"\"\n return pandoc(source, 'markdown', 'rst')\n\ndef _verify_node(cmd):\n \"\"\"Verify that the node command exists and is at least the minimum supported\n version of node.\n\n Parameters\n ----------\n cmd : string\n Node command to verify (i.e 'node').\"\"\"\n try:\n out, err, return_code = get_output_error_code([cmd, '--version'])\n except OSError:\n # Command not found\n return False\n if return_code:\n # Command error\n return False\n return check_version(out.lstrip('v'), '0.9.12')\n", "path": "IPython/nbconvert/filters/markdown.py"}]} | 1,959 | 136 |
gh_patches_debug_16961 | rasdani/github-patches | git_diff | ietf-tools__datatracker-5977 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add "totals" to "view feedback" page
### Description
It would be useful to add totals for each column in the "view feedback" page.
### Code of Conduct
- [X] I agree to follow the [IETF's Code of Conduct](https://github.com/ietf-tools/.github/blob/main/CODE_OF_CONDUCT.md)
</issue>
<code>
[start of ietf/nomcom/templatetags/nomcom_tags.py]
1 # Copyright The IETF Trust 2013-2019, All Rights Reserved
2 import os
3 import tempfile
4 import re
5
6 from django import template
7 from django.conf import settings
8 from django.template.defaultfilters import linebreaksbr, force_escape
9 from django.utils.encoding import force_str, DjangoUnicodeDecodeError
10 from django.utils.safestring import mark_safe
11
12 import debug # pyflakes:ignore
13
14 from ietf.nomcom.utils import get_nomcom_by_year, retrieve_nomcom_private_key
15 from ietf.person.models import Person
16 from ietf.utils.log import log
17 from ietf.utils.pipe import pipe
18
19
20 register = template.Library()
21
22
23 @register.filter
24 def is_chair_or_advisor(user, year):
25 if not user or not year:
26 return False
27 nomcom = get_nomcom_by_year(year=year)
28 return nomcom.group.has_role(user, ["chair","advisor"])
29
30
31 @register.filter
32 def has_publickey(nomcom):
33 return nomcom and nomcom.public_key and True or False
34
35 @register.filter
36 def lookup(container,key):
37 return container and container.get(key,None)
38
39 @register.filter
40 def formatted_email(address):
41 person = None
42 addrmatch = re.search('<([^>]+)>',address)
43 if addrmatch:
44 addr = addrmatch.group(1)
45 else:
46 addr = address
47 if addr:
48 persons = Person.objects.filter(email__address__in=[addr])
49 person = persons and persons[0] or None
50 if person and person.name:
51 return "%s <%s>" % (person.plain_name(), addr)
52 else:
53 return address
54
55
56 @register.simple_tag
57 def decrypt(string, request, year, plain=False):
58 try:
59 key = retrieve_nomcom_private_key(request, year)
60 except UnicodeError:
61 return f"-*- Encrypted text [Error retrieving private key, contact the secretariat ({settings.SECRETARIAT_SUPPORT_EMAIL})]"
62 if not key:
63 return '-*- Encrypted text [No private key provided] -*-'
64
65 encrypted_file = tempfile.NamedTemporaryFile(delete=False)
66 encrypted_file.write(string)
67 encrypted_file.close()
68
69 command = "%s smime -decrypt -in %s -inkey /dev/stdin"
70 code, out, error = pipe(command % (settings.OPENSSL_COMMAND,
71 encrypted_file.name), key)
72 try:
73 out = force_str(out)
74 except DjangoUnicodeDecodeError:
75 pass
76 if code != 0:
77 log("openssl error: %s:\n Error %s: %s" %(command, code, error))
78
79 os.unlink(encrypted_file.name)
80
81 if error:
82 return '-*- Encrypted text [Your private key is invalid] -*-'
83
84 if not plain:
85 return force_escape(linebreaksbr(out))
86 return mark_safe(force_escape(out))
87
[end of ietf/nomcom/templatetags/nomcom_tags.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ietf/nomcom/templatetags/nomcom_tags.py b/ietf/nomcom/templatetags/nomcom_tags.py
--- a/ietf/nomcom/templatetags/nomcom_tags.py
+++ b/ietf/nomcom/templatetags/nomcom_tags.py
@@ -1,8 +1,10 @@
-# Copyright The IETF Trust 2013-2019, All Rights Reserved
+# Copyright The IETF Trust 2013-2023, All Rights Reserved
import os
import tempfile
import re
+from collections import defaultdict
+
from django import template
from django.conf import settings
from django.template.defaultfilters import linebreaksbr, force_escape
@@ -84,3 +86,11 @@
if not plain:
return force_escape(linebreaksbr(out))
return mark_safe(force_escape(out))
+
[email protected]
+def feedback_totals(staterank_list):
+ totals = defaultdict(lambda: 0)
+ for fb_dict in staterank_list:
+ for fbtype_name, fbtype_count, _ in fb_dict['feedback']:
+ totals[fbtype_name] += fbtype_count
+ return totals.values()
| {"golden_diff": "diff --git a/ietf/nomcom/templatetags/nomcom_tags.py b/ietf/nomcom/templatetags/nomcom_tags.py\n--- a/ietf/nomcom/templatetags/nomcom_tags.py\n+++ b/ietf/nomcom/templatetags/nomcom_tags.py\n@@ -1,8 +1,10 @@\n-# Copyright The IETF Trust 2013-2019, All Rights Reserved\n+# Copyright The IETF Trust 2013-2023, All Rights Reserved\n import os\n import tempfile\n import re\n \n+from collections import defaultdict\n+\n from django import template\n from django.conf import settings\n from django.template.defaultfilters import linebreaksbr, force_escape\n@@ -84,3 +86,11 @@\n if not plain:\n return force_escape(linebreaksbr(out))\n return mark_safe(force_escape(out))\n+\[email protected]\n+def feedback_totals(staterank_list):\n+ totals = defaultdict(lambda: 0)\n+ for fb_dict in staterank_list:\n+ for fbtype_name, fbtype_count, _ in fb_dict['feedback']:\n+ totals[fbtype_name] += fbtype_count\n+ return totals.values()\n", "issue": "Add \"totals\" to \"view feedback\" page\n### Description\n\nIt would be useful to add totals for each column in the \"view feedback\" page.\n\n### Code of Conduct\n\n- [X] I agree to follow the [IETF's Code of Conduct](https://github.com/ietf-tools/.github/blob/main/CODE_OF_CONDUCT.md)\n", "before_files": [{"content": "# Copyright The IETF Trust 2013-2019, All Rights Reserved\nimport os\nimport tempfile\nimport re\n\nfrom django import template\nfrom django.conf import settings\nfrom django.template.defaultfilters import linebreaksbr, force_escape\nfrom django.utils.encoding import force_str, DjangoUnicodeDecodeError\nfrom django.utils.safestring import mark_safe\n\nimport debug # pyflakes:ignore\n\nfrom ietf.nomcom.utils import get_nomcom_by_year, retrieve_nomcom_private_key\nfrom ietf.person.models import Person\nfrom ietf.utils.log import log\nfrom ietf.utils.pipe import pipe\n\n\nregister = template.Library()\n\n\[email protected]\ndef is_chair_or_advisor(user, year):\n if not user or not year:\n return False\n nomcom = get_nomcom_by_year(year=year)\n return nomcom.group.has_role(user, [\"chair\",\"advisor\"])\n\n\[email protected]\ndef has_publickey(nomcom):\n return nomcom and nomcom.public_key and True or False\n\[email protected]\ndef lookup(container,key):\n return container and container.get(key,None)\n\[email protected]\ndef formatted_email(address):\n person = None\n addrmatch = re.search('<([^>]+)>',address)\n if addrmatch:\n addr = addrmatch.group(1)\n else:\n addr = address\n if addr:\n persons = Person.objects.filter(email__address__in=[addr])\n person = persons and persons[0] or None\n if person and person.name:\n return \"%s <%s>\" % (person.plain_name(), addr) \n else:\n return address\n\n\[email protected]_tag\ndef decrypt(string, request, year, plain=False):\n try:\n key = retrieve_nomcom_private_key(request, year)\n except UnicodeError:\n return f\"-*- Encrypted text [Error retrieving private key, contact the secretariat ({settings.SECRETARIAT_SUPPORT_EMAIL})]\"\n if not key:\n return '-*- Encrypted text [No private key provided] -*-'\n\n encrypted_file = tempfile.NamedTemporaryFile(delete=False)\n encrypted_file.write(string)\n encrypted_file.close()\n\n command = \"%s smime -decrypt -in %s -inkey /dev/stdin\"\n code, out, error = pipe(command % (settings.OPENSSL_COMMAND,\n encrypted_file.name), key)\n try:\n out = force_str(out)\n except DjangoUnicodeDecodeError:\n pass\n if code != 0:\n log(\"openssl error: %s:\\n Error %s: %s\" %(command, code, error))\n\n os.unlink(encrypted_file.name)\n\n if error:\n return '-*- Encrypted text 
[Your private key is invalid] -*-'\n\n if not plain:\n return force_escape(linebreaksbr(out))\n return mark_safe(force_escape(out))\n", "path": "ietf/nomcom/templatetags/nomcom_tags.py"}]} | 1,416 | 277 |
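The feedback_totals filter added by the golden diff sums each feedback type across the whole page. A minimal sketch exercising it as a plain function; the input mirrors the structure the filter expects (each 'feedback' entry a (type name, count, extra) tuple) with made-up values:

from collections import defaultdict

def feedback_totals(staterank_list):
    totals = defaultdict(lambda: 0)
    for fb_dict in staterank_list:
        for fbtype_name, fbtype_count, _ in fb_dict['feedback']:
            totals[fbtype_name] += fbtype_count
    return totals.values()

staterank_list = [
    {'feedback': [('comment', 3, None), ('nomination', 1, None)]},
    {'feedback': [('comment', 2, None), ('questionnaire', 4, None)]},
]
print(list(feedback_totals(staterank_list)))  # [5, 1, 4]

In a template, the totals row would then be rendered along the lines of {% for total in staterank_list|feedback_totals %}<td>{{ total }}</td>{% endfor %}; the template change itself is not part of this diff.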
gh_patches_debug_23210 | rasdani/github-patches | git_diff | mirumee__ariadne-68 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Create shortcut function for GraphQLMiddleware.make_simple_server
Getting started with Ariadne could be made even simpler by providing shortcut function abstracting the `GraphQLMiddleware` away on first contact, thus saving users possible confusion about what they really are doing.
</issue>
<code>
[start of ariadne/utils.py]
1 from graphql import parse
2
3
4 def gql(value: str) -> str:
5 parse(value)
6 return value
7
[end of ariadne/utils.py]
[start of ariadne/__init__.py]
1 from .executable_schema import make_executable_schema
2 from .resolvers import add_resolve_functions_to_schema, default_resolver, resolve_to
3 from .utils import gql
4 from .wsgi_middleware import GraphQLMiddleware
5
6 __all__ = [
7 "GraphQLMiddleware",
8 "add_resolve_functions_to_schema",
9 "default_resolver",
10 "make_executable_schema",
11 "resolve_to",
12 "gql",
13 ]
14
[end of ariadne/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ariadne/__init__.py b/ariadne/__init__.py
--- a/ariadne/__init__.py
+++ b/ariadne/__init__.py
@@ -1,6 +1,6 @@
from .executable_schema import make_executable_schema
from .resolvers import add_resolve_functions_to_schema, default_resolver, resolve_to
-from .utils import gql
+from .utils import gql, start_simple_server
from .wsgi_middleware import GraphQLMiddleware
__all__ = [
@@ -10,4 +10,5 @@
"make_executable_schema",
"resolve_to",
"gql",
+ "start_simple_server",
]
diff --git a/ariadne/utils.py b/ariadne/utils.py
--- a/ariadne/utils.py
+++ b/ariadne/utils.py
@@ -1,6 +1,26 @@
+from typing import List, Union
+
from graphql import parse
+from .wsgi_middleware import GraphQLMiddleware
+
def gql(value: str) -> str:
parse(value)
return value
+
+
+def start_simple_server(
+ type_defs: Union[str, List[str]],
+ resolvers: Union[dict, List[dict]],
+ host: str = "127.0.0.1",
+ port: int = 8888,
+):
+ try:
+ print("Simple GraphQL server is running on the http://%s:%s" % (host, port))
+ graphql_server = GraphQLMiddleware.make_simple_server(
+ type_defs, resolvers, host, port
+ )
+ graphql_server.serve_forever()
+ except KeyboardInterrupt:
+ pass
| {"golden_diff": "diff --git a/ariadne/__init__.py b/ariadne/__init__.py\n--- a/ariadne/__init__.py\n+++ b/ariadne/__init__.py\n@@ -1,6 +1,6 @@\n from .executable_schema import make_executable_schema\n from .resolvers import add_resolve_functions_to_schema, default_resolver, resolve_to\n-from .utils import gql\n+from .utils import gql, start_simple_server\n from .wsgi_middleware import GraphQLMiddleware\n \n __all__ = [\n@@ -10,4 +10,5 @@\n \"make_executable_schema\",\n \"resolve_to\",\n \"gql\",\n+ \"start_simple_server\",\n ]\ndiff --git a/ariadne/utils.py b/ariadne/utils.py\n--- a/ariadne/utils.py\n+++ b/ariadne/utils.py\n@@ -1,6 +1,26 @@\n+from typing import List, Union\n+\n from graphql import parse\n \n+from .wsgi_middleware import GraphQLMiddleware\n+\n \n def gql(value: str) -> str:\n parse(value)\n return value\n+\n+\n+def start_simple_server(\n+ type_defs: Union[str, List[str]],\n+ resolvers: Union[dict, List[dict]],\n+ host: str = \"127.0.0.1\",\n+ port: int = 8888,\n+):\n+ try:\n+ print(\"Simple GraphQL server is running on the http://%s:%s\" % (host, port))\n+ graphql_server = GraphQLMiddleware.make_simple_server(\n+ type_defs, resolvers, host, port\n+ )\n+ graphql_server.serve_forever()\n+ except KeyboardInterrupt:\n+ pass\n", "issue": "Create shortcut function for GraphQLMiddleware.make_simple_server\nGetting started with Ariadne could be made even simpler by providing shortcut function abstracting the `GraphQLMiddleware` away on first contact, thus saving users possible confusion about what they really are doing.\n", "before_files": [{"content": "from graphql import parse\n\n\ndef gql(value: str) -> str:\n parse(value)\n return value\n", "path": "ariadne/utils.py"}, {"content": "from .executable_schema import make_executable_schema\nfrom .resolvers import add_resolve_functions_to_schema, default_resolver, resolve_to\nfrom .utils import gql\nfrom .wsgi_middleware import GraphQLMiddleware\n\n__all__ = [\n \"GraphQLMiddleware\",\n \"add_resolve_functions_to_schema\",\n \"default_resolver\",\n \"make_executable_schema\",\n \"resolve_to\",\n \"gql\",\n]\n", "path": "ariadne/__init__.py"}]} | 741 | 376 |
gh_patches_debug_16861 | rasdani/github-patches | git_diff | hpcaitech__ColossalAI-3656 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[DOC]: Unnecessary step to reformat questions
### 📚 The doc issue
The current documentation contains unnecessary step to reformat questions from FastChat's format to our internal format.
[tensor] fix some unittests
[tensor] fix some unittests
</issue>
<code>
[start of applications/Chat/evaluate/format_questions.py]
1 import argparse
2 import os
3 import json
4 import copy
5
6 from utils import jdump, get_json_list
7
8
9 def format_questions(args):
10 questions = get_json_list(args.questions_path)
11 keys=questions[0].keys()
12
13 formatted_questions=copy.deepcopy(questions)
14 for i in range(len(formatted_questions)):
15 formatted_questions[i]['instruction']=questions[i]['text']
16 formatted_questions[i]['input']=""
17 formatted_questions[i]['output']=""
18 formatted_questions[i]['id']=questions[i]['question_id']
19 for key in keys:
20 if key=="category":
21 continue
22 del formatted_questions[i][key]
23
24 jdump(formatted_questions, args.save_path)
25
26 if __name__ == '__main__':
27 parser = argparse.ArgumentParser()
28 parser.add_argument('--questions_path', type=str, default='table/question.jsonl')
29 parser.add_argument('--save_path', type=str, default="table/questions.json")
30 args = parser.parse_args()
31 format_questions(args)
[end of applications/Chat/evaluate/format_questions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/applications/Chat/evaluate/format_questions.py b/applications/Chat/evaluate/format_questions.py
deleted file mode 100644
--- a/applications/Chat/evaluate/format_questions.py
+++ /dev/null
@@ -1,31 +0,0 @@
-import argparse
-import os
-import json
-import copy
-
-from utils import jdump, get_json_list
-
-
-def format_questions(args):
- questions = get_json_list(args.questions_path)
- keys=questions[0].keys()
-
- formatted_questions=copy.deepcopy(questions)
- for i in range(len(formatted_questions)):
- formatted_questions[i]['instruction']=questions[i]['text']
- formatted_questions[i]['input']=""
- formatted_questions[i]['output']=""
- formatted_questions[i]['id']=questions[i]['question_id']
- for key in keys:
- if key=="category":
- continue
- del formatted_questions[i][key]
-
- jdump(formatted_questions, args.save_path)
-
-if __name__ == '__main__':
- parser = argparse.ArgumentParser()
- parser.add_argument('--questions_path', type=str, default='table/question.jsonl')
- parser.add_argument('--save_path', type=str, default="table/questions.json")
- args = parser.parse_args()
- format_questions(args)
\ No newline at end of file
| {"golden_diff": "diff --git a/applications/Chat/evaluate/format_questions.py b/applications/Chat/evaluate/format_questions.py\ndeleted file mode 100644\n--- a/applications/Chat/evaluate/format_questions.py\n+++ /dev/null\n@@ -1,31 +0,0 @@\n-import argparse\n-import os\n-import json\n-import copy\n-\n-from utils import jdump, get_json_list\n-\n-\n-def format_questions(args):\n- questions = get_json_list(args.questions_path)\n- keys=questions[0].keys()\n- \n- formatted_questions=copy.deepcopy(questions)\n- for i in range(len(formatted_questions)):\n- formatted_questions[i]['instruction']=questions[i]['text']\n- formatted_questions[i]['input']=\"\"\n- formatted_questions[i]['output']=\"\"\n- formatted_questions[i]['id']=questions[i]['question_id']\n- for key in keys:\n- if key==\"category\":\n- continue\n- del formatted_questions[i][key]\n- \n- jdump(formatted_questions, args.save_path)\n-\n-if __name__ == '__main__':\n- parser = argparse.ArgumentParser()\n- parser.add_argument('--questions_path', type=str, default='table/question.jsonl')\n- parser.add_argument('--save_path', type=str, default=\"table/questions.json\")\n- args = parser.parse_args()\n- format_questions(args)\n\\ No newline at end of file\n", "issue": "[DOC]: Unnecessary step to reformat questions\n### \ud83d\udcda The doc issue\n\nThe current documentation contains unnecessary step to reformat questions from FastChat's format to our internal format.\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "import argparse\nimport os\nimport json\nimport copy\n\nfrom utils import jdump, get_json_list\n\n\ndef format_questions(args):\n questions = get_json_list(args.questions_path)\n keys=questions[0].keys()\n \n formatted_questions=copy.deepcopy(questions)\n for i in range(len(formatted_questions)):\n formatted_questions[i]['instruction']=questions[i]['text']\n formatted_questions[i]['input']=\"\"\n formatted_questions[i]['output']=\"\"\n formatted_questions[i]['id']=questions[i]['question_id']\n for key in keys:\n if key==\"category\":\n continue\n del formatted_questions[i][key]\n \n jdump(formatted_questions, args.save_path)\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--questions_path', type=str, default='table/question.jsonl')\n parser.add_argument('--save_path', type=str, default=\"table/questions.json\")\n args = parser.parse_args()\n format_questions(args)", "path": "applications/Chat/evaluate/format_questions.py"}]} | 856 | 299 |
gh_patches_debug_6038 | rasdani/github-patches | git_diff | wemake-services__wemake-python-styleguide-16 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Forbid `handler` as a variable name
We need to add `handler` to our variable blacklist.
</issue>
<code>
[start of wemake_python_styleguide/constants.py]
1 # -*- coding: utf-8 -*-
2
3 BAD_FUNCTIONS = frozenset((
4 # Code generation:
5 'eval',
6 'exec',
7 'compile',
8
9 # Magic:
10 'globals',
11 'locals',
12 'vars',
13 'dir',
14
15 # IO:
16 'input',
17 'help',
18
19 # Attribute access:
20 'hasattr',
21 'delattr',
22 ))
23
24 BAD_IMPORT_FUNCTIONS = frozenset((
25 '__import__',
26 ))
27
28 BAD_MODULE_METADATA_VARIABLES = frozenset((
29 '__author__',
30 ))
31
32 BAD_VARIABLE_NAMES = frozenset((
33 'data',
34 'result',
35 'results',
36 'item',
37 'items',
38 'value',
39 'values',
40 'val',
41 'vals',
42 'var',
43 'vars',
44 'content',
45 'contents',
46 'info',
47 ))
48
49 NESTED_CLASSES_WHITELIST = frozenset((
50 'Meta',
51 ))
52
[end of wemake_python_styleguide/constants.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/wemake_python_styleguide/constants.py b/wemake_python_styleguide/constants.py
--- a/wemake_python_styleguide/constants.py
+++ b/wemake_python_styleguide/constants.py
@@ -19,6 +19,10 @@
# Attribute access:
'hasattr',
'delattr',
+
+ # Too generic:
+ 'handler',
+ 'handle',
))
BAD_IMPORT_FUNCTIONS = frozenset((
@@ -44,6 +48,7 @@
'content',
'contents',
'info',
+ 'handler',
))
NESTED_CLASSES_WHITELIST = frozenset((
| {"golden_diff": "diff --git a/wemake_python_styleguide/constants.py b/wemake_python_styleguide/constants.py\n--- a/wemake_python_styleguide/constants.py\n+++ b/wemake_python_styleguide/constants.py\n@@ -19,6 +19,10 @@\n # Attribute access:\n 'hasattr',\n 'delattr',\n+\n+ # Too generic:\n+ 'handler',\n+ 'handle',\n ))\n \n BAD_IMPORT_FUNCTIONS = frozenset((\n@@ -44,6 +48,7 @@\n 'content',\n 'contents',\n 'info',\n+ 'handler',\n ))\n \n NESTED_CLASSES_WHITELIST = frozenset((\n", "issue": "Forbid `handler` as a variable name\nWe need to add `handler` to our variable blacklist.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nBAD_FUNCTIONS = frozenset((\n # Code generation:\n 'eval',\n 'exec',\n 'compile',\n\n # Magic:\n 'globals',\n 'locals',\n 'vars',\n 'dir',\n\n # IO:\n 'input',\n 'help',\n\n # Attribute access:\n 'hasattr',\n 'delattr',\n))\n\nBAD_IMPORT_FUNCTIONS = frozenset((\n '__import__',\n))\n\nBAD_MODULE_METADATA_VARIABLES = frozenset((\n '__author__',\n))\n\nBAD_VARIABLE_NAMES = frozenset((\n 'data',\n 'result',\n 'results',\n 'item',\n 'items',\n 'value',\n 'values',\n 'val',\n 'vals',\n 'var',\n 'vars',\n 'content',\n 'contents',\n 'info',\n))\n\nNESTED_CLASSES_WHITELIST = frozenset((\n 'Meta',\n))\n", "path": "wemake_python_styleguide/constants.py"}]} | 860 | 144 |
gh_patches_debug_24388 | rasdani/github-patches | git_diff | pulp__pulpcore-4190 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Reclaim space for repository fails with Cannot delete some instances of model 'Artifact' because they are referenced through protected foreign keys: 'ContentArtifact.artifact'."
**Version**
3.16, but probably all versions
**Describe the bug**
- Reclaim space for repository fails with the following error.
~~~
Task paused with error: "("Cannot delete some instances of model 'Artifact' because they are referenced through protected foreign keys: 'ContentArtifact.artifact'.", {<ContentArtifact: pk=452959ad-c045-4e85-bf9f-6651ba37f57d>})"
~~~
**To Reproduce**
See BZ
**Additional context**
https://bugzilla.redhat.com/show_bug.cgi?id=2169322
</issue>
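Before the code below, a minimal sketch of the defensive pattern the traceback points at, assuming only Django's ORM: `Artifact.delete()` raises `ProtectedError` whenever a `ContentArtifact` row still references the artifact through an `on_delete=PROTECT` key, so a reclaim loop should log and skip such artifacts instead of pausing the whole task. The helper name and logger wiring are illustrative, not taken from the project.

```python
from logging import getLogger

from django.db.models.deletion import ProtectedError

log = getLogger(__name__)


def delete_unreferenced(artifacts):
    """Delete artifacts one by one, skipping any that are still protected.

    An artifact can be shared by two different content units; deleting it
    while a protected foreign key still points at it raises ProtectedError,
    so we log and continue rather than fail the task.
    """
    deleted = 0
    for artifact in artifacts.iterator():
        try:
            artifact.delete()  # also cleans the file off the filesystem
        except ProtectedError as exc:
            log.info(exc)  # still referenced elsewhere; leave it in place
        else:
            deleted += 1
    return deleted
```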
<code>
[start of pulpcore/app/tasks/reclaim_space.py]
1 from pulpcore.app.models import (
2 Artifact,
3 Content,
4 ContentArtifact,
5 ProgressReport,
6 PublishedMetadata,
7 Repository,
8 RepositoryVersion,
9 )
10 from pulpcore.app.util import get_domain
11
12
13 def reclaim_space(repo_pks, keeplist_rv_pks=None, force=False):
14 """
15 This task frees-up disk space by removing Artifact files from the filesystem for Content
16 exclusive to the list of provided repos.
17
18     Note: content marked as `protected` will be excluded from the reclaim disk space.
19
20 Kwargs:
21 repo_pks (list): A list of repo pks the disk reclaim space is performed on.
22 keeplist_rv_pks (list): A list of repo version pks that will be excluded from the reclaim
23 disk space.
24 force (bool): If True, uploaded content will be taken into account.
25
26 """
27 reclaimed_repos = Repository.objects.filter(pk__in=repo_pks)
28 for repo in reclaimed_repos:
29 repo.invalidate_cache(everything=True)
30
31 domain = get_domain()
32 rest_of_repos = Repository.objects.filter(pulp_domain=domain).exclude(pk__in=repo_pks)
33 c_keep_qs = Content.objects.filter(repositories__in=rest_of_repos)
34 c_reclaim_qs = Content.objects.filter(repositories__in=repo_pks)
35 c_reclaim_qs = c_reclaim_qs.exclude(
36 pk__in=c_keep_qs, pulp_type=PublishedMetadata.get_pulp_type()
37 )
38
39 if keeplist_rv_pks:
40 rv_qs = RepositoryVersion.objects.filter(pk__in=keeplist_rv_pks)
41 rv_content = Content.objects.none()
42 for rv in rv_qs.iterator():
43 rv_content |= rv.content
44 c_reclaim_qs = c_reclaim_qs.exclude(pk__in=rv_content)
45
46 content_distinct = c_reclaim_qs.distinct("pulp_type")
47 unprotected = []
48 for content in content_distinct:
49 if not content.cast().PROTECTED_FROM_RECLAIM:
50 unprotected.append(content.pulp_type)
51
52 ca_qs = ContentArtifact.objects.select_related("content", "artifact").filter(
53 content__in=c_reclaim_qs.values("pk"), artifact__isnull=False
54 )
55 if not force:
56 ca_qs = ca_qs.filter(remoteartifact__isnull=False)
57 artifact_pks = set()
58 ca_to_update = []
59 for ca in ca_qs.iterator():
60 if ca.content.pulp_type in unprotected:
61 artifact_pks.add(ca.artifact.pk)
62 ca.artifact = None
63 ca_to_update.append(ca)
64
65 ContentArtifact.objects.bulk_update(objs=ca_to_update, fields=["artifact"], batch_size=1000)
66 artifacts_to_delete = Artifact.objects.filter(pk__in=artifact_pks)
67 progress_bar = ProgressReport(
68 message="Reclaim disk space",
69 total=artifacts_to_delete.count(),
70 code="reclaim-space.artifact",
71 done=0,
72 state="running",
73 )
74 progress_bar.save()
75
76 counter = 0
77 interval = 100
78 for artifact in artifacts_to_delete.iterator():
79 # we need to manually call delete() because it cleans up the file on the filesystem
80 artifact.delete()
81 progress_bar.done += 1
82 counter += 1
83
84 if counter >= interval:
85 progress_bar.save()
86 counter = 0
87
88 progress_bar.state = "completed"
89 progress_bar.save()
90
[end of pulpcore/app/tasks/reclaim_space.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pulpcore/app/tasks/reclaim_space.py b/pulpcore/app/tasks/reclaim_space.py
--- a/pulpcore/app/tasks/reclaim_space.py
+++ b/pulpcore/app/tasks/reclaim_space.py
@@ -1,3 +1,7 @@
+from logging import getLogger
+
+from django.db.models.deletion import ProtectedError
+
from pulpcore.app.models import (
Artifact,
Content,
@@ -9,6 +13,8 @@
)
from pulpcore.app.util import get_domain
+log = getLogger(__name__)
+
def reclaim_space(repo_pks, keeplist_rv_pks=None, force=False):
"""
@@ -76,10 +82,16 @@
counter = 0
interval = 100
for artifact in artifacts_to_delete.iterator():
- # we need to manually call delete() because it cleans up the file on the filesystem
- artifact.delete()
- progress_bar.done += 1
- counter += 1
+ try:
+ # we need to manually call delete() because it cleans up the file on the filesystem
+ artifact.delete()
+ except ProtectedError as e:
+ # Rarely artifact could be shared between to different content units.
+ # Just log and skip the artifact deletion in this case
+ log.info(e)
+ else:
+ progress_bar.done += 1
+ counter += 1
if counter >= interval:
progress_bar.save()
| {"golden_diff": "diff --git a/pulpcore/app/tasks/reclaim_space.py b/pulpcore/app/tasks/reclaim_space.py\n--- a/pulpcore/app/tasks/reclaim_space.py\n+++ b/pulpcore/app/tasks/reclaim_space.py\n@@ -1,3 +1,7 @@\n+from logging import getLogger\n+\n+from django.db.models.deletion import ProtectedError\n+\n from pulpcore.app.models import (\n Artifact,\n Content,\n@@ -9,6 +13,8 @@\n )\n from pulpcore.app.util import get_domain\n \n+log = getLogger(__name__)\n+\n \n def reclaim_space(repo_pks, keeplist_rv_pks=None, force=False):\n \"\"\"\n@@ -76,10 +82,16 @@\n counter = 0\n interval = 100\n for artifact in artifacts_to_delete.iterator():\n- # we need to manually call delete() because it cleans up the file on the filesystem\n- artifact.delete()\n- progress_bar.done += 1\n- counter += 1\n+ try:\n+ # we need to manually call delete() because it cleans up the file on the filesystem\n+ artifact.delete()\n+ except ProtectedError as e:\n+ # Rarely artifact could be shared between to different content units.\n+ # Just log and skip the artifact deletion in this case\n+ log.info(e)\n+ else:\n+ progress_bar.done += 1\n+ counter += 1\n \n if counter >= interval:\n progress_bar.save()\n", "issue": "Reclaim space for repository fails with Cannot delete some instances of model 'Artifact' because they are referenced through protected foreign keys: 'ContentArtifact.artifact'.\"\n**Version**\r\n3.16, but probably all versions\r\n\r\n**Describe the bug**\r\n\r\n- Reclaim space for repository fails with the following error.\r\n\r\n ~~~\r\n Task paused with error: \"(\"Cannot delete some instances of model 'Artifact' because they are referenced through protected foreign keys: 'ContentArtifact.artifact'.\", {<ContentArtifact: pk=452959ad-c045-4e85-bf9f-6651ba37f57d>})\"\r\n ~~~\r\n\r\n**To Reproduce**\r\nSee BZ\r\n\r\n**Additional context**\r\nhttps://bugzilla.redhat.com/show_bug.cgi?id=2169322\r\n\n", "before_files": [{"content": "from pulpcore.app.models import (\n Artifact,\n Content,\n ContentArtifact,\n ProgressReport,\n PublishedMetadata,\n Repository,\n RepositoryVersion,\n)\nfrom pulpcore.app.util import get_domain\n\n\ndef reclaim_space(repo_pks, keeplist_rv_pks=None, force=False):\n \"\"\"\n This task frees-up disk space by removing Artifact files from the filesystem for Content\n exclusive to the list of provided repos.\n\n Note: content marked as `proctected` will be excluded from the reclaim disk space.\n\n Kwargs:\n repo_pks (list): A list of repo pks the disk reclaim space is performed on.\n keeplist_rv_pks (list): A list of repo version pks that will be excluded from the reclaim\n disk space.\n force (bool): If True, uploaded content will be taken into account.\n\n \"\"\"\n reclaimed_repos = Repository.objects.filter(pk__in=repo_pks)\n for repo in reclaimed_repos:\n repo.invalidate_cache(everything=True)\n\n domain = get_domain()\n rest_of_repos = Repository.objects.filter(pulp_domain=domain).exclude(pk__in=repo_pks)\n c_keep_qs = Content.objects.filter(repositories__in=rest_of_repos)\n c_reclaim_qs = Content.objects.filter(repositories__in=repo_pks)\n c_reclaim_qs = c_reclaim_qs.exclude(\n pk__in=c_keep_qs, pulp_type=PublishedMetadata.get_pulp_type()\n )\n\n if keeplist_rv_pks:\n rv_qs = RepositoryVersion.objects.filter(pk__in=keeplist_rv_pks)\n rv_content = Content.objects.none()\n for rv in rv_qs.iterator():\n rv_content |= rv.content\n c_reclaim_qs = c_reclaim_qs.exclude(pk__in=rv_content)\n\n content_distinct = c_reclaim_qs.distinct(\"pulp_type\")\n unprotected = []\n for content in 
content_distinct:\n if not content.cast().PROTECTED_FROM_RECLAIM:\n unprotected.append(content.pulp_type)\n\n ca_qs = ContentArtifact.objects.select_related(\"content\", \"artifact\").filter(\n content__in=c_reclaim_qs.values(\"pk\"), artifact__isnull=False\n )\n if not force:\n ca_qs = ca_qs.filter(remoteartifact__isnull=False)\n artifact_pks = set()\n ca_to_update = []\n for ca in ca_qs.iterator():\n if ca.content.pulp_type in unprotected:\n artifact_pks.add(ca.artifact.pk)\n ca.artifact = None\n ca_to_update.append(ca)\n\n ContentArtifact.objects.bulk_update(objs=ca_to_update, fields=[\"artifact\"], batch_size=1000)\n artifacts_to_delete = Artifact.objects.filter(pk__in=artifact_pks)\n progress_bar = ProgressReport(\n message=\"Reclaim disk space\",\n total=artifacts_to_delete.count(),\n code=\"reclaim-space.artifact\",\n done=0,\n state=\"running\",\n )\n progress_bar.save()\n\n counter = 0\n interval = 100\n for artifact in artifacts_to_delete.iterator():\n # we need to manually call delete() because it cleans up the file on the filesystem\n artifact.delete()\n progress_bar.done += 1\n counter += 1\n\n if counter >= interval:\n progress_bar.save()\n counter = 0\n\n progress_bar.state = \"completed\"\n progress_bar.save()\n", "path": "pulpcore/app/tasks/reclaim_space.py"}]} | 1,628 | 323 |
gh_patches_debug_35421 | rasdani/github-patches | git_diff | conan-io__conan-center-index-20134 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[doxygen] Model iconv dependency as a conan package
Specify library name and version: **doxygen/1.9.2**
This resolves an issue where `iconv` wasn't being appropriately modelled as a Conan dependency in the doxygen recipe, which led to unresolved symbol errors on macOS builds with Conan 2. This was previously part of https://github.com/conan-io/conan-center-index/pull/18415, but is being split into a separate PR to separate concerns and make review easier.
Closes #19903
---
- [x] I've read the [contributing guidelines](https://github.com/conan-io/conan-center-index/blob/master/CONTRIBUTING.md).
- [x] I've used a [recent](https://github.com/conan-io/conan/releases/latest) Conan client version close to the [currently deployed](https://github.com/conan-io/conan-center-index/blob/master/.c3i/config_v1.yml#L6).
- [x] I've tried at least one configuration locally with the [conan-center hook](https://github.com/conan-io/hooks.git) activated.
</issue>
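For orientation, here is a minimal sketch of what "modelling iconv as a Conan dependency" means at the recipe level, assuming Conan 2's `ConanFile` API. The recipe name and the `libiconv/1.17` pin are assumptions for illustration, not the project's final choice.

```python
from conan import ConanFile


class ExampleRecipe(ConanFile):
    # Hypothetical minimal recipe: declaring libiconv in requirements()
    # makes Conan fetch and link it explicitly, instead of leaving the
    # iconv symbols to whatever the host system happens to ship.
    name = "example"
    version = "0.1"
    settings = "os", "arch", "compiler", "build_type"

    def requirements(self):
        self.requires("libiconv/1.17")  # assumed version pin
```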
<code>
[start of recipes/doxygen/all/conanfile.py]
1 from conan import ConanFile
2 from conan.errors import ConanInvalidConfiguration
3 from conan.tools.cmake import CMake, CMakeDeps, CMakeToolchain, cmake_layout
4 from conan.tools.files import apply_conandata_patches, copy, export_conandata_patches, get
5 from conan.tools.microsoft import check_min_vs, is_msvc_static_runtime
6 from conan.tools.scm import Version
7 import os
8
9 required_conan_version = ">=1.52.0"
10
11
12 class DoxygenConan(ConanFile):
13 name = "doxygen"
14 description = "A documentation system for C++, C, Java, IDL and PHP --- Note: Dot is disabled in this package"
15 topics = ("installer", "devtool", "documentation")
16 homepage = "https://github.com/doxygen/doxygen"
17 license = "GPL-2.0-or-later"
18 url = "https://github.com/conan-io/conan-center-index"
19 package_type = "application"
20 settings = "os", "arch", "compiler", "build_type"
21 options = {
22 "enable_parse": [True, False],
23 "enable_search": [True, False],
24 }
25 default_options = {
26 "enable_parse": True,
27 "enable_search": True,
28 }
29
30 @property
31 def _settings_build(self):
32 return getattr(self, "settings_build", self.settings)
33
34 @property
35 def _minimum_compiler_version(self):
36 if Version(self.version) <= "1.9.1":
37 return {
38 "gcc": "5",
39 }
40 return {
41 "gcc": "7", # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=66297
42 "Visual Studio": "15",
43 "msvc": "191",
44 }
45
46 def export_sources(self):
47 export_conandata_patches(self)
48
49 def layout(self):
50 cmake_layout(self, src_folder="src")
51
52 def requirements(self):
53 if self.options.enable_search:
54 self.requires("xapian-core/1.4.19")
55 self.requires("zlib/1.2.13")
56
57 def package_id(self):
58 del self.info.settings.compiler
59
60 def compatibility(self):
61 return [{"settings": [("build_type", "Release")]}]
62
63 def validate(self):
64 minimum_compiler_version = self._minimum_compiler_version.get(str(self.settings.compiler))
65 if minimum_compiler_version and Version(self.settings.compiler.version) < minimum_compiler_version:
66 raise ConanInvalidConfiguration(f"Compiler version too old. At least {minimum_compiler_version} is required.")
67 if Version(self.version) == "1.8.18":
68 check_min_vs(self, "191")
69
70 def build_requirements(self):
71 if self._settings_build.os == "Windows":
72 self.tool_requires("winflexbison/2.5.24")
73 else:
74 self.tool_requires("flex/2.6.4")
75 self.tool_requires("bison/3.8.2")
76
77 def source(self):
78 get(self, **self.conan_data["sources"][self.version], strip_root=True)
79
80 def generate(self):
81 tc = CMakeToolchain(self)
82 tc.variables["build_parse"] = self.options.enable_parse
83 tc.variables["build_search"] = self.options.enable_search
84 tc.variables["use_libc++"] = self.settings.compiler.get_safe("libcxx") == "libc++"
85 tc.variables["win_static"] = is_msvc_static_runtime(self)
86 tc.generate()
87
88 deps = CMakeDeps(self)
89 deps.generate()
90
91 def build(self):
92 apply_conandata_patches(self)
93 cmake = CMake(self)
94 cmake.configure()
95 cmake.build()
96
97 def package(self):
98 copy(self, "LICENSE", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses"))
99 cmake = CMake(self)
100 cmake.install()
101
102 def package_info(self):
103 self.cpp_info.set_property("cmake_find_mode", "none")
104 self.cpp_info.libdirs = []
105 self.cpp_info.includedirs = []
106
107 # TODO: to remove in conan v2
108 self.env_info.PATH.append(os.path.join(self.package_folder, "bin"))
109
[end of recipes/doxygen/all/conanfile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/recipes/doxygen/all/conanfile.py b/recipes/doxygen/all/conanfile.py
--- a/recipes/doxygen/all/conanfile.py
+++ b/recipes/doxygen/all/conanfile.py
@@ -21,10 +21,12 @@
options = {
"enable_parse": [True, False],
"enable_search": [True, False],
+ "enable_app": [True, False],
}
default_options = {
"enable_parse": True,
"enable_search": True,
+ "enable_app": False,
}
@property
@@ -52,10 +54,10 @@
def requirements(self):
if self.options.enable_search:
self.requires("xapian-core/1.4.19")
- self.requires("zlib/1.2.13")
-
- def package_id(self):
- del self.info.settings.compiler
+ self.requires("zlib/[>=1.2.11 <2]")
+ if self.options.enable_app or self.options.enable_parse:
+ # INFO: Doxygen uses upper case CMake variables to link/include IConv, so we are using patches for targets.
+ self.requires("libiconv/1.17")
def compatibility(self):
return [{"settings": [("build_type", "Release")]}]
@@ -81,6 +83,7 @@
tc = CMakeToolchain(self)
tc.variables["build_parse"] = self.options.enable_parse
tc.variables["build_search"] = self.options.enable_search
+ tc.variables["build_app"] = self.options.enable_app
tc.variables["use_libc++"] = self.settings.compiler.get_safe("libcxx") == "libc++"
tc.variables["win_static"] = is_msvc_static_runtime(self)
tc.generate()
@@ -103,6 +106,8 @@
self.cpp_info.set_property("cmake_find_mode", "none")
self.cpp_info.libdirs = []
self.cpp_info.includedirs = []
+ if self.settings.os in ["Linux", "FreeBSD"]:
+ self.cpp_info.system_libs = ["pthread", "m"]
# TODO: to remove in conan v2
self.env_info.PATH.append(os.path.join(self.package_folder, "bin"))
| {"golden_diff": "diff --git a/recipes/doxygen/all/conanfile.py b/recipes/doxygen/all/conanfile.py\n--- a/recipes/doxygen/all/conanfile.py\n+++ b/recipes/doxygen/all/conanfile.py\n@@ -21,10 +21,12 @@\n options = {\n \"enable_parse\": [True, False],\n \"enable_search\": [True, False],\n+ \"enable_app\": [True, False],\n }\n default_options = {\n \"enable_parse\": True,\n \"enable_search\": True,\n+ \"enable_app\": False,\n }\n \n @property\n@@ -52,10 +54,10 @@\n def requirements(self):\n if self.options.enable_search:\n self.requires(\"xapian-core/1.4.19\")\n- self.requires(\"zlib/1.2.13\")\n-\n- def package_id(self):\n- del self.info.settings.compiler\n+ self.requires(\"zlib/[>=1.2.11 <2]\")\n+ if self.options.enable_app or self.options.enable_parse:\n+ # INFO: Doxygen uses upper case CMake variables to link/include IConv, so we are using patches for targets.\n+ self.requires(\"libiconv/1.17\")\n \n def compatibility(self):\n return [{\"settings\": [(\"build_type\", \"Release\")]}]\n@@ -81,6 +83,7 @@\n tc = CMakeToolchain(self)\n tc.variables[\"build_parse\"] = self.options.enable_parse\n tc.variables[\"build_search\"] = self.options.enable_search\n+ tc.variables[\"build_app\"] = self.options.enable_app\n tc.variables[\"use_libc++\"] = self.settings.compiler.get_safe(\"libcxx\") == \"libc++\"\n tc.variables[\"win_static\"] = is_msvc_static_runtime(self)\n tc.generate()\n@@ -103,6 +106,8 @@\n self.cpp_info.set_property(\"cmake_find_mode\", \"none\")\n self.cpp_info.libdirs = []\n self.cpp_info.includedirs = []\n+ if self.settings.os in [\"Linux\", \"FreeBSD\"]:\n+ self.cpp_info.system_libs = [\"pthread\", \"m\"]\n \n # TODO: to remove in conan v2\n self.env_info.PATH.append(os.path.join(self.package_folder, \"bin\"))\n", "issue": "[doxygen] Model iconv dependency as a conan package\nSpecify library name and version: **doxygen/1.9.2**\r\n\r\nThis resolves an issue where `iconv` wasn't being appropriately modelled as a conan dependency in the doxygen recipe. This lead to unresolved symbol errors on Macos builds with conan 2. 
This was previously part of https://github.com/conan-io/conan-center-index/pull/18415, but is being split to a separate PR to separate concerns and make review easier.\r\n\r\nCloses #19903 \r\n\r\n---\r\n\r\n- [x] I've read the [contributing guidelines](https://github.com/conan-io/conan-center-index/blob/master/CONTRIBUTING.md).\r\n- [x] I've used a [recent](https://github.com/conan-io/conan/releases/latest) Conan client version close to the [currently deployed](https://github.com/conan-io/conan-center-index/blob/master/.c3i/config_v1.yml#L6).\r\n- [x] I've tried at least one configuration locally with the [conan-center hook](https://github.com/conan-io/hooks.git) activated.\r\n\n", "before_files": [{"content": "from conan import ConanFile\nfrom conan.errors import ConanInvalidConfiguration\nfrom conan.tools.cmake import CMake, CMakeDeps, CMakeToolchain, cmake_layout\nfrom conan.tools.files import apply_conandata_patches, copy, export_conandata_patches, get\nfrom conan.tools.microsoft import check_min_vs, is_msvc_static_runtime\nfrom conan.tools.scm import Version\nimport os\n\nrequired_conan_version = \">=1.52.0\"\n\n\nclass DoxygenConan(ConanFile):\n name = \"doxygen\"\n description = \"A documentation system for C++, C, Java, IDL and PHP --- Note: Dot is disabled in this package\"\n topics = (\"installer\", \"devtool\", \"documentation\")\n homepage = \"https://github.com/doxygen/doxygen\"\n license = \"GPL-2.0-or-later\"\n url = \"https://github.com/conan-io/conan-center-index\"\n package_type = \"application\"\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"enable_parse\": [True, False],\n \"enable_search\": [True, False],\n }\n default_options = {\n \"enable_parse\": True,\n \"enable_search\": True,\n }\n\n @property\n def _settings_build(self):\n return getattr(self, \"settings_build\", self.settings)\n\n @property\n def _minimum_compiler_version(self):\n if Version(self.version) <= \"1.9.1\":\n return {\n \"gcc\": \"5\",\n }\n return {\n \"gcc\": \"7\", # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=66297\n \"Visual Studio\": \"15\",\n \"msvc\": \"191\",\n }\n\n def export_sources(self):\n export_conandata_patches(self)\n\n def layout(self):\n cmake_layout(self, src_folder=\"src\")\n\n def requirements(self):\n if self.options.enable_search:\n self.requires(\"xapian-core/1.4.19\")\n self.requires(\"zlib/1.2.13\")\n\n def package_id(self):\n del self.info.settings.compiler\n\n def compatibility(self):\n return [{\"settings\": [(\"build_type\", \"Release\")]}]\n\n def validate(self):\n minimum_compiler_version = self._minimum_compiler_version.get(str(self.settings.compiler))\n if minimum_compiler_version and Version(self.settings.compiler.version) < minimum_compiler_version:\n raise ConanInvalidConfiguration(f\"Compiler version too old. 
At least {minimum_compiler_version} is required.\")\n if Version(self.version) == \"1.8.18\":\n check_min_vs(self, \"191\")\n\n def build_requirements(self):\n if self._settings_build.os == \"Windows\":\n self.tool_requires(\"winflexbison/2.5.24\")\n else:\n self.tool_requires(\"flex/2.6.4\")\n self.tool_requires(\"bison/3.8.2\")\n\n def source(self):\n get(self, **self.conan_data[\"sources\"][self.version], strip_root=True)\n\n def generate(self):\n tc = CMakeToolchain(self)\n tc.variables[\"build_parse\"] = self.options.enable_parse\n tc.variables[\"build_search\"] = self.options.enable_search\n tc.variables[\"use_libc++\"] = self.settings.compiler.get_safe(\"libcxx\") == \"libc++\"\n tc.variables[\"win_static\"] = is_msvc_static_runtime(self)\n tc.generate()\n\n deps = CMakeDeps(self)\n deps.generate()\n\n def build(self):\n apply_conandata_patches(self)\n cmake = CMake(self)\n cmake.configure()\n cmake.build()\n\n def package(self):\n copy(self, \"LICENSE\", src=self.source_folder, dst=os.path.join(self.package_folder, \"licenses\"))\n cmake = CMake(self)\n cmake.install()\n\n def package_info(self):\n self.cpp_info.set_property(\"cmake_find_mode\", \"none\")\n self.cpp_info.libdirs = []\n self.cpp_info.includedirs = []\n\n # TODO: to remove in conan v2\n self.env_info.PATH.append(os.path.join(self.package_folder, \"bin\"))\n", "path": "recipes/doxygen/all/conanfile.py"}]} | 1,924 | 508 |
gh_patches_debug_310 | rasdani/github-patches | git_diff | streamlit__streamlit-7454 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
A header with Japanese text has no anchor link.
### Summary
I found that a header with Japanese text has no anchor link.
### Steps to reproduce
Code snippet:
```
import streamlit as st
st.header("セクション")
```
1. Run the code snippet above.
2. Check whether the header has an anchor link.
**Expected behavior:**
The header ("セクション") has anchor link.
**Actual behavior:**
The header ("セクション") has no anchor link.
### Is this a regression?
No
### Debug info
- Streamlit version: Streamlit, version 1.10.0
- Python version: Python 3.8.10
- Using Conda
- OS version: Ubuntu 20.04.4 LTS
- Browser version: Chrome / Version 104.0.5112.101 (Official Build) (x86_64)
### Additional information
A header with Korean text or Chinese text also has no anchor link.
</issue>
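One plausible root cause, stated as an assumption because the slug-generation code is not shown here: an ASCII-only slugifier drops CJK characters entirely, leaving an empty anchor. A unicode-aware helper illustrates the general shape of a fix:

```python
import re
import unicodedata


def anchor_for(header: str) -> str:
    """Hypothetical anchor builder that keeps non-Latin characters.

    re's word-character class matches unicode by default in Python 3,
    so CJK text survives instead of being stripped to an empty string.
    """
    text = unicodedata.normalize("NFKC", header).lower()
    return "-".join(re.findall(r"\w+", text))


print(anchor_for("セクション"))              # -> セクション
print(anchor_for("This title is awesome!"))  # -> this-title-is-awesome
```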
<code>
[start of e2e/scripts/st_title.py]
1 # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022)
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import streamlit as st
16
17 st.title("This title is awesome!")
18 st.title("This title is awesome too!", anchor="awesome-title")
19
[end of e2e/scripts/st_title.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/e2e/scripts/st_title.py b/e2e/scripts/st_title.py
--- a/e2e/scripts/st_title.py
+++ b/e2e/scripts/st_title.py
@@ -16,3 +16,6 @@
st.title("This title is awesome!")
st.title("This title is awesome too!", anchor="awesome-title")
+
+st.title("日本語タイトル")
+st.title("その他の邦題", anchor="アンカー")
| {"golden_diff": "diff --git a/e2e/scripts/st_title.py b/e2e/scripts/st_title.py\n--- a/e2e/scripts/st_title.py\n+++ b/e2e/scripts/st_title.py\n@@ -16,3 +16,6 @@\n \n st.title(\"This title is awesome!\")\n st.title(\"This title is awesome too!\", anchor=\"awesome-title\")\n+\n+st.title(\"\u65e5\u672c\u8a9e\u30bf\u30a4\u30c8\u30eb\")\n+st.title(\"\u305d\u306e\u4ed6\u306e\u90a6\u984c\", anchor=\"\u30a2\u30f3\u30ab\u30fc\")\n", "issue": "A header with Japanese text has no anchor link.\n### Summary\r\n\r\nI found that a header with Japanese text has no anchor link.\r\n\r\n### Steps to reproduce\r\n\r\nCode snippet:\r\n\r\n```\r\nimport streamlit as st\r\nst.header(\"\u30bb\u30af\u30b7\u30e7\u30f3\")\r\n```\r\n\r\n1. Run code snippet above.\r\n2. Check if the header has anchor link or not.\r\n\r\n**Expected behavior:**\r\n\r\nThe header (\"\u30bb\u30af\u30b7\u30e7\u30f3\") has anchor link.\r\n\r\n**Actual behavior:**\r\n\r\nThe header (\"\u30bb\u30af\u30b7\u30e7\u30f3\") has no anchor link.\r\n\r\n### Is this a regression?\r\n\r\nNo\r\n\r\n### Debug info\r\n\r\n- Streamlit version: Streamlit, version 1.10.0\r\n- Python version: Python 3.8.10\r\n- Using Conda\r\n- OS version: Ubuntu 20.04.4 LTS\r\n- Browser version: Chrome / Version 104.0.5112.101 (Official Build) (x86_64)\r\n\r\n### Additional information\r\n\r\nA header with Korean text or Chinese text also has no anchor link.\r\n\n", "before_files": [{"content": "# Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport streamlit as st\n\nst.title(\"This title is awesome!\")\nst.title(\"This title is awesome too!\", anchor=\"awesome-title\")\n", "path": "e2e/scripts/st_title.py"}]} | 969 | 99 |
gh_patches_debug_41324 | rasdani/github-patches | git_diff | tough-dev-school__education-backend-885 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Admin: show only enabled promo codes by default
We have accumulated a huge pile of promo codes, and it is now hard to find anything when first opening the admin. The admin should show only enabled promo codes by default.
</issue>
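The shape of the fix, sketched against Django's stock `SimpleListFilter` (names here are illustrative; the project's own `BooleanFilter` base class appears in the code below): a filter whose queryset falls back to the "active" branch whenever no value has been chosen yet.

```python
from django.contrib import admin


class ActiveByDefaultFilter(admin.SimpleListFilter):
    """Boolean filter that shows only active rows until "No" is picked."""

    title = "Active"
    parameter_name = "is_active"

    def lookups(self, request, model_admin):
        return (("t", "Yes"), ("f", "No"))

    def queryset(self, request, queryset):
        if self.value() in (None, "t"):  # no choice yet -> default to Yes
            return queryset.filter(active=True)
        return queryset.filter(active=False)
```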
<code>
[start of src/app/admin/filters.py]
1 from django.contrib import admin
2 from django.utils.translation import gettext_lazy as _
3
4
5 class BooleanFilter(admin.SimpleListFilter):
6 """
7 Abstract base class for simple boolean filter in admin. You should define only
8 `title`, unique `parameter_name` and two methods: `t` and `f`, returning a queryset
9 when filter is set to True and False respectively:
10 class HasClassesFilter(BooleanFilter):
11 title = _('Has classes')
12 parameter_name = 'has_classes'
13 def t(self, request, queryset):
14 return queryset.filter(classes__isnull=False).distinct('pk')
15 def n(self, request, queryset):
16 return queryset.filter(classes__isnull=True)
17 """
18 def lookups(self, request, model_admin):
19 return (
20 ('t', _('Yes')),
21 ('f', _('No')),
22 )
23
24 def queryset(self, request, queryset):
25 if not self.value():
26 return queryset
27 else:
28 if self.value() == 't':
29 return self.t(request, queryset)
30 else:
31 return self.f(request, queryset)
32
[end of src/app/admin/filters.py]
[start of src/orders/admin/promocodes/admin.py]
1 from django.urls import reverse
2 from django.utils.safestring import mark_safe
3 from django.utils.translation import gettext_lazy as _
4
5 from app.admin import ModelAdmin, admin
6 from orders.admin.promocodes import actions
7 from orders.models import PromoCode
8
9
10 @admin.register(PromoCode)
11 class PromoCodeAdmin(ModelAdmin):
12 list_display = [
13 'id',
14 'name',
15 'discount_percent',
16 'discount_value',
17 'order_count',
18 'comment',
19 'active',
20 ]
21
22 list_editable = [
23 'active',
24 ]
25
26 list_filter = [
27 'active',
28 ]
29
30 actions = [actions.deactivate]
31
32 def get_queryset(self, request):
33 return super().get_queryset(request) \
34 .with_order_count()
35
36 @mark_safe
37 @admin.display(description=_('Order count'), ordering='order_count')
38 def order_count(self, obj=None):
39 if hasattr(obj, 'order_count') and obj.order_count:
40 orders_url = reverse('admin:orders_order_changelist')
41 return f'<a href="{orders_url}?is_paid=t&promocode_id={obj.id}">{obj.order_count}</a>'
42
43 return '—'
44
[end of src/orders/admin/promocodes/admin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/app/admin/filters.py b/src/app/admin/filters.py
--- a/src/app/admin/filters.py
+++ b/src/app/admin/filters.py
@@ -12,7 +12,7 @@
parameter_name = 'has_classes'
def t(self, request, queryset):
return queryset.filter(classes__isnull=False).distinct('pk')
- def n(self, request, queryset):
+ def f(self, request, queryset):
return queryset.filter(classes__isnull=True)
"""
def lookups(self, request, model_admin):
@@ -24,8 +24,22 @@
def queryset(self, request, queryset):
if not self.value():
return queryset
- else:
- if self.value() == 't':
- return self.t(request, queryset)
- else:
- return self.f(request, queryset)
+
+ if self.value() == 't':
+ return self.t(request, queryset)
+
+ return self.f(request, queryset)
+
+
+class DefaultTrueBooleanFilter(BooleanFilter):
+ def queryset(self, request, queryset):
+ if not self.value() or self.value() == 't':
+ return self.t(request, queryset)
+
+ return self.f(request, queryset)
+
+
+__all__ = [
+ 'BooleanFilter',
+ 'DefaultTrueBooleanFilter',
+]
diff --git a/src/orders/admin/promocodes/admin.py b/src/orders/admin/promocodes/admin.py
--- a/src/orders/admin/promocodes/admin.py
+++ b/src/orders/admin/promocodes/admin.py
@@ -1,31 +1,45 @@
-from django.urls import reverse
-from django.utils.safestring import mark_safe
from django.utils.translation import gettext_lazy as _
from app.admin import ModelAdmin, admin
+from app.admin.filters import DefaultTrueBooleanFilter
from orders.admin.promocodes import actions
from orders.models import PromoCode
+class PromodeActiveFilter(DefaultTrueBooleanFilter):
+ title = _('Active')
+ parameter_name = 'is_active'
+
+ def t(self, request, queryset):
+ return queryset.filter(active=True)
+
+ def f(self, request, queryset):
+ return queryset.filter(active=False)
+
+
@admin.register(PromoCode)
class PromoCodeAdmin(ModelAdmin):
- list_display = [
+ list_display = (
'id',
'name',
- 'discount_percent',
- 'discount_value',
+ 'discount',
'order_count',
'comment',
'active',
- ]
+ )
list_editable = [
'active',
]
- list_filter = [
- 'active',
- ]
+ list_filter = (
+ PromodeActiveFilter,
+ )
+
+ list_display_links = (
+ 'id',
+ 'name',
+ )
actions = [actions.deactivate]
@@ -33,11 +47,19 @@
return super().get_queryset(request) \
.with_order_count()
- @mark_safe
@admin.display(description=_('Order count'), ordering='order_count')
- def order_count(self, obj=None):
+ def order_count(self, obj: PromoCode | None = None) -> str:
if hasattr(obj, 'order_count') and obj.order_count:
- orders_url = reverse('admin:orders_order_changelist')
- return f'<a href="{orders_url}?is_paid=t&promocode_id={obj.id}">{obj.order_count}</a>'
+ return str(obj.order_count)
return '—'
+
+ @admin.display(description=_('Discount'), ordering='discount_percent')
+ def discount(self, obj: PromoCode | None = None) -> str:
+ if not obj:
+ return '—'
+
+ if obj.discount_value is not None:
+ return f'{obj.discount_value} ₽'
+
+ return f'{obj.discount_percent} %'
| {"golden_diff": "diff --git a/src/app/admin/filters.py b/src/app/admin/filters.py\n--- a/src/app/admin/filters.py\n+++ b/src/app/admin/filters.py\n@@ -12,7 +12,7 @@\n parameter_name = 'has_classes'\n def t(self, request, queryset):\n return queryset.filter(classes__isnull=False).distinct('pk')\n- def n(self, request, queryset):\n+ def f(self, request, queryset):\n return queryset.filter(classes__isnull=True)\n \"\"\"\n def lookups(self, request, model_admin):\n@@ -24,8 +24,22 @@\n def queryset(self, request, queryset):\n if not self.value():\n return queryset\n- else:\n- if self.value() == 't':\n- return self.t(request, queryset)\n- else:\n- return self.f(request, queryset)\n+\n+ if self.value() == 't':\n+ return self.t(request, queryset)\n+\n+ return self.f(request, queryset)\n+\n+\n+class DefaultTrueBooleanFilter(BooleanFilter):\n+ def queryset(self, request, queryset):\n+ if not self.value() or self.value() == 't':\n+ return self.t(request, queryset)\n+\n+ return self.f(request, queryset)\n+\n+\n+__all__ = [\n+ 'BooleanFilter',\n+ 'DefaultTrueBooleanFilter',\n+]\ndiff --git a/src/orders/admin/promocodes/admin.py b/src/orders/admin/promocodes/admin.py\n--- a/src/orders/admin/promocodes/admin.py\n+++ b/src/orders/admin/promocodes/admin.py\n@@ -1,31 +1,45 @@\n-from django.urls import reverse\n-from django.utils.safestring import mark_safe\n from django.utils.translation import gettext_lazy as _\n \n from app.admin import ModelAdmin, admin\n+from app.admin.filters import DefaultTrueBooleanFilter\n from orders.admin.promocodes import actions\n from orders.models import PromoCode\n \n \n+class PromodeActiveFilter(DefaultTrueBooleanFilter):\n+ title = _('Active')\n+ parameter_name = 'is_active'\n+\n+ def t(self, request, queryset):\n+ return queryset.filter(active=True)\n+\n+ def f(self, request, queryset):\n+ return queryset.filter(active=False)\n+\n+\n @admin.register(PromoCode)\n class PromoCodeAdmin(ModelAdmin):\n- list_display = [\n+ list_display = (\n 'id',\n 'name',\n- 'discount_percent',\n- 'discount_value',\n+ 'discount',\n 'order_count',\n 'comment',\n 'active',\n- ]\n+ )\n \n list_editable = [\n 'active',\n ]\n \n- list_filter = [\n- 'active',\n- ]\n+ list_filter = (\n+ PromodeActiveFilter,\n+ )\n+\n+ list_display_links = (\n+ 'id',\n+ 'name',\n+ )\n \n actions = [actions.deactivate]\n \n@@ -33,11 +47,19 @@\n return super().get_queryset(request) \\\n .with_order_count()\n \n- @mark_safe\n @admin.display(description=_('Order count'), ordering='order_count')\n- def order_count(self, obj=None):\n+ def order_count(self, obj: PromoCode | None = None) -> str:\n if hasattr(obj, 'order_count') and obj.order_count:\n- orders_url = reverse('admin:orders_order_changelist')\n- return f'<a href=\"{orders_url}?is_paid=t&promocode_id={obj.id}\">{obj.order_count}</a>'\n+ return str(obj.order_count)\n \n return '\u2014'\n+\n+ @admin.display(description=_('Discount'), ordering='discount_percent')\n+ def discount(self, obj: PromoCode | None = None) -> str:\n+ if not obj:\n+ return '\u2014'\n+\n+ if obj.discount_value is not None:\n+ return f'{obj.discount_value} \u20bd'\n+\n+ return f'{obj.discount_percent} %'\n", "issue": "\u0410\u0434\u043c\u0438\u043d\u043a\u0430: \u043f\u043e-\u0443\u043c\u043e\u043b\u0447\u0430\u043d\u0438\u044e \u043f\u043e\u043a\u0430\u0437\u044b\u0432\u0430\u0442\u044c \u0442\u043e\u043b\u044c\u043a\u043e \u0432\u043a\u043b\u044e\u0447\u0435\u043d\u043d\u044b\u0435 \u043f\u0440\u043e\u043c\u043e\u043a\u043e\u0434\u044b\n\u0423 \u043d\u0430\u0441 
\u043d\u0430\u043a\u043e\u043f\u0438\u043b\u0430\u0441\u044c \u043e\u0433\u0440\u043e\u043c\u043d\u0430\u044f \u0433\u043e\u0440\u0430 \u043f\u0440\u043e\u043c\u043e\u043a\u043e\u0434\u043e\u0432, \u0438 \u0442\u0435\u043f\u0435\u0440\u044c \u043f\u0440\u0438 \u043f\u0435\u0440\u0432\u043e\u043c \u0432\u0445\u043e\u0434\u0435 \u0432 \u0430\u0434\u043c\u0438\u043d\u043a\u0443 \u0441\u043b\u043e\u0436\u043d\u043e \u043e\u0440\u0438\u0435\u043d\u0442\u0438\u0440\u043e\u0432\u0430\u0442\u044c\u0441\u044f. \u041d\u0443\u0436\u043d\u043e \u0441\u0434\u0435\u043b\u0430\u0442\u044c, \u0447\u0442\u043e\u0431\u044b \u043f\u043e-\u0443\u043c\u043e\u043b\u0447\u0430\u043d\u0438\u044e \u0432 \u0430\u0434\u043c\u0438\u043d\u043a\u0435 \u043f\u043e\u043a\u0430\u0437\u044b\u0432\u0430\u043b\u0438\u0441\u044c \u0442\u043e\u043b\u044c\u043a\u043e \u0432\u043a\u043b\u044e\u0447\u0435\u043d\u043d\u044b\u0435 \u043f\u0440\u043e\u043c\u043e\u043a\u043e\u0434\u044b.\n", "before_files": [{"content": "from django.contrib import admin\nfrom django.utils.translation import gettext_lazy as _\n\n\nclass BooleanFilter(admin.SimpleListFilter):\n \"\"\"\n Abstract base class for simple boolean filter in admin. You should define only\n `title`, unique `parameter_name` and two methods: `t` and `f`, returning a queryset\n when filter is set to True and False respectively:\n class HasClassesFilter(BooleanFilter):\n title = _('Has classes')\n parameter_name = 'has_classes'\n def t(self, request, queryset):\n return queryset.filter(classes__isnull=False).distinct('pk')\n def n(self, request, queryset):\n return queryset.filter(classes__isnull=True)\n \"\"\"\n def lookups(self, request, model_admin):\n return (\n ('t', _('Yes')),\n ('f', _('No')),\n )\n\n def queryset(self, request, queryset):\n if not self.value():\n return queryset\n else:\n if self.value() == 't':\n return self.t(request, queryset)\n else:\n return self.f(request, queryset)\n", "path": "src/app/admin/filters.py"}, {"content": "from django.urls import reverse\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import gettext_lazy as _\n\nfrom app.admin import ModelAdmin, admin\nfrom orders.admin.promocodes import actions\nfrom orders.models import PromoCode\n\n\[email protected](PromoCode)\nclass PromoCodeAdmin(ModelAdmin):\n list_display = [\n 'id',\n 'name',\n 'discount_percent',\n 'discount_value',\n 'order_count',\n 'comment',\n 'active',\n ]\n\n list_editable = [\n 'active',\n ]\n\n list_filter = [\n 'active',\n ]\n\n actions = [actions.deactivate]\n\n def get_queryset(self, request):\n return super().get_queryset(request) \\\n .with_order_count()\n\n @mark_safe\n @admin.display(description=_('Order count'), ordering='order_count')\n def order_count(self, obj=None):\n if hasattr(obj, 'order_count') and obj.order_count:\n orders_url = reverse('admin:orders_order_changelist')\n return f'<a href=\"{orders_url}?is_paid=t&promocode_id={obj.id}\">{obj.order_count}</a>'\n\n return '\u2014'\n", "path": "src/orders/admin/promocodes/admin.py"}]} | 1,261 | 863 |
gh_patches_debug_56268 | rasdani/github-patches | git_diff | readthedocs__readthedocs.org-4910 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Validate profile fields on form
Related code
https://github.com/rtfd/readthedocs.org/blob/164800694a25d769234c6e7019c483f347fe9226/readthedocs/core/forms.py#L20-L46
This will raise an exception if the submitted value is longer than the model field's `max_length`.
Sentry issue https://sentry.io/read-the-docs/readthedocs-org/issues/666774301/
</issue>
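To make the failure concrete: the extra form fields declare no `max_length`, so the form accepts arbitrarily long names and the error only surfaces when `user.save()` hits the database column limit. Bounding the form field turns this into ordinary form validation. A self-contained sketch, assuming the 30-character limit Django's built-in `User` name fields had at the time:

```python
import django
from django.conf import settings

settings.configure()  # minimal configuration so the sketch runs standalone
django.setup()

from django import forms


class NameForm(forms.Form):
    # Match the model column limit so overlong input becomes a normal
    # field error instead of an exception at save time.
    first_name = forms.CharField(required=False, max_length=30)


form = NameForm(data={"first_name": "x" * 31})
assert not form.is_valid()
assert "first_name" in form.errors
```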
<code>
[start of readthedocs/core/forms.py]
1 # -*- coding: utf-8 -*-
2 """Forms for core app."""
3
4 from __future__ import (
5 absolute_import, division, print_function, unicode_literals)
6
7 import logging
8 from builtins import object
9
10 from django import forms
11 from django.contrib.auth.models import User
12 from django.forms.fields import CharField
13 from django.utils.translation import ugettext_lazy as _
14
15 from .models import UserProfile
16
17 log = logging.getLogger(__name__)
18
19
20 class UserProfileForm(forms.ModelForm):
21 first_name = CharField(label=_('First name'), required=False)
22 last_name = CharField(label=_('Last name'), required=False)
23
24 class Meta(object):
25 model = UserProfile
26 # Don't allow users edit someone else's user page
27 fields = ['first_name', 'last_name', 'homepage']
28
29 def __init__(self, *args, **kwargs):
30 super(UserProfileForm, self).__init__(*args, **kwargs)
31 try:
32 self.fields['first_name'].initial = self.instance.user.first_name
33 self.fields['last_name'].initial = self.instance.user.last_name
34 except AttributeError:
35 pass
36
37 def save(self, commit=True):
38 first_name = self.cleaned_data.pop('first_name', None)
39 last_name = self.cleaned_data.pop('last_name', None)
40 profile = super(UserProfileForm, self).save(commit=commit)
41 if commit:
42 user = profile.user
43 user.first_name = first_name
44 user.last_name = last_name
45 user.save()
46 return profile
47
48
49 class UserDeleteForm(forms.ModelForm):
50 username = CharField(
51 label=_('Username'),
52 help_text=_('Please type your username to confirm.'),
53 )
54
55 class Meta(object):
56 model = User
57 fields = ['username']
58
59 def clean_username(self):
60 data = self.cleaned_data['username']
61
62 if self.instance.username != data:
63 raise forms.ValidationError(_('Username does not match!'))
64
65 return data
66
67
68 class UserAdvertisingForm(forms.ModelForm):
69 class Meta(object):
70 model = UserProfile
71 fields = ['allow_ads']
72
73
74 class FacetField(forms.MultipleChoiceField):
75
76 """
77 For filtering searches on a facet.
78
79 Has validation for the format of facet values.
80 """
81
82 def valid_value(self, value):
83 """
84 Although this is a choice field, no choices need to be supplied.
85
86 Instead, we just validate that the value is in the correct format for
87 facet filtering (facet_name:value)
88 """
89 if ':' not in value:
90 return False
91 return True
92
[end of readthedocs/core/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/readthedocs/core/forms.py b/readthedocs/core/forms.py
--- a/readthedocs/core/forms.py
+++ b/readthedocs/core/forms.py
@@ -18,8 +18,8 @@
class UserProfileForm(forms.ModelForm):
- first_name = CharField(label=_('First name'), required=False)
- last_name = CharField(label=_('Last name'), required=False)
+ first_name = CharField(label=_('First name'), required=False, max_length=30)
+ last_name = CharField(label=_('Last name'), required=False, max_length=30)
class Meta(object):
model = UserProfile
| {"golden_diff": "diff --git a/readthedocs/core/forms.py b/readthedocs/core/forms.py\n--- a/readthedocs/core/forms.py\n+++ b/readthedocs/core/forms.py\n@@ -18,8 +18,8 @@\n \n \n class UserProfileForm(forms.ModelForm):\n- first_name = CharField(label=_('First name'), required=False)\n- last_name = CharField(label=_('Last name'), required=False)\n+ first_name = CharField(label=_('First name'), required=False, max_length=30)\n+ last_name = CharField(label=_('Last name'), required=False, max_length=30)\n \n class Meta(object):\n model = UserProfile\n", "issue": "Validate profile fields on form\nRelated code\r\n\r\nhttps://github.com/rtfd/readthedocs.org/blob/164800694a25d769234c6e7019c483f347fe9226/readthedocs/core/forms.py#L20-L46\r\n\r\nThis will raise an exception if the length is greater than the model\r\n\r\nSentry issue https://sentry.io/read-the-docs/readthedocs-org/issues/666774301/\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Forms for core app.\"\"\"\n\nfrom __future__ import (\n absolute_import, division, print_function, unicode_literals)\n\nimport logging\nfrom builtins import object\n\nfrom django import forms\nfrom django.contrib.auth.models import User\nfrom django.forms.fields import CharField\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom .models import UserProfile\n\nlog = logging.getLogger(__name__)\n\n\nclass UserProfileForm(forms.ModelForm):\n first_name = CharField(label=_('First name'), required=False)\n last_name = CharField(label=_('Last name'), required=False)\n\n class Meta(object):\n model = UserProfile\n # Don't allow users edit someone else's user page\n fields = ['first_name', 'last_name', 'homepage']\n\n def __init__(self, *args, **kwargs):\n super(UserProfileForm, self).__init__(*args, **kwargs)\n try:\n self.fields['first_name'].initial = self.instance.user.first_name\n self.fields['last_name'].initial = self.instance.user.last_name\n except AttributeError:\n pass\n\n def save(self, commit=True):\n first_name = self.cleaned_data.pop('first_name', None)\n last_name = self.cleaned_data.pop('last_name', None)\n profile = super(UserProfileForm, self).save(commit=commit)\n if commit:\n user = profile.user\n user.first_name = first_name\n user.last_name = last_name\n user.save()\n return profile\n\n\nclass UserDeleteForm(forms.ModelForm):\n username = CharField(\n label=_('Username'),\n help_text=_('Please type your username to confirm.'),\n )\n\n class Meta(object):\n model = User\n fields = ['username']\n\n def clean_username(self):\n data = self.cleaned_data['username']\n\n if self.instance.username != data:\n raise forms.ValidationError(_('Username does not match!'))\n\n return data\n\n\nclass UserAdvertisingForm(forms.ModelForm):\n class Meta(object):\n model = UserProfile\n fields = ['allow_ads']\n\n\nclass FacetField(forms.MultipleChoiceField):\n\n \"\"\"\n For filtering searches on a facet.\n\n Has validation for the format of facet values.\n \"\"\"\n\n def valid_value(self, value):\n \"\"\"\n Although this is a choice field, no choices need to be supplied.\n\n Instead, we just validate that the value is in the correct format for\n facet filtering (facet_name:value)\n \"\"\"\n if ':' not in value:\n return False\n return True\n", "path": "readthedocs/core/forms.py"}]} | 1,370 | 139 |
gh_patches_debug_34769 | rasdani/github-patches | git_diff | napari__napari-3016 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Opacity slider label should be between 0 and 1
## 🐛 Bug
The opacity slider label should run from 0 to 1, not 0 to 100. This would remove the need for normalization in the slider's value handler: https://github.com/napari/napari/blob/aade148d8e5cb339bb2981ab4d1081ae5d2747e0/napari/_qt/layer_controls/qt_layer_controls_base.py#L79

</issue>
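A sketch of the direction the issue suggests, assuming superqt's float-slider API (`QLabeledDoubleSlider`; the exact class and signature should be checked against the superqt version in use): a labelled 0.0 to 1.0 slider that mirrors `layer.opacity` directly, so the `/ 100` and `* 100` conversions disappear.

```python
from qtpy.QtCore import Qt
from superqt import QLabeledDoubleSlider


def make_opacity_slider(parent=None):
    # Float slider labelled 0.0-1.0 so the displayed value *is* the
    # layer opacity; no int scaling or renormalization is required.
    sld = QLabeledDoubleSlider(Qt.Horizontal, parent=parent)
    sld.setFocusPolicy(Qt.NoFocus)
    sld.setMinimum(0.0)
    sld.setMaximum(1.0)
    sld.setSingleStep(0.01)
    return sld
```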
<code>
[start of napari/_qt/layer_controls/qt_layer_controls_base.py]
1 from qtpy.QtCore import Qt
2 from qtpy.QtWidgets import QComboBox, QFrame, QGridLayout
3 from superqt import QLabeledSlider as QSlider
4
5 from ...layers.base._base_constants import BLENDING_TRANSLATIONS
6 from ...utils.events import disconnect_events
7
8
9 class QtLayerControls(QFrame):
10 """Superclass for all the other LayerControl classes.
11
12 This class is never directly instantiated anywhere.
13
14 Parameters
15 ----------
16 layer : napari.layers.Layer
17 An instance of a napari layer.
18
19 Attributes
20 ----------
21 blendComboBox : qtpy.QtWidgets.QComboBox
22         Dropdown widget to select blending mode of layer.
23 grid_layout : qtpy.QtWidgets.QGridLayout
24 Layout of Qt widget controls for the layer.
25 layer : napari.layers.Layer
26 An instance of a napari layer.
27 opacitySlider : qtpy.QtWidgets.QSlider
28 Slider controlling opacity of the layer.
29 """
30
31 def __init__(self, layer):
32 super().__init__()
33
34 self.layer = layer
35 self.layer.events.blending.connect(self._on_blending_change)
36 self.layer.events.opacity.connect(self._on_opacity_change)
37
38 self.setAttribute(Qt.WA_DeleteOnClose)
39
40 self.setObjectName('layer')
41 self.setMouseTracking(True)
42
43 self.grid_layout = QGridLayout(self)
44 self.grid_layout.setContentsMargins(0, 0, 0, 0)
45 self.grid_layout.setSpacing(2)
46 self.grid_layout.setColumnMinimumWidth(0, 86)
47 self.grid_layout.setColumnStretch(1, 1)
48 self.setLayout(self.grid_layout)
49
50 sld = QSlider(Qt.Horizontal, parent=self)
51 sld.setFocusPolicy(Qt.NoFocus)
52 sld.setMinimum(0)
53 sld.setMaximum(100)
54 sld.setSingleStep(1)
55 sld.valueChanged.connect(self.changeOpacity)
56 self.opacitySlider = sld
57 self._on_opacity_change()
58
59 blend_comboBox = QComboBox(self)
60 for index, (data, text) in enumerate(BLENDING_TRANSLATIONS.items()):
61 data = data.value
62 blend_comboBox.addItem(text, data)
63 if data == self.layer.blending:
64 blend_comboBox.setCurrentIndex(index)
65
66 blend_comboBox.activated[str].connect(self.changeBlending)
67 self.blendComboBox = blend_comboBox
68
69 def changeOpacity(self, value):
70 """Change opacity value on the layer model.
71
72 Parameters
73 ----------
74 value : float
75 Opacity value for shapes.
76 Input range 0 - 100 (transparent to fully opaque).
77 """
78 with self.layer.events.blocker(self._on_opacity_change):
79 self.layer.opacity = value / 100
80
81 def changeBlending(self, text):
82 """Change blending mode on the layer model.
83
84 Parameters
85 ----------
86 text : str
87 Name of blending mode, eg: 'translucent', 'additive', 'opaque'.
88 """
89 self.layer.blending = self.blendComboBox.currentData()
90
91 def _on_opacity_change(self, event=None):
92 """Receive layer model opacity change event and update opacity slider.
93
94 Parameters
95 ----------
96 event : napari.utils.event.Event, optional
97 The napari event that triggered this method, by default None.
98 """
99 with self.layer.events.opacity.blocker():
100 self.opacitySlider.setValue(int(self.layer.opacity * 100))
101
102 def _on_blending_change(self, event=None):
103 """Receive layer model blending mode change event and update slider.
104
105 Parameters
106 ----------
107 event : napari.utils.event.Event, optional
108 The napari event that triggered this method, by default None.
109 """
110 with self.layer.events.blending.blocker():
111 self.blendComboBox.setCurrentIndex(
112 self.blendComboBox.findData(self.layer.blending)
113 )
114
115 def close(self):
116 """Disconnect events when widget is closing."""
117 disconnect_events(self.layer.events, self)
118 for child in self.children():
119 close_method = getattr(child, 'close', None)
120 if close_method is not None:
121 close_method()
122 super().close()
123
[end of napari/_qt/layer_controls/qt_layer_controls_base.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/napari/_qt/layer_controls/qt_layer_controls_base.py b/napari/_qt/layer_controls/qt_layer_controls_base.py
--- a/napari/_qt/layer_controls/qt_layer_controls_base.py
+++ b/napari/_qt/layer_controls/qt_layer_controls_base.py
@@ -1,6 +1,6 @@
from qtpy.QtCore import Qt
from qtpy.QtWidgets import QComboBox, QFrame, QGridLayout
-from superqt import QLabeledSlider as QSlider
+from superqt import QLabeledDoubleSlider
from ...layers.base._base_constants import BLENDING_TRANSLATIONS
from ...utils.events import disconnect_events
@@ -47,11 +47,11 @@
self.grid_layout.setColumnStretch(1, 1)
self.setLayout(self.grid_layout)
- sld = QSlider(Qt.Horizontal, parent=self)
+ sld = QLabeledDoubleSlider(Qt.Horizontal, parent=self)
sld.setFocusPolicy(Qt.NoFocus)
sld.setMinimum(0)
- sld.setMaximum(100)
- sld.setSingleStep(1)
+ sld.setMaximum(1)
+ sld.setSingleStep(0.01)
sld.valueChanged.connect(self.changeOpacity)
self.opacitySlider = sld
self._on_opacity_change()
@@ -76,7 +76,7 @@
Input range 0 - 100 (transparent to fully opaque).
"""
with self.layer.events.blocker(self._on_opacity_change):
- self.layer.opacity = value / 100
+ self.layer.opacity = value
def changeBlending(self, text):
"""Change blending mode on the layer model.
@@ -97,7 +97,7 @@
The napari event that triggered this method, by default None.
"""
with self.layer.events.opacity.blocker():
- self.opacitySlider.setValue(int(self.layer.opacity * 100))
+ self.opacitySlider.setValue(self.layer.opacity)
def _on_blending_change(self, event=None):
"""Receive layer model blending mode change event and update slider.
| {"golden_diff": "diff --git a/napari/_qt/layer_controls/qt_layer_controls_base.py b/napari/_qt/layer_controls/qt_layer_controls_base.py\n--- a/napari/_qt/layer_controls/qt_layer_controls_base.py\n+++ b/napari/_qt/layer_controls/qt_layer_controls_base.py\n@@ -1,6 +1,6 @@\n from qtpy.QtCore import Qt\n from qtpy.QtWidgets import QComboBox, QFrame, QGridLayout\n-from superqt import QLabeledSlider as QSlider\n+from superqt import QLabeledDoubleSlider\n \n from ...layers.base._base_constants import BLENDING_TRANSLATIONS\n from ...utils.events import disconnect_events\n@@ -47,11 +47,11 @@\n self.grid_layout.setColumnStretch(1, 1)\n self.setLayout(self.grid_layout)\n \n- sld = QSlider(Qt.Horizontal, parent=self)\n+ sld = QLabeledDoubleSlider(Qt.Horizontal, parent=self)\n sld.setFocusPolicy(Qt.NoFocus)\n sld.setMinimum(0)\n- sld.setMaximum(100)\n- sld.setSingleStep(1)\n+ sld.setMaximum(1)\n+ sld.setSingleStep(0.01)\n sld.valueChanged.connect(self.changeOpacity)\n self.opacitySlider = sld\n self._on_opacity_change()\n@@ -76,7 +76,7 @@\n Input range 0 - 100 (transparent to fully opaque).\n \"\"\"\n with self.layer.events.blocker(self._on_opacity_change):\n- self.layer.opacity = value / 100\n+ self.layer.opacity = value\n \n def changeBlending(self, text):\n \"\"\"Change blending mode on the layer model.\n@@ -97,7 +97,7 @@\n The napari event that triggered this method, by default None.\n \"\"\"\n with self.layer.events.opacity.blocker():\n- self.opacitySlider.setValue(int(self.layer.opacity * 100))\n+ self.opacitySlider.setValue(self.layer.opacity)\n \n def _on_blending_change(self, event=None):\n \"\"\"Receive layer model blending mode change event and update slider.\n", "issue": "Opacity slider label should be between 0 and 1\n## \ud83d\udc1b Bug\r\n\r\nOpacity slider label should be between 0 and 1 not 0 and 100. 
This will remove need for normalization on slider https://github.com/napari/napari/blob/aade148d8e5cb339bb2981ab4d1081ae5d2747e0/napari/_qt/layer_controls/qt_layer_controls_base.py#L79\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "from qtpy.QtCore import Qt\nfrom qtpy.QtWidgets import QComboBox, QFrame, QGridLayout\nfrom superqt import QLabeledSlider as QSlider\n\nfrom ...layers.base._base_constants import BLENDING_TRANSLATIONS\nfrom ...utils.events import disconnect_events\n\n\nclass QtLayerControls(QFrame):\n \"\"\"Superclass for all the other LayerControl classes.\n\n This class is never directly instantiated anywhere.\n\n Parameters\n ----------\n layer : napari.layers.Layer\n An instance of a napari layer.\n\n Attributes\n ----------\n blendComboBox : qtpy.QtWidgets.QComboBox\n Drowpdown widget to select blending mode of layer.\n grid_layout : qtpy.QtWidgets.QGridLayout\n Layout of Qt widget controls for the layer.\n layer : napari.layers.Layer\n An instance of a napari layer.\n opacitySlider : qtpy.QtWidgets.QSlider\n Slider controlling opacity of the layer.\n \"\"\"\n\n def __init__(self, layer):\n super().__init__()\n\n self.layer = layer\n self.layer.events.blending.connect(self._on_blending_change)\n self.layer.events.opacity.connect(self._on_opacity_change)\n\n self.setAttribute(Qt.WA_DeleteOnClose)\n\n self.setObjectName('layer')\n self.setMouseTracking(True)\n\n self.grid_layout = QGridLayout(self)\n self.grid_layout.setContentsMargins(0, 0, 0, 0)\n self.grid_layout.setSpacing(2)\n self.grid_layout.setColumnMinimumWidth(0, 86)\n self.grid_layout.setColumnStretch(1, 1)\n self.setLayout(self.grid_layout)\n\n sld = QSlider(Qt.Horizontal, parent=self)\n sld.setFocusPolicy(Qt.NoFocus)\n sld.setMinimum(0)\n sld.setMaximum(100)\n sld.setSingleStep(1)\n sld.valueChanged.connect(self.changeOpacity)\n self.opacitySlider = sld\n self._on_opacity_change()\n\n blend_comboBox = QComboBox(self)\n for index, (data, text) in enumerate(BLENDING_TRANSLATIONS.items()):\n data = data.value\n blend_comboBox.addItem(text, data)\n if data == self.layer.blending:\n blend_comboBox.setCurrentIndex(index)\n\n blend_comboBox.activated[str].connect(self.changeBlending)\n self.blendComboBox = blend_comboBox\n\n def changeOpacity(self, value):\n \"\"\"Change opacity value on the layer model.\n\n Parameters\n ----------\n value : float\n Opacity value for shapes.\n Input range 0 - 100 (transparent to fully opaque).\n \"\"\"\n with self.layer.events.blocker(self._on_opacity_change):\n self.layer.opacity = value / 100\n\n def changeBlending(self, text):\n \"\"\"Change blending mode on the layer model.\n\n Parameters\n ----------\n text : str\n Name of blending mode, eg: 'translucent', 'additive', 'opaque'.\n \"\"\"\n self.layer.blending = self.blendComboBox.currentData()\n\n def _on_opacity_change(self, event=None):\n \"\"\"Receive layer model opacity change event and update opacity slider.\n\n Parameters\n ----------\n event : napari.utils.event.Event, optional\n The napari event that triggered this method, by default None.\n \"\"\"\n with self.layer.events.opacity.blocker():\n self.opacitySlider.setValue(int(self.layer.opacity * 100))\n\n def _on_blending_change(self, event=None):\n \"\"\"Receive layer model blending mode change event and update slider.\n\n Parameters\n ----------\n event : napari.utils.event.Event, optional\n The napari event that triggered this method, by default None.\n \"\"\"\n with self.layer.events.blending.blocker():\n self.blendComboBox.setCurrentIndex(\n 
self.blendComboBox.findData(self.layer.blending)\n )\n\n def close(self):\n \"\"\"Disconnect events when widget is closing.\"\"\"\n disconnect_events(self.layer.events, self)\n for child in self.children():\n close_method = getattr(child, 'close', None)\n if close_method is not None:\n close_method()\n super().close()\n", "path": "napari/_qt/layer_controls/qt_layer_controls_base.py"}]} | 1,870 | 462 |
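
The core of the napari fix above is moving from an integer 0–100 slider to superqt's float slider so the widget and the model share one 0–1 scale, removing the `/100` and `*100` conversions. A minimal standalone sketch of that wiring, assuming only the superqt/qtpy APIs already used in the diff (`layer` stands in for any object with a float `opacity` attribute; building the widget still requires a running `QApplication`):

```python
# Sketch of the 0-1 slider wiring from the fix above: a float-valued slider
# bound directly to a 0..1 opacity, so no scale conversion is needed.
from qtpy.QtCore import Qt
from superqt import QLabeledDoubleSlider

def make_opacity_slider(layer, parent=None):
    sld = QLabeledDoubleSlider(Qt.Horizontal, parent=parent)
    sld.setFocusPolicy(Qt.NoFocus)
    sld.setMinimum(0)
    sld.setMaximum(1)            # label now reads 0..1, matching the model
    sld.setSingleStep(0.01)
    sld.setValue(layer.opacity)  # no * 100 on the way in
    sld.valueChanged.connect(lambda v: setattr(layer, "opacity", v))  # no / 100
    return sld
```
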
gh_patches_debug_31134 | rasdani/github-patches | git_diff | pyload__pyload-1535 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Plugin DlProtectCom doesn't work
Trying to download http://www.dl-protect.com/2C964B88 gives the error 'NoneType' object has no attribute 'group' 0.00 B
</issue>
<code>
[start of module/plugins/crypter/DlProtectCom.py]
1 # -*- coding: utf-8 -*-
2
3 import re
4 import time
5
6 from base64 import urlsafe_b64encode
7
8 from module.plugins.internal.SimpleCrypter import SimpleCrypter, create_getInfo
9
10
11 class DlProtectCom(SimpleCrypter):
12 __name__ = "DlProtectCom"
13 __type__ = "crypter"
14 __version__ = "0.03"
15
16 __pattern__ = r'https?://(?:www\.)?dl-protect\.com/((en|fr)/)?\w+'
17 __config__ = [("use_premium" , "bool", "Use premium account if available" , True),
18 ("use_subfolder" , "bool", "Save package to subfolder" , True),
19 ("subfolder_per_pack", "bool", "Create a subfolder for each package", True)]
20
21 __description__ = """Dl-protect.com decrypter plugin"""
22 __license__ = "GPLv3"
23 __authors__ = [("Walter Purcaro", "[email protected]")]
24
25
26 COOKIES = [("dl-protect.com", "l", "en")]
27
28 OFFLINE_PATTERN = r'Unfortunately, the link you are looking for is not found'
29
30
31 def getLinks(self):
32 # Direct link with redirect
33 if not re.match(r"https?://(?:www\.)?dl-protect\.com/.+", self.req.http.lastEffectiveURL):
34 return [self.req.http.lastEffectiveURL]
35
36 post_req = {'key' : re.search(r'name="key" value="(.+?)"', self.html).group(1),
37 'submitform': ""}
38
39 if "Please click on continue to see the content" in self.html:
40 post_req['submitform'] = "Continue"
41 self.wait(2)
42
43 else:
44 mstime = int(round(time.time() * 1000))
45 b64time = "_" + urlsafe_b64encode(str(mstime)).replace("=", "%3D")
46
47 post_req.update({'i' : b64time,
48 'submitform': "Decrypt+link"})
49
50 if "Password :" in self.html:
51 post_req['pwd'] = self.getPassword()
52
53 if "Security Code" in self.html:
54 captcha_id = re.search(r'/captcha\.php\?uid=(.+?)"', self.html).group(1)
55 captcha_url = "http://www.dl-protect.com/captcha.php?uid=" + captcha_id
56 captcha_code = self.decryptCaptcha(captcha_url, imgtype="gif")
57
58 post_req['secure'] = captcha_code
59
60 self.html = self.load(self.pyfile.url, post=post_req)
61
62 for errmsg in ("The password is incorrect", "The security code is incorrect"):
63 if errmsg in self.html:
64 self.fail(_(errmsg[1:]))
65
66 return re.findall(r'<a href="([^/].+?)" target="_blank">', self.html)
67
68
69 getInfo = create_getInfo(DlProtectCom)
70
[end of module/plugins/crypter/DlProtectCom.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/module/plugins/crypter/DlProtectCom.py b/module/plugins/crypter/DlProtectCom.py
--- a/module/plugins/crypter/DlProtectCom.py
+++ b/module/plugins/crypter/DlProtectCom.py
@@ -11,7 +11,7 @@
class DlProtectCom(SimpleCrypter):
__name__ = "DlProtectCom"
__type__ = "crypter"
- __version__ = "0.03"
+ __version__ = "0.04"
__pattern__ = r'https?://(?:www\.)?dl-protect\.com/((en|fr)/)?\w+'
__config__ = [("use_premium" , "bool", "Use premium account if available" , True),
@@ -36,7 +36,7 @@
post_req = {'key' : re.search(r'name="key" value="(.+?)"', self.html).group(1),
'submitform': ""}
- if "Please click on continue to see the content" in self.html:
+ if "Please click on continue to see the links" in self.html:
post_req['submitform'] = "Continue"
self.wait(2)
@@ -51,11 +51,10 @@
post_req['pwd'] = self.getPassword()
if "Security Code" in self.html:
- captcha_id = re.search(r'/captcha\.php\?uid=(.+?)"', self.html).group(1)
- captcha_url = "http://www.dl-protect.com/captcha.php?uid=" + captcha_id
- captcha_code = self.decryptCaptcha(captcha_url, imgtype="gif")
-
- post_req['secure'] = captcha_code
+ m = re.search(r'/captcha\.php\?key=(.+?)"', self.html)
+ if m:
+ captcha_code = self.decryptCaptcha("http://www.dl-protect.com/captcha.php?key=" + m.group(1), imgtype="gif")
+ post_req['secure'] = captcha_code
self.html = self.load(self.pyfile.url, post=post_req)
| {"golden_diff": "diff --git a/module/plugins/crypter/DlProtectCom.py b/module/plugins/crypter/DlProtectCom.py\n--- a/module/plugins/crypter/DlProtectCom.py\n+++ b/module/plugins/crypter/DlProtectCom.py\n@@ -11,7 +11,7 @@\n class DlProtectCom(SimpleCrypter):\n __name__ = \"DlProtectCom\"\n __type__ = \"crypter\"\n- __version__ = \"0.03\"\n+ __version__ = \"0.04\"\n \n __pattern__ = r'https?://(?:www\\.)?dl-protect\\.com/((en|fr)/)?\\w+'\n __config__ = [(\"use_premium\" , \"bool\", \"Use premium account if available\" , True),\n@@ -36,7 +36,7 @@\n post_req = {'key' : re.search(r'name=\"key\" value=\"(.+?)\"', self.html).group(1),\n 'submitform': \"\"}\n \n- if \"Please click on continue to see the content\" in self.html:\n+ if \"Please click on continue to see the links\" in self.html:\n post_req['submitform'] = \"Continue\"\n self.wait(2)\n \n@@ -51,11 +51,10 @@\n post_req['pwd'] = self.getPassword()\n \n if \"Security Code\" in self.html:\n- captcha_id = re.search(r'/captcha\\.php\\?uid=(.+?)\"', self.html).group(1)\n- captcha_url = \"http://www.dl-protect.com/captcha.php?uid=\" + captcha_id\n- captcha_code = self.decryptCaptcha(captcha_url, imgtype=\"gif\")\n-\n- post_req['secure'] = captcha_code\n+ m = re.search(r'/captcha\\.php\\?key=(.+?)\"', self.html)\n+ if m:\n+ captcha_code = self.decryptCaptcha(\"http://www.dl-protect.com/captcha.php?key=\" + m.group(1), imgtype=\"gif\")\n+ post_req['secure'] = captcha_code\n \n self.html = self.load(self.pyfile.url, post=post_req)\n", "issue": "Plugin DlProtectCom doesn't work\nTrying to download http://www.dl-protect.com/2C964B88 gives the rror 'NoneType' object has no attribute 'group' 0.00 B\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport re\nimport time\n\nfrom base64 import urlsafe_b64encode\n\nfrom module.plugins.internal.SimpleCrypter import SimpleCrypter, create_getInfo\n\n\nclass DlProtectCom(SimpleCrypter):\n __name__ = \"DlProtectCom\"\n __type__ = \"crypter\"\n __version__ = \"0.03\"\n\n __pattern__ = r'https?://(?:www\\.)?dl-protect\\.com/((en|fr)/)?\\w+'\n __config__ = [(\"use_premium\" , \"bool\", \"Use premium account if available\" , True),\n (\"use_subfolder\" , \"bool\", \"Save package to subfolder\" , True),\n (\"subfolder_per_pack\", \"bool\", \"Create a subfolder for each package\", True)]\n\n __description__ = \"\"\"Dl-protect.com decrypter plugin\"\"\"\n __license__ = \"GPLv3\"\n __authors__ = [(\"Walter Purcaro\", \"[email protected]\")]\n\n\n COOKIES = [(\"dl-protect.com\", \"l\", \"en\")]\n\n OFFLINE_PATTERN = r'Unfortunately, the link you are looking for is not found'\n\n\n def getLinks(self):\n # Direct link with redirect\n if not re.match(r\"https?://(?:www\\.)?dl-protect\\.com/.+\", self.req.http.lastEffectiveURL):\n return [self.req.http.lastEffectiveURL]\n\n post_req = {'key' : re.search(r'name=\"key\" value=\"(.+?)\"', self.html).group(1),\n 'submitform': \"\"}\n\n if \"Please click on continue to see the content\" in self.html:\n post_req['submitform'] = \"Continue\"\n self.wait(2)\n\n else:\n mstime = int(round(time.time() * 1000))\n b64time = \"_\" + urlsafe_b64encode(str(mstime)).replace(\"=\", \"%3D\")\n\n post_req.update({'i' : b64time,\n 'submitform': \"Decrypt+link\"})\n\n if \"Password :\" in self.html:\n post_req['pwd'] = self.getPassword()\n\n if \"Security Code\" in self.html:\n captcha_id = re.search(r'/captcha\\.php\\?uid=(.+?)\"', self.html).group(1)\n captcha_url = \"http://www.dl-protect.com/captcha.php?uid=\" + captcha_id\n captcha_code = self.decryptCaptcha(captcha_url, 
imgtype=\"gif\")\n\n post_req['secure'] = captcha_code\n\n self.html = self.load(self.pyfile.url, post=post_req)\n\n for errmsg in (\"The password is incorrect\", \"The security code is incorrect\"):\n if errmsg in self.html:\n self.fail(_(errmsg[1:]))\n\n return re.findall(r'<a href=\"([^/].+?)\" target=\"_blank\">', self.html)\n\n\ngetInfo = create_getInfo(DlProtectCom)\n", "path": "module/plugins/crypter/DlProtectCom.py"}]} | 1,396 | 485 |
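
The crash in the record above comes from calling `.group(1)` directly on `re.search(...)`, which returns `None` once the page markup changes (here `uid=` became `key=`). A minimal sketch of the guard the fix introduces — the helper name is hypothetical:

```python
# Sketch of the None-guard the fix above applies: re.search returns None on
# no match, so only call .group() once a match object is confirmed.
import re

def extract_captcha_key(html):
    m = re.search(r'/captcha\.php\?key=(.+?)"', html)  # pattern from the patched plugin
    if m is None:
        return None  # markup changed or no captcha present
    return m.group(1)

assert extract_captcha_key('src="/captcha.php?key=abc123"') == "abc123"
assert extract_captcha_key("<html>no captcha here</html>") is None
```
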
gh_patches_debug_13065 | rasdani/github-patches | git_diff | openai__gym-2646 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug Report] AttributeError: 'Discrete' object has no attribute 'start'
**Describe the bug**
Change in https://github.com/openai/gym/pull/2470 introduced a bug when loading pre-trained agents with previous version of gym.
Fix is probably similar to https://github.com/DLR-RM/stable-baselines3/issues/573 ...
**Code example**
from RL Zoo CI
See https://github.com/DLR-RM/rl-baselines3-zoo/pull/210 (note: the CI now passes because I downgraded to gym 0.21)
and
https://github.com/DLR-RM/rl-baselines3-zoo/runs/5305883843?check_suite_focus=true
```
python enjoy --algo qrdqn --env Acrobot-v1
```
traceback:
```
Loading rl-trained-agents/qrdqn/Acrobot-v1_1/Acrobot-v1.zip
----------------------------- Captured stderr call -----------------------------
/opt/hostedtoolcache/Python/3.9.10/x64/lib/python3.9/site-packages/stable_baselines3/common/save_util.py:166: UserWarning: Could not deserialize object exploration_schedule. Consider using `custom_objects` argument to replace this object.
warnings.warn(
Traceback (most recent call last):
File "/home/runner/work/rl-baselines3-zoo/rl-baselines3-zoo/enjoy.py", line 248, in <module>
main()
File "/home/runner/work/rl-baselines3-zoo/rl-baselines3-zoo/enjoy.py", line 178, in main
model = ALGOS[algo].load(model_path, env=env, custom_objects=custom_objects, **kwargs)
File "/opt/hostedtoolcache/Python/3.9.10/x64/lib/python3.9/site-packages/stable_baselines3/common/base_class.py", line 709, in load
check_for_correct_spaces(env, data["observation_space"], data["action_space"])
File "/opt/hostedtoolcache/Python/3.9.10/x64/lib/python3.9/site-packages/stable_baselines3/common/utils.py", line 224, in check_for_correct_spaces
if action_space != env.action_space:
File "/opt/hostedtoolcache/Python/3.9.10/x64/lib/python3.9/site-packages/gym/spaces/discrete.py", line 50, in __eq__
and self.start == other.start
AttributeError: 'Discrete' object has no attribute 'start'
```
**System Info**
Gym 0.22
**Additional context**
Add any other context about the problem here.
### Checklist
- [x] I have checked that there is no similar [issue](https://github.com/openai/gym/issues) in the repo (**required**)
</issue>
<code>
[start of gym/spaces/discrete.py]
1 from typing import Optional
2
3 import numpy as np
4 from .space import Space
5
6
7 class Discrete(Space[int]):
8 r"""A discrete space in :math:`\{ 0, 1, \\dots, n-1 \}`.
9
10 A start value can be optionally specified to shift the range
11 to :math:`\{ a, a+1, \\dots, a+n-1 \}`.
12
13 Example::
14
15 >>> Discrete(2)
16 >>> Discrete(3, start=-1) # {-1, 0, 1}
17
18 """
19
20 def __init__(self, n: int, seed: Optional[int] = None, start: int = 0):
21 assert n > 0, "n (counts) have to be positive"
22 assert isinstance(start, (int, np.integer))
23 self.n = int(n)
24 self.start = int(start)
25 super().__init__((), np.int64, seed)
26
27 def sample(self) -> int:
28 return self.start + self.np_random.randint(self.n)
29
30 def contains(self, x) -> bool:
31 if isinstance(x, int):
32 as_int = x
33 elif isinstance(x, (np.generic, np.ndarray)) and (
34 x.dtype.char in np.typecodes["AllInteger"] and x.shape == ()
35 ):
36 as_int = int(x) # type: ignore
37 else:
38 return False
39 return self.start <= as_int < self.start + self.n
40
41 def __repr__(self) -> str:
42 if self.start != 0:
43 return "Discrete(%d, start=%d)" % (self.n, self.start)
44 return "Discrete(%d)" % self.n
45
46 def __eq__(self, other) -> bool:
47 return (
48 isinstance(other, Discrete)
49 and self.n == other.n
50 and self.start == other.start
51 )
52
[end of gym/spaces/discrete.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gym/spaces/discrete.py b/gym/spaces/discrete.py
--- a/gym/spaces/discrete.py
+++ b/gym/spaces/discrete.py
@@ -12,7 +12,7 @@
Example::
- >>> Discrete(2)
+ >>> Discrete(2) # {0, 1}
>>> Discrete(3, start=-1) # {-1, 0, 1}
"""
@@ -49,3 +49,17 @@
and self.n == other.n
and self.start == other.start
)
+
+ def __setstate__(self, state):
+ super().__setstate__(state)
+
+ # Don't mutate the original state
+ state = dict(state)
+
+ # Allow for loading of legacy states.
+ # See https://github.com/openai/gym/pull/2470
+ if "start" not in state:
+ state["start"] = 0
+
+ # Update our state
+ self.__dict__.update(state)
| {"golden_diff": "diff --git a/gym/spaces/discrete.py b/gym/spaces/discrete.py\n--- a/gym/spaces/discrete.py\n+++ b/gym/spaces/discrete.py\n@@ -12,7 +12,7 @@\n \n Example::\n \n- >>> Discrete(2)\n+ >>> Discrete(2) # {0, 1}\n >>> Discrete(3, start=-1) # {-1, 0, 1}\n \n \"\"\"\n@@ -49,3 +49,17 @@\n and self.n == other.n\n and self.start == other.start\n )\n+\n+ def __setstate__(self, state):\n+ super().__setstate__(state)\n+\n+ # Don't mutate the original state\n+ state = dict(state)\n+\n+ # Allow for loading of legacy states.\n+ # See https://github.com/openai/gym/pull/2470\n+ if \"start\" not in state:\n+ state[\"start\"] = 0\n+\n+ # Update our state\n+ self.__dict__.update(state)\n", "issue": "[Bug Report] AttributeError: 'Discrete' object has no attribute 'start'\n**Describe the bug**\r\nChange in https://github.com/openai/gym/pull/2470 introduced a bug when loading pre-trained agents with previous version of gym.\r\n\r\nFix is probably similar to https://github.com/DLR-RM/stable-baselines3/issues/573 ...\r\n\r\n\r\n**Code example**\r\nfrom RL Zoo CI\r\n\r\nSee https://github.com/DLR-RM/rl-baselines3-zoo/pull/210 (note: the CI now passes because I downgraded to gym 0.21)\r\nand\r\nhttps://github.com/DLR-RM/rl-baselines3-zoo/runs/5305883843?check_suite_focus=true\r\n\r\n```\r\npython enjoy --algo qrdqn --env Acrobot-v1\r\n```\r\n\r\ntraceback:\r\n```\r\n Loading rl-trained-agents/qrdqn/Acrobot-v1_1/Acrobot-v1.zip\r\n----------------------------- Captured stderr call -----------------------------\r\n/opt/hostedtoolcache/Python/3.9.10/x64/lib/python3.9/site-packages/stable_baselines3/common/save_util.py:166: UserWarning: Could not deserialize object exploration_schedule. Consider using `custom_objects` argument to replace this object.\r\n warnings.warn(\r\nTraceback (most recent call last):\r\n File \"/home/runner/work/rl-baselines3-zoo/rl-baselines3-zoo/enjoy.py\", line 248, in <module>\r\n main()\r\n File \"/home/runner/work/rl-baselines3-zoo/rl-baselines3-zoo/enjoy.py\", line 178, in main\r\n model = ALGOS[algo].load(model_path, env=env, custom_objects=custom_objects, **kwargs)\r\n File \"/opt/hostedtoolcache/Python/3.9.10/x64/lib/python3.9/site-packages/stable_baselines3/common/base_class.py\", line 709, in load\r\n check_for_correct_spaces(env, data[\"observation_space\"], data[\"action_space\"])\r\n File \"/opt/hostedtoolcache/Python/3.9.10/x64/lib/python3.9/site-packages/stable_baselines3/common/utils.py\", line 224, in check_for_correct_spaces\r\n if action_space != env.action_space:\r\n File \"/opt/hostedtoolcache/Python/3.9.10/x64/lib/python3.9/site-packages/gym/spaces/discrete.py\", line 50, in __eq__\r\n and self.start == other.start\r\nAttributeError: 'Discrete' object has no attribute 'start'\r\n```\r\n\r\n**System Info**\r\nGym 0.22\r\n\r\n**Additional context**\r\nAdd any other context about the problem here.\r\n\r\n### Checklist\r\n\r\n- [x] I have checked that there is no similar [issue](https://github.com/openai/gym/issues) in the repo (**required**)\r\n\n", "before_files": [{"content": "from typing import Optional\n\nimport numpy as np\nfrom .space import Space\n\n\nclass Discrete(Space[int]):\n r\"\"\"A discrete space in :math:`\\{ 0, 1, \\\\dots, n-1 \\}`.\n\n A start value can be optionally specified to shift the range\n to :math:`\\{ a, a+1, \\\\dots, a+n-1 \\}`.\n\n Example::\n\n >>> Discrete(2)\n >>> Discrete(3, start=-1) # {-1, 0, 1}\n\n \"\"\"\n\n def __init__(self, n: int, seed: Optional[int] = None, start: int = 0):\n assert n > 0, \"n (counts) have to be 
positive\"\n assert isinstance(start, (int, np.integer))\n self.n = int(n)\n self.start = int(start)\n super().__init__((), np.int64, seed)\n\n def sample(self) -> int:\n return self.start + self.np_random.randint(self.n)\n\n def contains(self, x) -> bool:\n if isinstance(x, int):\n as_int = x\n elif isinstance(x, (np.generic, np.ndarray)) and (\n x.dtype.char in np.typecodes[\"AllInteger\"] and x.shape == ()\n ):\n as_int = int(x) # type: ignore\n else:\n return False\n return self.start <= as_int < self.start + self.n\n\n def __repr__(self) -> str:\n if self.start != 0:\n return \"Discrete(%d, start=%d)\" % (self.n, self.start)\n return \"Discrete(%d)\" % self.n\n\n def __eq__(self, other) -> bool:\n return (\n isinstance(other, Discrete)\n and self.n == other.n\n and self.start == other.start\n )\n", "path": "gym/spaces/discrete.py"}]} | 1,694 | 245 |
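
The gym fix above is the standard backward-compatible unpickling idiom: `__setstate__` backfills attributes that objects serialized under an older version never had. A self-contained sketch of the idiom with a toy class (not gym's actual `Discrete`):

```python
# Sketch: default a missing attribute when restoring pre-upgrade pickles.
class Space:
    def __init__(self, n, start=0):
        self.n = n
        self.start = start            # attribute added in a newer release

    def __setstate__(self, state):
        state = dict(state)           # don't mutate the caller's dict
        state.setdefault("start", 0)  # legacy payloads predate "start"
        self.__dict__.update(state)

legacy_state = {"n": 3}               # what an old pickle's __dict__ looked like
s = Space.__new__(Space)              # bypass __init__, as pickle does
s.__setstate__(legacy_state)
assert (s.n, s.start) == (3, 0)
```
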
gh_patches_debug_34565 | rasdani/github-patches | git_diff | pallets__click-1328 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
complex example - misleading name for context
The name `Context` and `pass_context` are misleading in the complex example, since the `Context` defined in the example shares a name with the click `Context`. Maybe a different name such as "Environment" or "Options" would be more appropriate.
</issue>
<code>
[start of examples/complex/complex/commands/cmd_status.py]
1 import click
2 from complex.cli import pass_context
3
4
5 @click.command('status', short_help='Shows file changes.')
6 @pass_context
7 def cli(ctx):
8 """Shows file changes in the current working directory."""
9 ctx.log('Changed files: none')
10 ctx.vlog('bla bla bla, debug info')
11
[end of examples/complex/complex/commands/cmd_status.py]
[start of examples/complex/complex/cli.py]
1 import os
2 import sys
3 import click
4
5
6 CONTEXT_SETTINGS = dict(auto_envvar_prefix='COMPLEX')
7
8
9 class Context(object):
10
11 def __init__(self):
12 self.verbose = False
13 self.home = os.getcwd()
14
15 def log(self, msg, *args):
16 """Logs a message to stderr."""
17 if args:
18 msg %= args
19 click.echo(msg, file=sys.stderr)
20
21 def vlog(self, msg, *args):
22 """Logs a message to stderr only if verbose is enabled."""
23 if self.verbose:
24 self.log(msg, *args)
25
26
27 pass_context = click.make_pass_decorator(Context, ensure=True)
28 cmd_folder = os.path.abspath(os.path.join(os.path.dirname(__file__),
29 'commands'))
30
31
32 class ComplexCLI(click.MultiCommand):
33
34 def list_commands(self, ctx):
35 rv = []
36 for filename in os.listdir(cmd_folder):
37 if filename.endswith('.py') and \
38 filename.startswith('cmd_'):
39 rv.append(filename[4:-3])
40 rv.sort()
41 return rv
42
43 def get_command(self, ctx, name):
44 try:
45 if sys.version_info[0] == 2:
46 name = name.encode('ascii', 'replace')
47 mod = __import__('complex.commands.cmd_' + name,
48 None, None, ['cli'])
49 except ImportError:
50 return
51 return mod.cli
52
53
54 @click.command(cls=ComplexCLI, context_settings=CONTEXT_SETTINGS)
55 @click.option('--home', type=click.Path(exists=True, file_okay=False,
56 resolve_path=True),
57 help='Changes the folder to operate on.')
58 @click.option('-v', '--verbose', is_flag=True,
59 help='Enables verbose mode.')
60 @pass_context
61 def cli(ctx, verbose, home):
62 """A complex command line interface."""
63 ctx.verbose = verbose
64 if home is not None:
65 ctx.home = home
66
[end of examples/complex/complex/cli.py]
[start of examples/complex/complex/commands/cmd_init.py]
1 import click
2 from complex.cli import pass_context
3
4
5 @click.command('init', short_help='Initializes a repo.')
6 @click.argument('path', required=False, type=click.Path(resolve_path=True))
7 @pass_context
8 def cli(ctx, path):
9 """Initializes a repository."""
10 if path is None:
11 path = ctx.home
12 ctx.log('Initialized the repository in %s',
13 click.format_filename(path))
14
[end of examples/complex/complex/commands/cmd_init.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/complex/complex/cli.py b/examples/complex/complex/cli.py
--- a/examples/complex/complex/cli.py
+++ b/examples/complex/complex/cli.py
@@ -6,7 +6,7 @@
CONTEXT_SETTINGS = dict(auto_envvar_prefix='COMPLEX')
-class Context(object):
+class Environment(object):
def __init__(self):
self.verbose = False
@@ -24,7 +24,7 @@
self.log(msg, *args)
-pass_context = click.make_pass_decorator(Context, ensure=True)
+pass_environment = click.make_pass_decorator(Environment, ensure=True)
cmd_folder = os.path.abspath(os.path.join(os.path.dirname(__file__),
'commands'))
@@ -57,7 +57,7 @@
help='Changes the folder to operate on.')
@click.option('-v', '--verbose', is_flag=True,
help='Enables verbose mode.')
-@pass_context
+@pass_environment
def cli(ctx, verbose, home):
"""A complex command line interface."""
ctx.verbose = verbose
diff --git a/examples/complex/complex/commands/cmd_init.py b/examples/complex/complex/commands/cmd_init.py
--- a/examples/complex/complex/commands/cmd_init.py
+++ b/examples/complex/complex/commands/cmd_init.py
@@ -1,10 +1,10 @@
import click
-from complex.cli import pass_context
+from complex.cli import pass_environment
@click.command('init', short_help='Initializes a repo.')
@click.argument('path', required=False, type=click.Path(resolve_path=True))
-@pass_context
+@pass_environment
def cli(ctx, path):
"""Initializes a repository."""
if path is None:
diff --git a/examples/complex/complex/commands/cmd_status.py b/examples/complex/complex/commands/cmd_status.py
--- a/examples/complex/complex/commands/cmd_status.py
+++ b/examples/complex/complex/commands/cmd_status.py
@@ -1,9 +1,9 @@
import click
-from complex.cli import pass_context
+from complex.cli import pass_environment
@click.command('status', short_help='Shows file changes.')
-@pass_context
+@pass_environment
def cli(ctx):
"""Shows file changes in the current working directory."""
ctx.log('Changed files: none')
| {"golden_diff": "diff --git a/examples/complex/complex/cli.py b/examples/complex/complex/cli.py\n--- a/examples/complex/complex/cli.py\n+++ b/examples/complex/complex/cli.py\n@@ -6,7 +6,7 @@\n CONTEXT_SETTINGS = dict(auto_envvar_prefix='COMPLEX')\n \n \n-class Context(object):\n+class Environment(object):\n \n def __init__(self):\n self.verbose = False\n@@ -24,7 +24,7 @@\n self.log(msg, *args)\n \n \n-pass_context = click.make_pass_decorator(Context, ensure=True)\n+pass_environment = click.make_pass_decorator(Environment, ensure=True)\n cmd_folder = os.path.abspath(os.path.join(os.path.dirname(__file__),\n 'commands'))\n \n@@ -57,7 +57,7 @@\n help='Changes the folder to operate on.')\n @click.option('-v', '--verbose', is_flag=True,\n help='Enables verbose mode.')\n-@pass_context\n+@pass_environment\n def cli(ctx, verbose, home):\n \"\"\"A complex command line interface.\"\"\"\n ctx.verbose = verbose\ndiff --git a/examples/complex/complex/commands/cmd_init.py b/examples/complex/complex/commands/cmd_init.py\n--- a/examples/complex/complex/commands/cmd_init.py\n+++ b/examples/complex/complex/commands/cmd_init.py\n@@ -1,10 +1,10 @@\n import click\n-from complex.cli import pass_context\n+from complex.cli import pass_environment\n \n \n @click.command('init', short_help='Initializes a repo.')\n @click.argument('path', required=False, type=click.Path(resolve_path=True))\n-@pass_context\n+@pass_environment\n def cli(ctx, path):\n \"\"\"Initializes a repository.\"\"\"\n if path is None:\ndiff --git a/examples/complex/complex/commands/cmd_status.py b/examples/complex/complex/commands/cmd_status.py\n--- a/examples/complex/complex/commands/cmd_status.py\n+++ b/examples/complex/complex/commands/cmd_status.py\n@@ -1,9 +1,9 @@\n import click\n-from complex.cli import pass_context\n+from complex.cli import pass_environment\n \n \n @click.command('status', short_help='Shows file changes.')\n-@pass_context\n+@pass_environment\n def cli(ctx):\n \"\"\"Shows file changes in the current working directory.\"\"\"\n ctx.log('Changed files: none')\n", "issue": "complex example - misleading name for context\nThe name `Context` and `pass_context` are misleading in the complex example, since the `Context` defined in the example shares a name with the click `Context`. 
Maybe a different name such as \"Environment\" or \"Options\" would be more appropriate.\n", "before_files": [{"content": "import click\nfrom complex.cli import pass_context\n\n\[email protected]('status', short_help='Shows file changes.')\n@pass_context\ndef cli(ctx):\n \"\"\"Shows file changes in the current working directory.\"\"\"\n ctx.log('Changed files: none')\n ctx.vlog('bla bla bla, debug info')\n", "path": "examples/complex/complex/commands/cmd_status.py"}, {"content": "import os\nimport sys\nimport click\n\n\nCONTEXT_SETTINGS = dict(auto_envvar_prefix='COMPLEX')\n\n\nclass Context(object):\n\n def __init__(self):\n self.verbose = False\n self.home = os.getcwd()\n\n def log(self, msg, *args):\n \"\"\"Logs a message to stderr.\"\"\"\n if args:\n msg %= args\n click.echo(msg, file=sys.stderr)\n\n def vlog(self, msg, *args):\n \"\"\"Logs a message to stderr only if verbose is enabled.\"\"\"\n if self.verbose:\n self.log(msg, *args)\n\n\npass_context = click.make_pass_decorator(Context, ensure=True)\ncmd_folder = os.path.abspath(os.path.join(os.path.dirname(__file__),\n 'commands'))\n\n\nclass ComplexCLI(click.MultiCommand):\n\n def list_commands(self, ctx):\n rv = []\n for filename in os.listdir(cmd_folder):\n if filename.endswith('.py') and \\\n filename.startswith('cmd_'):\n rv.append(filename[4:-3])\n rv.sort()\n return rv\n\n def get_command(self, ctx, name):\n try:\n if sys.version_info[0] == 2:\n name = name.encode('ascii', 'replace')\n mod = __import__('complex.commands.cmd_' + name,\n None, None, ['cli'])\n except ImportError:\n return\n return mod.cli\n\n\[email protected](cls=ComplexCLI, context_settings=CONTEXT_SETTINGS)\[email protected]('--home', type=click.Path(exists=True, file_okay=False,\n resolve_path=True),\n help='Changes the folder to operate on.')\[email protected]('-v', '--verbose', is_flag=True,\n help='Enables verbose mode.')\n@pass_context\ndef cli(ctx, verbose, home):\n \"\"\"A complex command line interface.\"\"\"\n ctx.verbose = verbose\n if home is not None:\n ctx.home = home\n", "path": "examples/complex/complex/cli.py"}, {"content": "import click\nfrom complex.cli import pass_context\n\n\[email protected]('init', short_help='Initializes a repo.')\[email protected]('path', required=False, type=click.Path(resolve_path=True))\n@pass_context\ndef cli(ctx, path):\n \"\"\"Initializes a repository.\"\"\"\n if path is None:\n path = ctx.home\n ctx.log('Initialized the repository in %s',\n click.format_filename(path))\n", "path": "examples/complex/complex/commands/cmd_init.py"}]} | 1,367 | 500 |
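
The click fix above is purely a rename so the example's app-state class stops shadowing `click.Context`; `click.make_pass_decorator` accepts any class, so only the names change. A minimal sketch of the renamed pattern:

```python
# Sketch: app state under a name that cannot be confused with click.Context.
import click

class Environment(object):
    def __init__(self):
        self.verbose = False

pass_environment = click.make_pass_decorator(Environment, ensure=True)

@click.command()
@click.option("-v", "--verbose", is_flag=True)
@pass_environment
def cli(env, verbose):
    """`env` is our Environment object; click's own Context stays separate."""
    env.verbose = verbose
```
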
gh_patches_debug_249 | rasdani/github-patches | git_diff | aws__aws-cli-3790 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
The aws-cli bundle package uses an insecure version of PyYAML
### awscli version:<br>
`aws-cli/1.16.52 Python/2.7.15 Linux/4.14.77-69.57.amzn1.x86_64 exec-env/AWS_ECS_EC2 botocore/1.12.42`
[NVD entry](https://nvd.nist.gov/vuln/detail/CVE-2017-18342)
This issue was found when vulnerability alerts started appearing in Twistlock in response to scans of Docker images that we are using in several applications. The generic error found in these outlines is as such:<br>
```
Impacted versions: <=3.13
In PyYAML before 4.1, the yaml.load() API could execute arbitrary code. In other words, yaml.safe_load is not used.
```
These images are not natively using PyYAML, so this led us to a Docker `RUN` line in a Dockerfile that executed a script that contains a line of code that executes the installation of the `aws-cli` bundle using the following URL:<br>
`https://s3.amazonaws.com/aws-cli/awscli-bundle.zip`
Unpacking this archive shows a list of package dependencies that includes the vulnerable version of PyYAML:<br>
`awscli-bundle/packages/PyYAML-3.13.tar.gz`
The latest (and actually secure) version of PyYAML appears to be 4.1 according to the developer via the [GitHub repo](https://github.com/yaml/pyyaml).
### Request
Is it possible to have the patched version of PyYAML added to this bundle to avoid this vulnerability?
Thank you!
</issue>
<code>
[start of awscli/customizations/ecs/filehelpers.py]
1 # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License"). You
4 # may not use this file except in compliance with the License. A copy of
5 # the License is located at
6 #
7 # http://aws.amazon.com/apache2.0/
8 #
9 # or in the "license" file accompanying this file. This file is
10 # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific
12 # language governing permissions and limitations under the License.
13
14 import json
15 import yaml
16
17 from awscli.customizations.ecs import exceptions
18
19 MAX_CHAR_LENGTH = 46
20 APP_PREFIX = 'AppECS-'
21 DGP_PREFIX = 'DgpECS-'
22
23
24 def find_required_key(resource_name, obj, key):
25
26 if obj is None:
27 raise exceptions.MissingPropertyError(
28 resource=resource_name, prop_name=key)
29
30 result = _get_case_insensitive_key(obj, key)
31
32 if result is None:
33 raise exceptions.MissingPropertyError(
34 resource=resource_name, prop_name=key)
35 else:
36 return result
37
38
39 def _get_case_insensitive_key(target_obj, target_key):
40 key_to_match = target_key.lower()
41 key_list = target_obj.keys()
42
43 for key in key_list:
44 if key.lower() == key_to_match:
45 return key
46
47
48 def get_app_name(service, cluster, app_value):
49 if app_value is not None:
50 return app_value
51 else:
52 suffix = _get_ecs_suffix(service, cluster)
53 return APP_PREFIX + suffix
54
55
56 def get_cluster_name_from_arn(arn):
57 return arn.split('/')[1]
58
59
60 def get_deploy_group_name(service, cluster, dg_value):
61 if dg_value is not None:
62 return dg_value
63 else:
64 suffix = _get_ecs_suffix(service, cluster)
65 return DGP_PREFIX + suffix
66
67
68 def _get_ecs_suffix(service, cluster):
69 if cluster is None:
70 cluster_name = 'default'
71 else:
72 cluster_name = cluster[:MAX_CHAR_LENGTH]
73
74 return cluster_name + '-' + service[:MAX_CHAR_LENGTH]
75
76
77 def parse_appspec(appspec_str):
78 try:
79 return json.loads(appspec_str)
80 except ValueError:
81 return yaml.load(appspec_str)
82
[end of awscli/customizations/ecs/filehelpers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/awscli/customizations/ecs/filehelpers.py b/awscli/customizations/ecs/filehelpers.py
--- a/awscli/customizations/ecs/filehelpers.py
+++ b/awscli/customizations/ecs/filehelpers.py
@@ -78,4 +78,4 @@
try:
return json.loads(appspec_str)
except ValueError:
- return yaml.load(appspec_str)
+ return yaml.safe_load(appspec_str)
| {"golden_diff": "diff --git a/awscli/customizations/ecs/filehelpers.py b/awscli/customizations/ecs/filehelpers.py\n--- a/awscli/customizations/ecs/filehelpers.py\n+++ b/awscli/customizations/ecs/filehelpers.py\n@@ -78,4 +78,4 @@\n try:\n return json.loads(appspec_str)\n except ValueError:\n- return yaml.load(appspec_str)\n+ return yaml.safe_load(appspec_str)\n", "issue": "The aws-cli bundle package uses an insecure version of PyYAML\n### awscli version:<br>\r\n`aws-cli/1.16.52 Python/2.7.15 Linux/4.14.77-69.57.amzn1.x86_64 exec-env/AWS_ECS_EC2 botocore/1.12.42`\r\n\r\n[NVD entry](https://nvd.nist.gov/vuln/detail/CVE-2017-18342)\r\n\r\nThis issue was found when vulnerability alerts started appearing in Twistlock in response to scans of Docker images that we are using in several applications. The generic error found in these outlines is as such:<br>\r\n\r\n```\r\nImpacted versions: <=3.13\r\nIn PyYAML before 4.1, the yaml.load() API could execute arbitrary code. In other words, yaml.safe_load is not used.\r\n```\r\n\r\nThese images are not natively using PyYAML, so this led us to a Docker `RUN` line in a Dockerfile that executed a script that contains a line of code that executes the installation of the `aws-cli` bundle using the following URL:<br>\r\n\r\n`https://s3.amazonaws.com/aws-cli/awscli-bundle.zip`\r\n\r\nUnpacking this archive shows a list of package dependencies that includes the vulnerable version of PyYAML:<br>\r\n\r\n`awscli-bundle/packages/PyYAML-3.13.tar.gz`\r\n\r\nThe latest (and actually secure) version of PyYAML appears to be 4.1 according to the developer via the [GitHub repo](https://github.com/yaml/pyyaml).\r\n\r\n### Request\r\n\r\nIs it possible to have the patched version of PyYAML added to this bundle to avoid this vulnerability?\r\n\r\nThank you!\n", "before_files": [{"content": "# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. 
See the License for the specific\n# language governing permissions and limitations under the License.\n\nimport json\nimport yaml\n\nfrom awscli.customizations.ecs import exceptions\n\nMAX_CHAR_LENGTH = 46\nAPP_PREFIX = 'AppECS-'\nDGP_PREFIX = 'DgpECS-'\n\n\ndef find_required_key(resource_name, obj, key):\n\n if obj is None:\n raise exceptions.MissingPropertyError(\n resource=resource_name, prop_name=key)\n\n result = _get_case_insensitive_key(obj, key)\n\n if result is None:\n raise exceptions.MissingPropertyError(\n resource=resource_name, prop_name=key)\n else:\n return result\n\n\ndef _get_case_insensitive_key(target_obj, target_key):\n key_to_match = target_key.lower()\n key_list = target_obj.keys()\n\n for key in key_list:\n if key.lower() == key_to_match:\n return key\n\n\ndef get_app_name(service, cluster, app_value):\n if app_value is not None:\n return app_value\n else:\n suffix = _get_ecs_suffix(service, cluster)\n return APP_PREFIX + suffix\n\n\ndef get_cluster_name_from_arn(arn):\n return arn.split('/')[1]\n\n\ndef get_deploy_group_name(service, cluster, dg_value):\n if dg_value is not None:\n return dg_value\n else:\n suffix = _get_ecs_suffix(service, cluster)\n return DGP_PREFIX + suffix\n\n\ndef _get_ecs_suffix(service, cluster):\n if cluster is None:\n cluster_name = 'default'\n else:\n cluster_name = cluster[:MAX_CHAR_LENGTH]\n\n return cluster_name + '-' + service[:MAX_CHAR_LENGTH]\n\n\ndef parse_appspec(appspec_str):\n try:\n return json.loads(appspec_str)\n except ValueError:\n return yaml.load(appspec_str)\n", "path": "awscli/customizations/ecs/filehelpers.py"}]} | 1,599 | 95 |
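
The one-line fix above swaps `yaml.load` for `yaml.safe_load`, which restricts parsing to plain YAML tags and so cannot be steered into constructing arbitrary Python objects — the CVE-2017-18342 vector on PyYAML <= 3.13. A minimal sketch of the patched parsing path:

```python
# Sketch mirroring the fix: try JSON first, then fall back to safe YAML.
import json
import yaml

def parse_appspec(appspec_str):
    try:
        return json.loads(appspec_str)
    except ValueError:
        return yaml.safe_load(appspec_str)  # refuses !!python/... object tags

print(parse_appspec("version: 0.0\nResources: []"))
# {'version': 0.0, 'Resources': []}
```
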
gh_patches_debug_5930 | rasdani/github-patches | git_diff | getsentry__sentry-4564 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
BrowserExtensionsFilter: _gCrWeb autofill error in iOS Chrome not ignored
This seems to be a relatively well-known issue on Chrome for iOS. It seems this could be ignored safely by the filter as well.
Source: https://groups.google.com/a/chromium.org/forum/#!topic/chromium-discuss/7VU0_VvC7mE
</issue>
<code>
[start of src/sentry/filters/browser_extensions.py]
1 from __future__ import absolute_import
2
3 from .base import Filter
4
5 import re
6
7 EXTENSION_EXC_VALUES = re.compile('|'.join((re.escape(x) for x in (
8 # Random plugins/extensions
9 'top.GLOBALS',
10 # See: http://blog.errorception.com/2012/03/tale-of-unfindable-js-error. html
11 'originalCreateNotification',
12 'canvas.contentDocument',
13 'MyApp_RemoveAllHighlights',
14 'http://tt.epicplay.com',
15 'Can\'t find variable: ZiteReader',
16 'jigsaw is not defined',
17 'ComboSearch is not defined',
18 'http://loading.retry.widdit.com/',
19 'atomicFindClose',
20 # Facebook borked
21 'fb_xd_fragment',
22 # ISP "optimizing" proxy - `Cache-Control: no-transform` seems to
23 # reduce this. (thanks @acdha)
24 # See http://stackoverflow.com/questions/4113268
25 'bmi_SafeAddOnload',
26 'EBCallBackMessageReceived',
27 # See http://toolbar.conduit.com/Developer/HtmlAndGadget/Methods/JSInjection.aspx
28 'conduitPage'
29 ))), re.I)
30
31 EXTENSION_EXC_SOURCES = re.compile('|'.join((
32 # Facebook flakiness
33 r'graph\.facebook\.com'
34 # Facebook blocked
35 r'connect\.facebook\.net\/en_US\/all\.js',
36 # Woopra flakiness
37 r'eatdifferent\.com\.woopra-ns\.com',
38 r'static\.woopra\.com\/js\/woopra\.js',
39 # Chrome extensions
40 r'^chrome(?:-extension)?:\/\/',
41 # Cacaoweb
42 r'127\.0\.0\.1:4001\/isrunning',
43 # Other
44 r'webappstoolbarba\.texthelp\.com\/',
45 r'metrics\.itunes\.apple\.com\.edgesuite\.net\/',
46 )), re.I)
47
48
49 class BrowserExtensionsFilter(Filter):
50 id = 'browser-extensions'
51 name = 'Filter out errors known to be caused by browser extensions'
52 description = 'Certain browser extensions will inject inline scripts and are known to cause errors.'
53
54 def get_exception_value(self, data):
55 try:
56 return data['sentry.interfaces.Exception']['values'][0]['value']
57 except (LookupError, TypeError):
58 return ''
59
60 def get_exception_source(self, data):
61 try:
62 return data['sentry.interfaces.Exception']['values'][0]['stacktrace']['frames'][-1]['abs_path']
63 except (LookupError, TypeError):
64 return ''
65
66 def test(self, data):
67 """
68 Test the exception value to determine if it looks like the error is
69 caused by a common browser extension.
70 """
71 if data.get('platform') != 'javascript':
72 return False
73
74 exc_value = self.get_exception_value(data)
75 if exc_value:
76 if EXTENSION_EXC_VALUES.search(exc_value):
77 return True
78
79 exc_source = self.get_exception_source(data)
80 if exc_source:
81 if EXTENSION_EXC_SOURCES.match(exc_source):
82 return True
83
84 return False
85
[end of src/sentry/filters/browser_extensions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/sentry/filters/browser_extensions.py b/src/sentry/filters/browser_extensions.py
--- a/src/sentry/filters/browser_extensions.py
+++ b/src/sentry/filters/browser_extensions.py
@@ -24,6 +24,8 @@
# See http://stackoverflow.com/questions/4113268
'bmi_SafeAddOnload',
'EBCallBackMessageReceived',
+ # See https://groups.google.com/a/chromium.org/forum/#!topic/chromium-discuss/7VU0_VvC7mE
+ '_gCrWeb',
# See http://toolbar.conduit.com/Developer/HtmlAndGadget/Methods/JSInjection.aspx
'conduitPage'
))), re.I)
| {"golden_diff": "diff --git a/src/sentry/filters/browser_extensions.py b/src/sentry/filters/browser_extensions.py\n--- a/src/sentry/filters/browser_extensions.py\n+++ b/src/sentry/filters/browser_extensions.py\n@@ -24,6 +24,8 @@\n # See http://stackoverflow.com/questions/4113268\n 'bmi_SafeAddOnload',\n 'EBCallBackMessageReceived',\n+ # See https://groups.google.com/a/chromium.org/forum/#!topic/chromium-discuss/7VU0_VvC7mE\n+ '_gCrWeb',\n # See http://toolbar.conduit.com/Developer/HtmlAndGadget/Methods/JSInjection.aspx\n 'conduitPage'\n ))), re.I)\n", "issue": "BrowserExtensionsFilter: _gCrWeb autofill error in iOS Chrome not ignored\nThis seems to be a relatively well-known issue on Chrome for iOS. It seems this could be ignored safely by the filter as well.\r\n\r\nSource: https://groups.google.com/a/chromium.org/forum/#!topic/chromium-discuss/7VU0_VvC7mE\n", "before_files": [{"content": "from __future__ import absolute_import\n\nfrom .base import Filter\n\nimport re\n\nEXTENSION_EXC_VALUES = re.compile('|'.join((re.escape(x) for x in (\n # Random plugins/extensions\n 'top.GLOBALS',\n # See: http://blog.errorception.com/2012/03/tale-of-unfindable-js-error. html\n 'originalCreateNotification',\n 'canvas.contentDocument',\n 'MyApp_RemoveAllHighlights',\n 'http://tt.epicplay.com',\n 'Can\\'t find variable: ZiteReader',\n 'jigsaw is not defined',\n 'ComboSearch is not defined',\n 'http://loading.retry.widdit.com/',\n 'atomicFindClose',\n # Facebook borked\n 'fb_xd_fragment',\n # ISP \"optimizing\" proxy - `Cache-Control: no-transform` seems to\n # reduce this. (thanks @acdha)\n # See http://stackoverflow.com/questions/4113268\n 'bmi_SafeAddOnload',\n 'EBCallBackMessageReceived',\n # See http://toolbar.conduit.com/Developer/HtmlAndGadget/Methods/JSInjection.aspx\n 'conduitPage'\n))), re.I)\n\nEXTENSION_EXC_SOURCES = re.compile('|'.join((\n # Facebook flakiness\n r'graph\\.facebook\\.com'\n # Facebook blocked\n r'connect\\.facebook\\.net\\/en_US\\/all\\.js',\n # Woopra flakiness\n r'eatdifferent\\.com\\.woopra-ns\\.com',\n r'static\\.woopra\\.com\\/js\\/woopra\\.js',\n # Chrome extensions\n r'^chrome(?:-extension)?:\\/\\/',\n # Cacaoweb\n r'127\\.0\\.0\\.1:4001\\/isrunning',\n # Other\n r'webappstoolbarba\\.texthelp\\.com\\/',\n r'metrics\\.itunes\\.apple\\.com\\.edgesuite\\.net\\/',\n)), re.I)\n\n\nclass BrowserExtensionsFilter(Filter):\n id = 'browser-extensions'\n name = 'Filter out errors known to be caused by browser extensions'\n description = 'Certain browser extensions will inject inline scripts and are known to cause errors.'\n\n def get_exception_value(self, data):\n try:\n return data['sentry.interfaces.Exception']['values'][0]['value']\n except (LookupError, TypeError):\n return ''\n\n def get_exception_source(self, data):\n try:\n return data['sentry.interfaces.Exception']['values'][0]['stacktrace']['frames'][-1]['abs_path']\n except (LookupError, TypeError):\n return ''\n\n def test(self, data):\n \"\"\"\n Test the exception value to determine if it looks like the error is\n caused by a common browser extension.\n \"\"\"\n if data.get('platform') != 'javascript':\n return False\n\n exc_value = self.get_exception_value(data)\n if exc_value:\n if EXTENSION_EXC_VALUES.search(exc_value):\n return True\n\n exc_source = self.get_exception_source(data)\n if exc_source:\n if EXTENSION_EXC_SOURCES.match(exc_source):\n return True\n\n return False\n", "path": "src/sentry/filters/browser_extensions.py"}]} | 1,486 | 167 |
gh_patches_debug_10586 | rasdani/github-patches | git_diff | pyro-ppl__pyro-2014 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Cholesky issue in HMC
In PyTorch, if Cholesky issue happens, a Runtime Error will be raised (instead of returning `nan`) regarding singular matrix. So it will be difficult to run MCMC with gaussian process kernels (which involves Cholesky decomposition of covariance matrices). This issue is raised in https://github.com/pyro-ppl/pyro/issues/1863 and #1973. Because `nan` is a valid potential energy in HMC, we should add a mechanism to catch this error.
My proposed solution is to add a try, catch exception to potential function so that when Cholesky issue happens, it will return `nan`.
</issue>
<code>
[start of pyro/ops/integrator.py]
1 import torch
2 from torch.autograd import grad
3
4
5 def velocity_verlet(z, r, potential_fn, inverse_mass_matrix, step_size, num_steps=1, z_grads=None):
6 r"""
7 Second order symplectic integrator that uses the velocity verlet algorithm.
8
9 :param dict z: dictionary of sample site names and their current values
10 (type :class:`~torch.Tensor`).
11 :param dict r: dictionary of sample site names and corresponding momenta
12 (type :class:`~torch.Tensor`).
13 :param callable potential_fn: function that returns potential energy given z
14 for each sample site. The negative gradient of the function with respect
15 to ``z`` determines the rate of change of the corresponding sites'
16 momenta ``r``.
17 :param torch.Tensor inverse_mass_matrix: a tensor :math:`M^{-1}` which is used
18 to calculate kinetic energy: :math:`E_{kinetic} = \frac{1}{2}z^T M^{-1} z`.
19 Here :math:`M` can be a 1D tensor (diagonal matrix) or a 2D tensor (dense matrix).
20 :param float step_size: step size for each time step iteration.
21 :param int num_steps: number of discrete time steps over which to integrate.
22 :param torch.Tensor z_grads: optional gradients of potential energy at current ``z``.
23 :return tuple (z_next, r_next, z_grads, potential_energy): next position and momenta,
24 together with the potential energy and its gradient w.r.t. ``z_next``.
25 """
26 z_next = z.copy()
27 r_next = r.copy()
28 for _ in range(num_steps):
29 z_next, r_next, z_grads, potential_energy = _single_step_verlet(z_next,
30 r_next,
31 potential_fn,
32 inverse_mass_matrix,
33 step_size,
34 z_grads)
35 return z_next, r_next, z_grads, potential_energy
36
37
38 def _single_step_verlet(z, r, potential_fn, inverse_mass_matrix, step_size, z_grads=None):
39 r"""
40 Single step velocity verlet that modifies the `z`, `r` dicts in place.
41 """
42
43 z_grads = potential_grad(potential_fn, z)[0] if z_grads is None else z_grads
44
45 for site_name in r:
46 r[site_name] = r[site_name] + 0.5 * step_size * (-z_grads[site_name]) # r(n+1/2)
47
48 r_grads = _kinetic_grad(inverse_mass_matrix, r)
49 for site_name in z:
50 z[site_name] = z[site_name] + step_size * r_grads[site_name] # z(n+1)
51
52 z_grads, potential_energy = potential_grad(potential_fn, z)
53 for site_name in r:
54 r[site_name] = r[site_name] + 0.5 * step_size * (-z_grads[site_name]) # r(n+1)
55
56 return z, r, z_grads, potential_energy
57
58
59 def potential_grad(potential_fn, z):
60 """
61 Gradient of `potential_fn` w.r.t. parameters z.
62
63 :param potential_fn: python callable that takes in a dictionary of parameters
64 and returns the potential energy.
65 :param dict z: dictionary of parameter values keyed by site name.
66 :return: tuple of `(z_grads, potential_energy)`, where `z_grads` is a dictionary
67 with the same keys as `z` containing gradients and potential_energy is a
68 torch scalar.
69 """
70 z_keys, z_nodes = zip(*z.items())
71 for node in z_nodes:
72 node.requires_grad_(True)
73 potential_energy = potential_fn(z)
74 grads = grad(potential_energy, z_nodes)
75 for node in z_nodes:
76 node.requires_grad_(False)
77 return dict(zip(z_keys, grads)), potential_energy.detach()
78
79
80 def _kinetic_grad(inverse_mass_matrix, r):
81 # XXX consider using list/OrderDict to store z and r
82 # so we don't have to sort the keys
83 r_flat = torch.cat([r[site_name].reshape(-1) for site_name in sorted(r)])
84 if inverse_mass_matrix.dim() == 1:
85 grads_flat = inverse_mass_matrix * r_flat
86 else:
87 grads_flat = inverse_mass_matrix.matmul(r_flat)
88
89 # unpacking
90 grads = {}
91 pos = 0
92 for site_name in sorted(r):
93 next_pos = pos + r[site_name].numel()
94 grads[site_name] = grads_flat[pos:next_pos].reshape(r[site_name].shape)
95 pos = next_pos
96 assert pos == grads_flat.size(0)
97 return grads
98
[end of pyro/ops/integrator.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pyro/ops/integrator.py b/pyro/ops/integrator.py
--- a/pyro/ops/integrator.py
+++ b/pyro/ops/integrator.py
@@ -70,7 +70,16 @@
z_keys, z_nodes = zip(*z.items())
for node in z_nodes:
node.requires_grad_(True)
- potential_energy = potential_fn(z)
+ try:
+ potential_energy = potential_fn(z)
+ # deal with singular matrices
+ except RuntimeError as e:
+ if "singular U" in str(e):
+ grads = {k: v.new_zeros(v.shape) for k, v in z.items()}
+ return grads, z_nodes[0].new_tensor(float('nan'))
+ else:
+ raise e
+
grads = grad(potential_energy, z_nodes)
for node in z_nodes:
node.requires_grad_(False)
| {"golden_diff": "diff --git a/pyro/ops/integrator.py b/pyro/ops/integrator.py\n--- a/pyro/ops/integrator.py\n+++ b/pyro/ops/integrator.py\n@@ -70,7 +70,16 @@\n z_keys, z_nodes = zip(*z.items())\n for node in z_nodes:\n node.requires_grad_(True)\n- potential_energy = potential_fn(z)\n+ try:\n+ potential_energy = potential_fn(z)\n+ # deal with singular matrices\n+ except RuntimeError as e:\n+ if \"singular U\" in str(e):\n+ grads = {k: v.new_zeros(v.shape) for k, v in z.items()}\n+ return grads, z_nodes[0].new_tensor(float('nan'))\n+ else:\n+ raise e\n+\n grads = grad(potential_energy, z_nodes)\n for node in z_nodes:\n node.requires_grad_(False)\n", "issue": "Cholesky issue in HMC\nIn PyTorch, if Cholesky issue happens, a Runtime Error will be raised (instead of returning `nan`) regarding singular matrix. So it will be difficult to run MCMC with gaussian process kernels (which involves Cholesky decomposition of covariance matrices). This issue is raised in https://github.com/pyro-ppl/pyro/issues/1863 and #1973. Because `nan` is a valid potential energy in HMC, we should add a mechanism to catch this error.\r\n\r\nMy proposed solution is to add a try, catch exception to potential function so that when Cholesky issue happens, it will return `nan`.\n", "before_files": [{"content": "import torch\nfrom torch.autograd import grad\n\n\ndef velocity_verlet(z, r, potential_fn, inverse_mass_matrix, step_size, num_steps=1, z_grads=None):\n r\"\"\"\n Second order symplectic integrator that uses the velocity verlet algorithm.\n\n :param dict z: dictionary of sample site names and their current values\n (type :class:`~torch.Tensor`).\n :param dict r: dictionary of sample site names and corresponding momenta\n (type :class:`~torch.Tensor`).\n :param callable potential_fn: function that returns potential energy given z\n for each sample site. The negative gradient of the function with respect\n to ``z`` determines the rate of change of the corresponding sites'\n momenta ``r``.\n :param torch.Tensor inverse_mass_matrix: a tensor :math:`M^{-1}` which is used\n to calculate kinetic energy: :math:`E_{kinetic} = \\frac{1}{2}z^T M^{-1} z`.\n Here :math:`M` can be a 1D tensor (diagonal matrix) or a 2D tensor (dense matrix).\n :param float step_size: step size for each time step iteration.\n :param int num_steps: number of discrete time steps over which to integrate.\n :param torch.Tensor z_grads: optional gradients of potential energy at current ``z``.\n :return tuple (z_next, r_next, z_grads, potential_energy): next position and momenta,\n together with the potential energy and its gradient w.r.t. 
``z_next``.\n \"\"\"\n z_next = z.copy()\n r_next = r.copy()\n for _ in range(num_steps):\n z_next, r_next, z_grads, potential_energy = _single_step_verlet(z_next,\n r_next,\n potential_fn,\n inverse_mass_matrix,\n step_size,\n z_grads)\n return z_next, r_next, z_grads, potential_energy\n\n\ndef _single_step_verlet(z, r, potential_fn, inverse_mass_matrix, step_size, z_grads=None):\n r\"\"\"\n Single step velocity verlet that modifies the `z`, `r` dicts in place.\n \"\"\"\n\n z_grads = potential_grad(potential_fn, z)[0] if z_grads is None else z_grads\n\n for site_name in r:\n r[site_name] = r[site_name] + 0.5 * step_size * (-z_grads[site_name]) # r(n+1/2)\n\n r_grads = _kinetic_grad(inverse_mass_matrix, r)\n for site_name in z:\n z[site_name] = z[site_name] + step_size * r_grads[site_name] # z(n+1)\n\n z_grads, potential_energy = potential_grad(potential_fn, z)\n for site_name in r:\n r[site_name] = r[site_name] + 0.5 * step_size * (-z_grads[site_name]) # r(n+1)\n\n return z, r, z_grads, potential_energy\n\n\ndef potential_grad(potential_fn, z):\n \"\"\"\n Gradient of `potential_fn` w.r.t. parameters z.\n\n :param potential_fn: python callable that takes in a dictionary of parameters\n and returns the potential energy.\n :param dict z: dictionary of parameter values keyed by site name.\n :return: tuple of `(z_grads, potential_energy)`, where `z_grads` is a dictionary\n with the same keys as `z` containing gradients and potential_energy is a\n torch scalar.\n \"\"\"\n z_keys, z_nodes = zip(*z.items())\n for node in z_nodes:\n node.requires_grad_(True)\n potential_energy = potential_fn(z)\n grads = grad(potential_energy, z_nodes)\n for node in z_nodes:\n node.requires_grad_(False)\n return dict(zip(z_keys, grads)), potential_energy.detach()\n\n\ndef _kinetic_grad(inverse_mass_matrix, r):\n # XXX consider using list/OrderDict to store z and r\n # so we don't have to sort the keys\n r_flat = torch.cat([r[site_name].reshape(-1) for site_name in sorted(r)])\n if inverse_mass_matrix.dim() == 1:\n grads_flat = inverse_mass_matrix * r_flat\n else:\n grads_flat = inverse_mass_matrix.matmul(r_flat)\n\n # unpacking\n grads = {}\n pos = 0\n for site_name in sorted(r):\n next_pos = pos + r[site_name].numel()\n grads[site_name] = grads_flat[pos:next_pos].reshape(r[site_name].shape)\n pos = next_pos\n assert pos == grads_flat.size(0)\n return grads\n", "path": "pyro/ops/integrator.py"}]} | 1,914 | 201 |
gh_patches_debug_12638 | rasdani/github-patches | git_diff | Kinto__kinto-1850 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Migrate to JSONschema Draft v7
Currently we use Draft 4. The library we use (jsonschema) supports Draft 7:
https://json-schema.org/specification.html
The idea would be:
* to use `Draft7Validator` instead of `Draft4Validator` (nothing more...)
* list main changes and add links in CHANGELOG (see spec release notes)
* check compatibility with kinto-admin
</issue>
<code>
[start of kinto/schema_validation.py]
1 import colander
2 from jsonschema import Draft4Validator, ValidationError, SchemaError, RefResolutionError, validate
3 from pyramid.settings import asbool
4
5 from kinto.core import utils
6 from kinto.core.errors import raise_invalid
7 from kinto.views import object_exists_or_404
8
9
10 class JSONSchemaMapping(colander.SchemaNode):
11 def schema_type(self, **kw):
12 return colander.Mapping(unknown="preserve")
13
14 def deserialize(self, cstruct=colander.null):
15 # Start by deserializing a simple mapping.
16 validated = super().deserialize(cstruct)
17
18 # In case it is optional in parent schema.
19 if not validated or validated in (colander.null, colander.drop):
20 return validated
21 try:
22 check_schema(validated)
23 except ValidationError as e:
24 self.raise_invalid(e.message)
25 return validated
26
27
28 def check_schema(data):
29 try:
30 Draft4Validator.check_schema(data)
31 except SchemaError as e:
32 message = e.path.pop() + e.message
33 raise ValidationError(message)
34
35
36 def validate_schema(data, schema, ignore_fields=[]):
37 required_fields = [f for f in schema.get("required", []) if f not in ignore_fields]
38 # jsonschema doesn't accept 'required': [] yet.
39 # See https://github.com/Julian/jsonschema/issues/337.
40 # In the meantime, strip out 'required' if no other fields are required.
41 if required_fields:
42 schema = {**schema, "required": required_fields}
43 else:
44 schema = {f: v for f, v in schema.items() if f != "required"}
45
46 data = {f: v for f, v in data.items() if f not in ignore_fields}
47
48 try:
49 validate(data, schema)
50 except ValidationError as e:
51 if e.path:
52 field = e.path[-1]
53 elif e.validator_value:
54 field = e.validator_value[-1]
55 else:
56 field = e.schema_path[-1]
57 e.field = field
58 raise e
59 # Raise an error here if a reference in the schema doesn't resolve.
60 # jsonschema doesn't provide schema validation checking upon creation yet,
61 # it must be validated against data.
62 # See https://github.com/Julian/jsonschema/issues/399
63 # For future support https://github.com/Julian/jsonschema/issues/346.
64 except RefResolutionError as e:
65 raise e
66
67
68 def validate_from_bucket_schema_or_400(data, resource_name, request, ignore_fields=[]):
69 """Lookup in the parent objects if a schema was defined for this resource.
70
71 If the schema validation feature is enabled, if a schema is/are defined, and if the
72 data does not validate it/them, then it raises a 400 exception.
73 """
74 settings = request.registry.settings
75 schema_validation = "experimental_collection_schema_validation"
76 # If disabled from settings, do nothing.
77 if not asbool(settings.get(schema_validation)):
78 return
79
80 bucket_id = request.matchdict["bucket_id"]
81 bucket_uri = utils.instance_uri(request, "bucket", id=bucket_id)
82 buckets = request.bound_data.setdefault("buckets", {})
83 if bucket_uri not in buckets:
84 # Unknown yet, fetch from storage.
85 bucket = object_exists_or_404(
86 request, collection_id="bucket", parent_id="", object_id=bucket_id
87 )
88 buckets[bucket_uri] = bucket
89
90 # Let's see if the bucket defines a schema for this resource.
91 metadata_field = "{}:schema".format(resource_name)
92 bucket = buckets[bucket_uri]
93 if metadata_field not in bucket:
94 return
95
96 # Validate or fail with 400.
97 schema = bucket[metadata_field]
98 try:
99 validate_schema(data, schema, ignore_fields=ignore_fields)
100 except ValidationError as e:
101 raise_invalid(request, name=e.field, description=e.message)
102 except RefResolutionError as e:
103 raise_invalid(request, name="schema", description=str(e))
104
[end of kinto/schema_validation.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kinto/schema_validation.py b/kinto/schema_validation.py
--- a/kinto/schema_validation.py
+++ b/kinto/schema_validation.py
@@ -1,5 +1,5 @@
import colander
-from jsonschema import Draft4Validator, ValidationError, SchemaError, RefResolutionError, validate
+from jsonschema import Draft7Validator, ValidationError, SchemaError, RefResolutionError, validate
from pyramid.settings import asbool
from kinto.core import utils
@@ -27,7 +27,7 @@
def check_schema(data):
try:
- Draft4Validator.check_schema(data)
+ Draft7Validator.check_schema(data)
except SchemaError as e:
message = e.path.pop() + e.message
raise ValidationError(message)
| {"golden_diff": "diff --git a/kinto/schema_validation.py b/kinto/schema_validation.py\n--- a/kinto/schema_validation.py\n+++ b/kinto/schema_validation.py\n@@ -1,5 +1,5 @@\n import colander\n-from jsonschema import Draft4Validator, ValidationError, SchemaError, RefResolutionError, validate\n+from jsonschema import Draft7Validator, ValidationError, SchemaError, RefResolutionError, validate\n from pyramid.settings import asbool\n \n from kinto.core import utils\n@@ -27,7 +27,7 @@\n \n def check_schema(data):\n try:\n- Draft4Validator.check_schema(data)\n+ Draft7Validator.check_schema(data)\n except SchemaError as e:\n message = e.path.pop() + e.message\n raise ValidationError(message)\n", "issue": "Migrate to JSONschema Draft v7\nCurrently we use Draft4. The library that we use supports Draft7\r\n\r\nhttps://json-schema.org/specification.html\r\n\r\nThe idea would be:\r\n* to use `Draft7Validator` instead of `Draft4Validator` (nothing more...)\r\n* list main changes and add links in CHANGELOG (see spec release notes)\r\n* check compatibility with kinto-admin\r\n\n", "before_files": [{"content": "import colander\nfrom jsonschema import Draft4Validator, ValidationError, SchemaError, RefResolutionError, validate\nfrom pyramid.settings import asbool\n\nfrom kinto.core import utils\nfrom kinto.core.errors import raise_invalid\nfrom kinto.views import object_exists_or_404\n\n\nclass JSONSchemaMapping(colander.SchemaNode):\n def schema_type(self, **kw):\n return colander.Mapping(unknown=\"preserve\")\n\n def deserialize(self, cstruct=colander.null):\n # Start by deserializing a simple mapping.\n validated = super().deserialize(cstruct)\n\n # In case it is optional in parent schema.\n if not validated or validated in (colander.null, colander.drop):\n return validated\n try:\n check_schema(validated)\n except ValidationError as e:\n self.raise_invalid(e.message)\n return validated\n\n\ndef check_schema(data):\n try:\n Draft4Validator.check_schema(data)\n except SchemaError as e:\n message = e.path.pop() + e.message\n raise ValidationError(message)\n\n\ndef validate_schema(data, schema, ignore_fields=[]):\n required_fields = [f for f in schema.get(\"required\", []) if f not in ignore_fields]\n # jsonschema doesn't accept 'required': [] yet.\n # See https://github.com/Julian/jsonschema/issues/337.\n # In the meantime, strip out 'required' if no other fields are required.\n if required_fields:\n schema = {**schema, \"required\": required_fields}\n else:\n schema = {f: v for f, v in schema.items() if f != \"required\"}\n\n data = {f: v for f, v in data.items() if f not in ignore_fields}\n\n try:\n validate(data, schema)\n except ValidationError as e:\n if e.path:\n field = e.path[-1]\n elif e.validator_value:\n field = e.validator_value[-1]\n else:\n field = e.schema_path[-1]\n e.field = field\n raise e\n # Raise an error here if a reference in the schema doesn't resolve.\n # jsonschema doesn't provide schema validation checking upon creation yet,\n # it must be validated against data.\n # See https://github.com/Julian/jsonschema/issues/399\n # For future support https://github.com/Julian/jsonschema/issues/346.\n except RefResolutionError as e:\n raise e\n\n\ndef validate_from_bucket_schema_or_400(data, resource_name, request, ignore_fields=[]):\n \"\"\"Lookup in the parent objects if a schema was defined for this resource.\n\n If the schema validation feature is enabled, if a schema is/are defined, and if the\n data does not validate it/them, then it raises a 400 exception.\n \"\"\"\n settings = 
request.registry.settings\n schema_validation = \"experimental_collection_schema_validation\"\n # If disabled from settings, do nothing.\n if not asbool(settings.get(schema_validation)):\n return\n\n bucket_id = request.matchdict[\"bucket_id\"]\n bucket_uri = utils.instance_uri(request, \"bucket\", id=bucket_id)\n buckets = request.bound_data.setdefault(\"buckets\", {})\n if bucket_uri not in buckets:\n # Unknown yet, fetch from storage.\n bucket = object_exists_or_404(\n request, collection_id=\"bucket\", parent_id=\"\", object_id=bucket_id\n )\n buckets[bucket_uri] = bucket\n\n # Let's see if the bucket defines a schema for this resource.\n metadata_field = \"{}:schema\".format(resource_name)\n bucket = buckets[bucket_uri]\n if metadata_field not in bucket:\n return\n\n # Validate or fail with 400.\n schema = bucket[metadata_field]\n try:\n validate_schema(data, schema, ignore_fields=ignore_fields)\n except ValidationError as e:\n raise_invalid(request, name=e.field, description=e.message)\n except RefResolutionError as e:\n raise_invalid(request, name=\"schema\", description=str(e))\n", "path": "kinto/schema_validation.py"}]} | 1,691 | 162 |
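
For context on the one-word swap in the row above: `Draft7Validator` ships in the same `jsonschema` package, and the practical payoff is access to newer keywords such as `if`/`then` (Draft 7) and `const` (Draft 6). A minimal sketch with an invented schema:

    from jsonschema import Draft7Validator

    schema = {
        "type": "object",
        "if": {"properties": {"kind": {"const": "event"}}},
        "then": {"required": ["start_date"]},
    }
    Draft7Validator.check_schema(schema)  # raises SchemaError if malformed
    Draft7Validator(schema).validate(
        {"kind": "event", "start_date": "2018-10-01"})
    # Note: Draft 4 validators do not reject this schema -- they silently
    # ignore the unknown `if`/`then` keywords, so the conditional requirement
    # is simply never enforced.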
gh_patches_debug_12854 | rasdani/github-patches | git_diff | librosa__librosa-1457 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update show_versions to match current dependencies
**Describe the bug**
Looks like we forgot to revise the list of modules checked by `show_versions()` in the 0.9.0 release.
This isn't a major problem, but we should fix it for 0.9.2 and keep it as part of the release audit sequence going forward.
In general, `show_versions()` should track the dependencies listed in setup.cfg: https://github.com/librosa/librosa/blob/ef482b824c609222abb265357f7a79b11d174dd2/setup.cfg#L45-L84
</issue>
<code>
[start of librosa/version.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 """Version info"""
4
5 import sys
6 import importlib
7
8 short_version = "0.9"
9 version = "0.9.1"
10
11
12 def __get_mod_version(modname):
13
14 try:
15 if modname in sys.modules:
16 mod = sys.modules[modname]
17 else:
18 mod = importlib.import_module(modname)
19 try:
20 return mod.__version__
21 except AttributeError:
22 return "installed, no version number available"
23
24 except ImportError:
25 return None
26
27
28 def show_versions():
29 """Return the version information for all librosa dependencies."""
30
31 core_deps = [
32 "audioread",
33 "numpy",
34 "scipy",
35 "sklearn",
36 "joblib",
37 "decorator",
38 "soundfile",
39 "resampy",
40 "numba",
41 ]
42
43 extra_deps = [
44 "numpydoc",
45 "sphinx",
46 "sphinx_rtd_theme",
47 "sphinxcontrib.versioning",
48 "sphinx-gallery",
49 "pytest",
50 "pytest-mpl",
51 "pytest-cov",
52 "matplotlib",
53 "presets",
54 ]
55
56 print("INSTALLED VERSIONS")
57 print("------------------")
58 print("python: {}\n".format(sys.version))
59 print("librosa: {}\n".format(version))
60 for dep in core_deps:
61 print("{}: {}".format(dep, __get_mod_version(dep)))
62 print("")
63 for dep in extra_deps:
64 print("{}: {}".format(dep, __get_mod_version(dep)))
65
[end of librosa/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/librosa/version.py b/librosa/version.py
--- a/librosa/version.py
+++ b/librosa/version.py
@@ -38,18 +38,26 @@
"soundfile",
"resampy",
"numba",
+ "pooch",
+ "packaging"
]
extra_deps = [
"numpydoc",
"sphinx",
"sphinx_rtd_theme",
- "sphinxcontrib.versioning",
- "sphinx-gallery",
+ "sphinx_multiversion",
+ "sphinx_gallery",
+ "mir_eval",
+ "ipython",
+ "sphinxcontrib-svg2pdfconverter",
"pytest",
"pytest-mpl",
"pytest-cov",
"matplotlib",
+ "samplerate",
+ "soxr",
+ "contextlib2",
"presets",
]
| {"golden_diff": "diff --git a/librosa/version.py b/librosa/version.py\n--- a/librosa/version.py\n+++ b/librosa/version.py\n@@ -38,18 +38,26 @@\n \"soundfile\",\n \"resampy\",\n \"numba\",\n+ \"pooch\",\n+ \"packaging\"\n ]\n \n extra_deps = [\n \"numpydoc\",\n \"sphinx\",\n \"sphinx_rtd_theme\",\n- \"sphinxcontrib.versioning\",\n- \"sphinx-gallery\",\n+ \"sphinx_multiversion\",\n+ \"sphinx_gallery\",\n+ \"mir_eval\",\n+ \"ipython\",\n+ \"sphinxcontrib-svg2pdfconverter\",\n \"pytest\",\n \"pytest-mpl\",\n \"pytest-cov\",\n \"matplotlib\",\n+ \"samplerate\",\n+ \"soxr\",\n+ \"contextlib2\",\n \"presets\",\n ]\n", "issue": "Update show_versions to match current dependencies\n**Describe the bug**\r\n\r\nLooks like we forgot to revise the list of modules checked by `show_versions()` in the 0.9.0 release.\r\nThis isn't a major problem, but we should fix it for 0.9.2 and keep it as part of the release audit sequence going forward.\r\n\r\nIn general, `show_versions()` should track the dependencies listed in setup.cfg: https://github.com/librosa/librosa/blob/ef482b824c609222abb265357f7a79b11d174dd2/setup.cfg#L45-L84\nUpdate show_versions to match current dependencies\n**Describe the bug**\r\n\r\nLooks like we forgot to revise the list of modules checked by `show_versions()` in the 0.9.0 release.\r\nThis isn't a major problem, but we should fix it for 0.9.2 and keep it as part of the release audit sequence going forward.\r\n\r\nIn general, `show_versions()` should track the dependencies listed in setup.cfg: https://github.com/librosa/librosa/blob/ef482b824c609222abb265357f7a79b11d174dd2/setup.cfg#L45-L84\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"Version info\"\"\"\n\nimport sys\nimport importlib\n\nshort_version = \"0.9\"\nversion = \"0.9.1\"\n\n\ndef __get_mod_version(modname):\n\n try:\n if modname in sys.modules:\n mod = sys.modules[modname]\n else:\n mod = importlib.import_module(modname)\n try:\n return mod.__version__\n except AttributeError:\n return \"installed, no version number available\"\n\n except ImportError:\n return None\n\n\ndef show_versions():\n \"\"\"Return the version information for all librosa dependencies.\"\"\"\n\n core_deps = [\n \"audioread\",\n \"numpy\",\n \"scipy\",\n \"sklearn\",\n \"joblib\",\n \"decorator\",\n \"soundfile\",\n \"resampy\",\n \"numba\",\n ]\n\n extra_deps = [\n \"numpydoc\",\n \"sphinx\",\n \"sphinx_rtd_theme\",\n \"sphinxcontrib.versioning\",\n \"sphinx-gallery\",\n \"pytest\",\n \"pytest-mpl\",\n \"pytest-cov\",\n \"matplotlib\",\n \"presets\",\n ]\n\n print(\"INSTALLED VERSIONS\")\n print(\"------------------\")\n print(\"python: {}\\n\".format(sys.version))\n print(\"librosa: {}\\n\".format(version))\n for dep in core_deps:\n print(\"{}: {}\".format(dep, __get_mod_version(dep)))\n print(\"\")\n for dep in extra_deps:\n print(\"{}: {}\".format(dep, __get_mod_version(dep)))\n", "path": "librosa/version.py"}]} | 1,289 | 204 |
gh_patches_debug_10338 | rasdani/github-patches | git_diff | digitalfabrik__integreat-cms-449 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Region list menu points to api
### Describe the Bug
<!-- A clear and concise description of what the bug is. -->
The menu item for regions points to the region api and not the region list.
### Steps to Reproduce
1. Go to 'Regions'
### Expected Behavior
<!-- A clear and concise description of what you expected to happen. -->
The region list (`/regions/`) opens.
### Actual Behavior
<!-- A clear and concise description of what actually happened. -->
The region api (`/api/regions/`) opens.
### Additional Information
<!-- Add any other context (e.g. logs, screenshots, etc.) about the problem here. -->
This is because both urls have the name 'regions' and the last matching item in urlpatterns determines the actual url.
</issue>
<code>
[start of src/api/urls.py]
1 '''
2 Expansion of API-Endpoints for the CMS
3 '''
4 from django.conf.urls import include, url
5
6 from .v3.feedback import (
7 page_feedback,
8 search_result_feedback,
9 region_feedback,
10 offer_list_feedback,
11 event_list_feedback)
12 from .v3.languages import languages
13 from .v3.pages import pages
14 from .v3.push_notifications import sent_push_notifications
15 from .v3.regions import regions, liveregions, hiddenregions, pushnew
16 from .v3.offers import offers
17 from .v3.single_page import single_page
18
19 urlpatterns = [
20 url(r'regions/$', regions, name='regions'),
21 url(r'regions/live/$', liveregions, name='liveregions'),
22 url(r'regions/hidden/$', hiddenregions, name='hiddenregions'),
23 url(r'regions/pushnew/$', pushnew, name='pushnew'),
24 url(r'(?P<region_slug>[-\w]+)/', include([
25 url(r'languages/$', languages),
26 url(r'offers/$', offers),
27 url(r'(?P<lan_code>[-\w]+)/sent_push_notifications/$', sent_push_notifications),
28 url(r'(?P<languages>[-\w]+)/feedback/$', page_feedback.feedback),
29 url(r'(?P<language_code>[-\w]+)/feedback/categories$', region_feedback.region_feedback),
30 url(r'(?P<language_code>[-\w]+)/feedback/search$', search_result_feedback.search_result_feedback),
31 url(r'(?P<language_code>[-\w]+)/feedback/extras$', offer_list_feedback.offer_list_feedback),
32 url(r'(?P<language_code>[-\w]+)/feedback/events$', event_list_feedback.event_list_feedback),
33 url(r'(?P<language_code>[-\w]+)/pages/$', pages),
34 url(r'(?P<language_code>[-\w]+)/offers/$', offers),
35 url(r'(?P<language_code>[-\w]+)/page/$', single_page),
36 ])),
37 ]
38
[end of src/api/urls.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/api/urls.py b/src/api/urls.py
--- a/src/api/urls.py
+++ b/src/api/urls.py
@@ -17,10 +17,10 @@
from .v3.single_page import single_page
urlpatterns = [
- url(r'regions/$', regions, name='regions'),
- url(r'regions/live/$', liveregions, name='liveregions'),
- url(r'regions/hidden/$', hiddenregions, name='hiddenregions'),
- url(r'regions/pushnew/$', pushnew, name='pushnew'),
+ url(r'regions/$', regions),
+ url(r'regions/live/$', liveregions),
+ url(r'regions/hidden/$', hiddenregions),
+ url(r'regions/pushnew/$', pushnew),
url(r'(?P<region_slug>[-\w]+)/', include([
url(r'languages/$', languages),
url(r'offers/$', offers),
| {"golden_diff": "diff --git a/src/api/urls.py b/src/api/urls.py\n--- a/src/api/urls.py\n+++ b/src/api/urls.py\n@@ -17,10 +17,10 @@\n from .v3.single_page import single_page\n \n urlpatterns = [\n- url(r'regions/$', regions, name='regions'),\n- url(r'regions/live/$', liveregions, name='liveregions'),\n- url(r'regions/hidden/$', hiddenregions, name='hiddenregions'),\n- url(r'regions/pushnew/$', pushnew, name='pushnew'),\n+ url(r'regions/$', regions),\n+ url(r'regions/live/$', liveregions),\n+ url(r'regions/hidden/$', hiddenregions),\n+ url(r'regions/pushnew/$', pushnew),\n url(r'(?P<region_slug>[-\\w]+)/', include([\n url(r'languages/$', languages),\n url(r'offers/$', offers),\n", "issue": "Region list menu points to api\n### Describe the Bug\r\n<!-- A clear and concise description of what the bug is. -->\r\nThe menu item for regions points to the region api and not the region list.\r\n\r\n### Steps to Reproduce\r\n\r\n1. Go to 'Regions'\r\n\r\n### Expected Behavior\r\n<!-- A clear and concise description of what you expected to happen. -->\r\nThe region list (`/regions/`) opens.\r\n\r\n### Actual Behavior\r\n<!-- A clear and concise description of what actually happened. -->\r\nThe region api (`/api/regions/`) opens.\r\n\r\n### Additional Information\r\n<!-- Add any other context (e.g. logs, screenshots, etc.) about the problem here. -->\r\nThis is because both urls have the name 'regions' and the last matching item in urlpatterns determines the actual url.\r\n\nRegion list menu points to api\n### Describe the Bug\r\n<!-- A clear and concise description of what the bug is. -->\r\nThe menu item for regions points to the region api and not the region list.\r\n\r\n### Steps to Reproduce\r\n\r\n1. Go to 'Regions'\r\n\r\n### Expected Behavior\r\n<!-- A clear and concise description of what you expected to happen. -->\r\nThe region list (`/regions/`) opens.\r\n\r\n### Actual Behavior\r\n<!-- A clear and concise description of what actually happened. -->\r\nThe region api (`/api/regions/`) opens.\r\n\r\n### Additional Information\r\n<!-- Add any other context (e.g. logs, screenshots, etc.) about the problem here. 
-->\r\nThis is because both urls have the name 'regions' and the last matching item in urlpatterns determines the actual url.\r\n\n", "before_files": [{"content": "'''\nExpansion of API-Endpoints for the CMS\n'''\nfrom django.conf.urls import include, url\n\nfrom .v3.feedback import (\n page_feedback,\n search_result_feedback,\n region_feedback,\n offer_list_feedback,\n event_list_feedback)\nfrom .v3.languages import languages\nfrom .v3.pages import pages\nfrom .v3.push_notifications import sent_push_notifications\nfrom .v3.regions import regions, liveregions, hiddenregions, pushnew\nfrom .v3.offers import offers\nfrom .v3.single_page import single_page\n\nurlpatterns = [\n url(r'regions/$', regions, name='regions'),\n url(r'regions/live/$', liveregions, name='liveregions'),\n url(r'regions/hidden/$', hiddenregions, name='hiddenregions'),\n url(r'regions/pushnew/$', pushnew, name='pushnew'),\n url(r'(?P<region_slug>[-\\w]+)/', include([\n url(r'languages/$', languages),\n url(r'offers/$', offers),\n url(r'(?P<lan_code>[-\\w]+)/sent_push_notifications/$', sent_push_notifications),\n url(r'(?P<languages>[-\\w]+)/feedback/$', page_feedback.feedback),\n url(r'(?P<language_code>[-\\w]+)/feedback/categories$', region_feedback.region_feedback),\n url(r'(?P<language_code>[-\\w]+)/feedback/search$', search_result_feedback.search_result_feedback),\n url(r'(?P<language_code>[-\\w]+)/feedback/extras$', offer_list_feedback.offer_list_feedback),\n url(r'(?P<language_code>[-\\w]+)/feedback/events$', event_list_feedback.event_list_feedback),\n url(r'(?P<language_code>[-\\w]+)/pages/$', pages),\n url(r'(?P<language_code>[-\\w]+)/offers/$', offers),\n url(r'(?P<language_code>[-\\w]+)/page/$', single_page),\n ])),\n]\n", "path": "src/api/urls.py"}]} | 1,336 | 213 |
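
A side note on the fix in the row above: removing the `name=` kwargs resolves the clash, but the more general Django remedy is a URL namespace, which lets both `regions` names coexist. A hedged sketch — the module layout here is hypothetical, not Integreat's actual structure:

    # api/urls.py
    from django.conf.urls import url
    from .v3.regions import regions  # as in the file above

    app_name = "api"  # declares the "api" application namespace
    urlpatterns = [
        url(r"regions/$", regions, name="regions"),
    ]

    # Callers then disambiguate explicitly:
    #   reverse("api:regions")  ->  /api/regions/
    #   reverse("regions")      ->  /regions/  (the CMS region list)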
gh_patches_debug_2228 | rasdani/github-patches | git_diff | rucio__rucio-2492 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Issue in client_extract download
Motivation
----------
Modification
------------
</issue>
<code>
[start of lib/rucio/vcsversion.py]
1
2 '''
3 This file is automatically generated; Do not edit it. :)
4 '''
5 VERSION_INFO = {
6 'final': True,
7 'version': '1.19.5',
8 'branch_nick': 'patch-0-1_19_5_preparation',
9 'revision_id': '9e14d56c9d958e5348b19ddc7e5fa45d4a778807',
10 'revno': 7951
11 }
12
[end of lib/rucio/vcsversion.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lib/rucio/vcsversion.py b/lib/rucio/vcsversion.py
--- a/lib/rucio/vcsversion.py
+++ b/lib/rucio/vcsversion.py
@@ -4,8 +4,8 @@
'''
VERSION_INFO = {
'final': True,
- 'version': '1.19.5',
- 'branch_nick': 'patch-0-1_19_5_preparation',
- 'revision_id': '9e14d56c9d958e5348b19ddc7e5fa45d4a778807',
- 'revno': 7951
+ 'version': '1.19.6',
+ 'branch_nick': 'patch-0-Release__Rucio_1_19_6_preparation',
+ 'revision_id': 'a8c639a7a70a9e605ad90535d28d2eab04d89cce',
+ 'revno': 7992
}
| {"golden_diff": "diff --git a/lib/rucio/vcsversion.py b/lib/rucio/vcsversion.py\n--- a/lib/rucio/vcsversion.py\n+++ b/lib/rucio/vcsversion.py\n@@ -4,8 +4,8 @@\n '''\n VERSION_INFO = {\n 'final': True,\n- 'version': '1.19.5',\n- 'branch_nick': 'patch-0-1_19_5_preparation',\n- 'revision_id': '9e14d56c9d958e5348b19ddc7e5fa45d4a778807',\n- 'revno': 7951\n+ 'version': '1.19.6',\n+ 'branch_nick': 'patch-0-Release__Rucio_1_19_6_preparation',\n+ 'revision_id': 'a8c639a7a70a9e605ad90535d28d2eab04d89cce',\n+ 'revno': 7992\n }\n", "issue": "Issue in client_extract download \nMotivation\r\n----------\r\n\r\n\r\n\r\nModification\r\n------------\r\n\r\n\r\n\n", "before_files": [{"content": "\n'''\nThis file is automatically generated; Do not edit it. :)\n'''\nVERSION_INFO = {\n 'final': True,\n 'version': '1.19.5',\n 'branch_nick': 'patch-0-1_19_5_preparation',\n 'revision_id': '9e14d56c9d958e5348b19ddc7e5fa45d4a778807',\n 'revno': 7951\n}\n", "path": "lib/rucio/vcsversion.py"}]} | 687 | 253 |
gh_patches_debug_16416 | rasdani/github-patches | git_diff | pre-commit__pre-commit-1094 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Docker language fails on Windows - os.getuid()
[`docker_cmd`](https://github.com/pre-commit/pre-commit/blob/0cf2638b3b6a2cfde89cb7013e61b2d4a3e12875/pre_commit/languages/docker.py#L80) in languages/docker.py uses `os.getuid()` and `os.getgid()`, which don't exist in Python on Windows.
A hook that uses Docker on Windows fails to execute with
```
An unexpected error has occurred: AttributeError: module 'os' has no attribute 'getuid'
Traceback (most recent call last):
File "C:\Python36\lib\site-packages\pre_commit\error_handler.py", line 46, in error_handler
yield
File "C:\Python36\lib\site-packages\pre_commit\main.py", line 294, in main
return run(args.config, store, args)
File "C:\Python36\lib\site-packages\pre_commit\commands\run.py", line 293, in run
return _run_hooks(config, hooks, args, environ)
File "C:\Python36\lib\site-packages\pre_commit\commands\run.py", line 214, in _run_hooks
retval |= _run_single_hook(classifier, hook, args, skips, cols)
File "C:\Python36\lib\site-packages\pre_commit\commands\run.py", line 124, in _run_single_hook
tuple(filenames) if hook.pass_filenames else (),
File "C:\Python36\lib\site-packages\pre_commit\repository.py", line 100, in run
return lang.run_hook(self, file_args)
File "C:\Python36\lib\site-packages\pre_commit\languages\docker.py", line 99, in run_hook
cmd = docker_cmd() + entry_tag + cmd_rest
File "C:\Python36\lib\site-packages\pre_commit\languages\docker.py", line 80, in docker_cmd
'-u', '{}:{}'.format(os.getuid(), os.getgid()),
AttributeError: module 'os' has no attribute 'getuid'
```
The same hook works fine in Windows Subsystem for Linux.
</issue>
<code>
[start of pre_commit/languages/docker.py]
1 from __future__ import absolute_import
2 from __future__ import unicode_literals
3
4 import hashlib
5 import os
6
7 import pre_commit.constants as C
8 from pre_commit import five
9 from pre_commit.languages import helpers
10 from pre_commit.util import CalledProcessError
11 from pre_commit.util import clean_path_on_failure
12 from pre_commit.util import cmd_output
13
14
15 ENVIRONMENT_DIR = 'docker'
16 PRE_COMMIT_LABEL = 'PRE_COMMIT'
17 get_default_version = helpers.basic_get_default_version
18 healthy = helpers.basic_healthy
19
20
21 def md5(s): # pragma: windows no cover
22 return hashlib.md5(five.to_bytes(s)).hexdigest()
23
24
25 def docker_tag(prefix): # pragma: windows no cover
26 md5sum = md5(os.path.basename(prefix.prefix_dir)).lower()
27 return 'pre-commit-{}'.format(md5sum)
28
29
30 def docker_is_running(): # pragma: windows no cover
31 try:
32 return cmd_output('docker', 'ps')[0] == 0
33 except CalledProcessError:
34 return False
35
36
37 def assert_docker_available(): # pragma: windows no cover
38 assert docker_is_running(), (
39 'Docker is either not running or not configured in this environment'
40 )
41
42
43 def build_docker_image(prefix, **kwargs): # pragma: windows no cover
44 pull = kwargs.pop('pull')
45 assert not kwargs, kwargs
46 cmd = (
47 'docker', 'build',
48 '--tag', docker_tag(prefix),
49 '--label', PRE_COMMIT_LABEL,
50 )
51 if pull:
52 cmd += ('--pull',)
53 # This must come last for old versions of docker. See #477
54 cmd += ('.',)
55 helpers.run_setup_cmd(prefix, cmd)
56
57
58 def install_environment(
59 prefix, version, additional_dependencies,
60 ): # pragma: windows no cover
61 helpers.assert_version_default('docker', version)
62 helpers.assert_no_additional_deps('docker', additional_dependencies)
63 assert_docker_available()
64
65 directory = prefix.path(
66 helpers.environment_dir(ENVIRONMENT_DIR, C.DEFAULT),
67 )
68
69 # Docker doesn't really have relevant disk environment, but pre-commit
70 # still needs to cleanup it's state files on failure
71 with clean_path_on_failure(directory):
72 build_docker_image(prefix, pull=True)
73 os.mkdir(directory)
74
75
76 def docker_cmd(): # pragma: windows no cover
77 return (
78 'docker', 'run',
79 '--rm',
80 '-u', '{}:{}'.format(os.getuid(), os.getgid()),
81 # https://docs.docker.com/engine/reference/commandline/run/#mount-volumes-from-container-volumes-from
82 # The `Z` option tells Docker to label the content with a private
83 # unshared label. Only the current container can use a private volume.
84 '-v', '{}:/src:rw,Z'.format(os.getcwd()),
85 '--workdir', '/src',
86 )
87
88
89 def run_hook(hook, file_args): # pragma: windows no cover
90 assert_docker_available()
91 # Rebuild the docker image in case it has gone missing, as many people do
92 # automated cleanup of docker images.
93 build_docker_image(hook.prefix, pull=False)
94
95 hook_cmd = helpers.to_cmd(hook)
96 entry_exe, cmd_rest = hook_cmd[0], hook_cmd[1:]
97
98 entry_tag = ('--entrypoint', entry_exe, docker_tag(hook.prefix))
99 cmd = docker_cmd() + entry_tag + cmd_rest
100 return helpers.run_xargs(hook, cmd, file_args)
101
[end of pre_commit/languages/docker.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pre_commit/languages/docker.py b/pre_commit/languages/docker.py
--- a/pre_commit/languages/docker.py
+++ b/pre_commit/languages/docker.py
@@ -73,11 +73,18 @@
os.mkdir(directory)
+def get_docker_user(): # pragma: windows no cover
+ try:
+ return '{}:{}'.format(os.getuid(), os.getgid())
+ except AttributeError:
+ return '1000:1000'
+
+
def docker_cmd(): # pragma: windows no cover
return (
'docker', 'run',
'--rm',
- '-u', '{}:{}'.format(os.getuid(), os.getgid()),
+ '-u', get_docker_user(),
# https://docs.docker.com/engine/reference/commandline/run/#mount-volumes-from-container-volumes-from
# The `Z` option tells Docker to label the content with a private
# unshared label. Only the current container can use a private volume.
| {"golden_diff": "diff --git a/pre_commit/languages/docker.py b/pre_commit/languages/docker.py\n--- a/pre_commit/languages/docker.py\n+++ b/pre_commit/languages/docker.py\n@@ -73,11 +73,18 @@\n os.mkdir(directory)\n \n \n+def get_docker_user(): # pragma: windows no cover\n+ try:\n+ return '{}:{}'.format(os.getuid(), os.getgid())\n+ except AttributeError:\n+ return '1000:1000'\n+\n+\n def docker_cmd(): # pragma: windows no cover\n return (\n 'docker', 'run',\n '--rm',\n- '-u', '{}:{}'.format(os.getuid(), os.getgid()),\n+ '-u', get_docker_user(),\n # https://docs.docker.com/engine/reference/commandline/run/#mount-volumes-from-container-volumes-from\n # The `Z` option tells Docker to label the content with a private\n # unshared label. Only the current container can use a private volume.\n", "issue": "Docker language fails on Windows - os.getuid()\n[`docker_cmd`](https://github.com/pre-commit/pre-commit/blob/0cf2638b3b6a2cfde89cb7013e61b2d4a3e12875/pre_commit/languages/docker.py#L80) in languages/docker.py uses `os.getuid()` and `os.getgid()` that don't exist in Python on Windows.\r\n\r\nA hook that uses Docker on Windows fails to execute with\r\n\r\n```\r\nAn unexpected error has occurred: AttributeError: module 'os' has no attribute 'getuid'\r\nTraceback (most recent call last):\r\n File \"C:\\Python36\\lib\\site-packages\\pre_commit\\error_handler.py\", line 46, in error_handler\r\n yield\r\n File \"C:\\Python36\\lib\\site-packages\\pre_commit\\main.py\", line 294, in main\r\n return run(args.config, store, args)\r\n File \"C:\\Python36\\lib\\site-packages\\pre_commit\\commands\\run.py\", line 293, in run\r\n return _run_hooks(config, hooks, args, environ)\r\n File \"C:\\Python36\\lib\\site-packages\\pre_commit\\commands\\run.py\", line 214, in _run_hooks\r\n retval |= _run_single_hook(classifier, hook, args, skips, cols)\r\n File \"C:\\Python36\\lib\\site-packages\\pre_commit\\commands\\run.py\", line 124, in _run_single_hook\r\n tuple(filenames) if hook.pass_filenames else (),\r\n File \"C:\\Python36\\lib\\site-packages\\pre_commit\\repository.py\", line 100, in run\r\n return lang.run_hook(self, file_args)\r\n File \"C:\\Python36\\lib\\site-packages\\pre_commit\\languages\\docker.py\", line 99, in run_hook\r\n cmd = docker_cmd() + entry_tag + cmd_rest\r\n File \"C:\\Python36\\lib\\site-packages\\pre_commit\\languages\\docker.py\", line 80, in docker_cmd\r\n '-u', '{}:{}'.format(os.getuid(), os.getgid()),\r\nAttributeError: module 'os' has no attribute 'getuid'\r\n```\r\n\r\nThe same hook works fine in Windows Subsystem for Linux.\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import unicode_literals\n\nimport hashlib\nimport os\n\nimport pre_commit.constants as C\nfrom pre_commit import five\nfrom pre_commit.languages import helpers\nfrom pre_commit.util import CalledProcessError\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.util import cmd_output\n\n\nENVIRONMENT_DIR = 'docker'\nPRE_COMMIT_LABEL = 'PRE_COMMIT'\nget_default_version = helpers.basic_get_default_version\nhealthy = helpers.basic_healthy\n\n\ndef md5(s): # pragma: windows no cover\n return hashlib.md5(five.to_bytes(s)).hexdigest()\n\n\ndef docker_tag(prefix): # pragma: windows no cover\n md5sum = md5(os.path.basename(prefix.prefix_dir)).lower()\n return 'pre-commit-{}'.format(md5sum)\n\n\ndef docker_is_running(): # pragma: windows no cover\n try:\n return cmd_output('docker', 'ps')[0] == 0\n except CalledProcessError:\n return False\n\n\ndef 
assert_docker_available(): # pragma: windows no cover\n assert docker_is_running(), (\n 'Docker is either not running or not configured in this environment'\n )\n\n\ndef build_docker_image(prefix, **kwargs): # pragma: windows no cover\n pull = kwargs.pop('pull')\n assert not kwargs, kwargs\n cmd = (\n 'docker', 'build',\n '--tag', docker_tag(prefix),\n '--label', PRE_COMMIT_LABEL,\n )\n if pull:\n cmd += ('--pull',)\n # This must come last for old versions of docker. See #477\n cmd += ('.',)\n helpers.run_setup_cmd(prefix, cmd)\n\n\ndef install_environment(\n prefix, version, additional_dependencies,\n): # pragma: windows no cover\n helpers.assert_version_default('docker', version)\n helpers.assert_no_additional_deps('docker', additional_dependencies)\n assert_docker_available()\n\n directory = prefix.path(\n helpers.environment_dir(ENVIRONMENT_DIR, C.DEFAULT),\n )\n\n # Docker doesn't really have relevant disk environment, but pre-commit\n # still needs to cleanup it's state files on failure\n with clean_path_on_failure(directory):\n build_docker_image(prefix, pull=True)\n os.mkdir(directory)\n\n\ndef docker_cmd(): # pragma: windows no cover\n return (\n 'docker', 'run',\n '--rm',\n '-u', '{}:{}'.format(os.getuid(), os.getgid()),\n # https://docs.docker.com/engine/reference/commandline/run/#mount-volumes-from-container-volumes-from\n # The `Z` option tells Docker to label the content with a private\n # unshared label. Only the current container can use a private volume.\n '-v', '{}:/src:rw,Z'.format(os.getcwd()),\n '--workdir', '/src',\n )\n\n\ndef run_hook(hook, file_args): # pragma: windows no cover\n assert_docker_available()\n # Rebuild the docker image in case it has gone missing, as many people do\n # automated cleanup of docker images.\n build_docker_image(hook.prefix, pull=False)\n\n hook_cmd = helpers.to_cmd(hook)\n entry_exe, cmd_rest = hook_cmd[0], hook_cmd[1:]\n\n entry_tag = ('--entrypoint', entry_exe, docker_tag(hook.prefix))\n cmd = docker_cmd() + entry_tag + cmd_rest\n return helpers.run_xargs(hook, cmd, file_args)\n", "path": "pre_commit/languages/docker.py"}]} | 1,998 | 220 |
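
The pattern in the patch above generalizes nicely: probe for the POSIX-only attribute and fall back, rather than branching on `sys.platform`. Restated standalone below; note that `1000:1000` is simply the patch's conventional first-user UID/GID default, not a value derived from Windows:

    import os

    def get_docker_user():
        try:
            return '{}:{}'.format(os.getuid(), os.getgid())
        except AttributeError:  # os.getuid()/os.getgid() are absent on Windows
            return '1000:1000'

    print(get_docker_user())  # 'uid:gid' on POSIX, '1000:1000' on Windows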
gh_patches_debug_24309 | rasdani/github-patches | git_diff | openstates__openstates-scrapers-1356 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
WV: subcommittees treated as full committees
Hello,
So, with WV, the subcommittees (e.g., Finance subcommittees A, B, and C) are not identified as subcommittees.
I know WV is a [round C state](https://github.com/openstates/planning/issues/11), but I thought it was worth mentioning... if only for a "TODO later" note.
Best,
Alex
</issue>
<code>
[start of openstates/wv/committees.py]
1 import re
2
3 from billy.scrape.committees import CommitteeScraper, Committee
4
5 import lxml.html
6
7
8 class WVCommitteeScraper(CommitteeScraper):
9 jurisdiction = "wv"
10
11 def scrape(self, chamber, term):
12 getattr(self, 'scrape_' + chamber)()
13
14 def scrape_lower(self):
15 url = 'http://www.legis.state.wv.us/committees/house/main.cfm'
16 html = self.get(url).text
17 doc = lxml.html.fromstring(html)
18 doc.make_links_absolute(url)
19
20 xpath = '//a[contains(@href, "HouseCommittee")]'
21 for link in doc.xpath(xpath):
22 text = link.text_content().strip()
23 if text == '-':
24 continue
25 committee = self.scrape_lower_committee(link=link, name=text)
26 committee.add_source(url)
27 self.save_committee(committee)
28
29 url = 'http://www.legis.state.wv.us/committees/interims/interims.cfm'
30 html = self.get(url).text
31 doc = lxml.html.fromstring(html)
32 doc.make_links_absolute(url)
33 xpath = '//a[contains(@href, "committee.cfm")]'
34 for link in doc.xpath(xpath):
35 text = link.text_content().strip()
36 if text == '-':
37 continue
38 committee = self.scrape_interim_committee(link=link, name=text)
39 committee.add_source(url)
40 self.save_committee(committee)
41
42 def scrape_lower_committee(self, link, name):
43 url = re.sub(r'\s+', '', link.attrib['href'])
44 html = self.get(url).text
45 doc = lxml.html.fromstring(html)
46 doc.make_links_absolute(url)
47
48 comm = Committee('lower', name)
49 comm.add_source(url)
50
51 xpath = '//a[contains(@href, "?member=")]'
52 for link in doc.xpath(xpath):
53 name = link.text_content().strip()
54 name = re.sub(r'^Delegate\s+', '', name)
55 role = link.getnext().text or 'member'
56 comm.add_member(name, role.strip())
57
58 return comm
59
60 def scrape_interim_committee(self, link, name):
61 url = re.sub(r'\s+', '', link.attrib['href'])
62 html = self.get(url).text
63 doc = lxml.html.fromstring(html)
64 doc.make_links_absolute(url)
65
66 comm = Committee('joint', name)
67 comm.add_source(url)
68
69 xpath = '//a[contains(@href, "?member=")]'
70 for link in doc.xpath(xpath):
71 name = link.text_content().strip()
72 name = re.sub(r'^Delegate\s+', '', name)
73 name = re.sub(r'^Senator\s+', '', name)
74 role = link.getnext().text or 'member'
75 comm.add_member(name, role.strip())
76
77 return comm
78
79 def scrape_upper(self):
80 url = 'http://www.legis.state.wv.us/committees/senate/main.cfm'
81 html = self.get(url).text
82 doc = lxml.html.fromstring(html)
83 doc.make_links_absolute(url)
84
85 xpath = '//a[contains(@href, "SenateCommittee")]'
86 for link in doc.xpath(xpath):
87 text = link.text_content().strip()
88 if text == '-':
89 continue
90 committee = self.scrape_upper_committee(link=link, name=text)
91 committee.add_source(url)
92 self.save_committee(committee)
93
94 def scrape_upper_committee(self, link, name):
95 url = re.sub(r'\s+', '', link.attrib['href'])
96 html = self.get(url).text
97 doc = lxml.html.fromstring(html)
98 doc.make_links_absolute(url)
99
100 comm = Committee('upper', name)
101 comm.add_source(url)
102
103 xpath = '//a[contains(@href, "?member=")]'
104 for link in doc.xpath(xpath):
105 name = link.text_content().strip()
106 name = re.sub(r'^Delegate\s+', '', name)
107 role = link.getnext().text or 'member'
108 comm.add_member(name, role.strip())
109
110 return comm
111
[end of openstates/wv/committees.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/openstates/wv/committees.py b/openstates/wv/committees.py
--- a/openstates/wv/committees.py
+++ b/openstates/wv/committees.py
@@ -8,6 +8,14 @@
class WVCommitteeScraper(CommitteeScraper):
jurisdiction = "wv"
+ # Manually resolved links between subcommittees and parent committees.
+ subcommittee_parent_map = {
+ 'Post Audits Subcommittee': 'Government and Finance',
+ 'Parks, Recreation and Natural Resources Subcommittee': \
+ 'Government and Finance',
+ 'Tax Reform Subcommittee A': 'Joint Tax Reform',
+ }
+
def scrape(self, chamber, term):
getattr(self, 'scrape_' + chamber)()
@@ -63,7 +71,16 @@
doc = lxml.html.fromstring(html)
doc.make_links_absolute(url)
- comm = Committee('joint', name)
+ if 'Subcommittee' in name:
+ # Check whether the parent committee is manually defined first
+ # before attempting to automatically resolve it.
+ parent = WVCommitteeScraper.subcommittee_parent_map.get(name, None)
+ if parent is None:
+ parent = name.partition('Subcommittee')[0].strip()
+
+ comm = Committee('joint', parent, subcommittee=name)
+ else:
+ comm = Committee('joint', name)
comm.add_source(url)
xpath = '//a[contains(@href, "?member=")]'
| {"golden_diff": "diff --git a/openstates/wv/committees.py b/openstates/wv/committees.py\n--- a/openstates/wv/committees.py\n+++ b/openstates/wv/committees.py\n@@ -8,6 +8,14 @@\n class WVCommitteeScraper(CommitteeScraper):\n jurisdiction = \"wv\"\n \n+ # Manually resolved links between subcommittees and parent committees.\n+ subcommittee_parent_map = {\n+ 'Post Audits Subcommittee': 'Government and Finance',\n+ 'Parks, Recreation and Natural Resources Subcommittee': \\\n+ 'Government and Finance',\n+ 'Tax Reform Subcommittee A': 'Joint Tax Reform',\n+ }\n+\n def scrape(self, chamber, term):\n getattr(self, 'scrape_' + chamber)()\n \n@@ -63,7 +71,16 @@\n doc = lxml.html.fromstring(html)\n doc.make_links_absolute(url)\n \n- comm = Committee('joint', name)\n+ if 'Subcommittee' in name:\n+ # Check whether the parent committee is manually defined first\n+ # before attempting to automatically resolve it.\n+ parent = WVCommitteeScraper.subcommittee_parent_map.get(name, None)\n+ if parent is None:\n+ parent = name.partition('Subcommittee')[0].strip()\n+\n+ comm = Committee('joint', parent, subcommittee=name)\n+ else:\n+ comm = Committee('joint', name)\n comm.add_source(url)\n \n xpath = '//a[contains(@href, \"?member=\")]'\n", "issue": "WV: subcommittees treated as full committees\nHello,\r\n\r\nSo, with WV, the subcommittees (e.g., Finance subcommittees A, B, C) are not identified as a subcommittee.\r\n\r\nI know WV is a [round C state](https://github.com/openstates/planning/issues/11), but I thought it was worth mentioning...if only for a \"TODO later\" note.\r\n\r\nBest,\r\nAlex\n", "before_files": [{"content": "import re\n\nfrom billy.scrape.committees import CommitteeScraper, Committee\n\nimport lxml.html\n\n\nclass WVCommitteeScraper(CommitteeScraper):\n jurisdiction = \"wv\"\n\n def scrape(self, chamber, term):\n getattr(self, 'scrape_' + chamber)()\n\n def scrape_lower(self):\n url = 'http://www.legis.state.wv.us/committees/house/main.cfm'\n html = self.get(url).text\n doc = lxml.html.fromstring(html)\n doc.make_links_absolute(url)\n\n xpath = '//a[contains(@href, \"HouseCommittee\")]'\n for link in doc.xpath(xpath):\n text = link.text_content().strip()\n if text == '-':\n continue\n committee = self.scrape_lower_committee(link=link, name=text)\n committee.add_source(url)\n self.save_committee(committee)\n\n url = 'http://www.legis.state.wv.us/committees/interims/interims.cfm'\n html = self.get(url).text\n doc = lxml.html.fromstring(html)\n doc.make_links_absolute(url)\n xpath = '//a[contains(@href, \"committee.cfm\")]'\n for link in doc.xpath(xpath):\n text = link.text_content().strip()\n if text == '-':\n continue\n committee = self.scrape_interim_committee(link=link, name=text)\n committee.add_source(url)\n self.save_committee(committee)\n\n def scrape_lower_committee(self, link, name):\n url = re.sub(r'\\s+', '', link.attrib['href'])\n html = self.get(url).text\n doc = lxml.html.fromstring(html)\n doc.make_links_absolute(url)\n\n comm = Committee('lower', name)\n comm.add_source(url)\n\n xpath = '//a[contains(@href, \"?member=\")]'\n for link in doc.xpath(xpath):\n name = link.text_content().strip()\n name = re.sub(r'^Delegate\\s+', '', name)\n role = link.getnext().text or 'member'\n comm.add_member(name, role.strip())\n\n return comm\n\n def scrape_interim_committee(self, link, name):\n url = re.sub(r'\\s+', '', link.attrib['href'])\n html = self.get(url).text\n doc = lxml.html.fromstring(html)\n doc.make_links_absolute(url)\n\n comm = Committee('joint', name)\n 
comm.add_source(url)\n\n xpath = '//a[contains(@href, \"?member=\")]'\n for link in doc.xpath(xpath):\n name = link.text_content().strip()\n name = re.sub(r'^Delegate\\s+', '', name)\n name = re.sub(r'^Senator\\s+', '', name)\n role = link.getnext().text or 'member'\n comm.add_member(name, role.strip())\n\n return comm\n\n def scrape_upper(self):\n url = 'http://www.legis.state.wv.us/committees/senate/main.cfm'\n html = self.get(url).text\n doc = lxml.html.fromstring(html)\n doc.make_links_absolute(url)\n\n xpath = '//a[contains(@href, \"SenateCommittee\")]'\n for link in doc.xpath(xpath):\n text = link.text_content().strip()\n if text == '-':\n continue\n committee = self.scrape_upper_committee(link=link, name=text)\n committee.add_source(url)\n self.save_committee(committee)\n\n def scrape_upper_committee(self, link, name):\n url = re.sub(r'\\s+', '', link.attrib['href'])\n html = self.get(url).text\n doc = lxml.html.fromstring(html)\n doc.make_links_absolute(url)\n\n comm = Committee('upper', name)\n comm.add_source(url)\n\n xpath = '//a[contains(@href, \"?member=\")]'\n for link in doc.xpath(xpath):\n name = link.text_content().strip()\n name = re.sub(r'^Delegate\\s+', '', name)\n role = link.getnext().text or 'member'\n comm.add_member(name, role.strip())\n\n return comm\n", "path": "openstates/wv/committees.py"}]} | 1,734 | 333 |
gh_patches_debug_9551 | rasdani/github-patches | git_diff | airctic__icevision-539 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
EfficientDet images_size has to be divisible by 128
## 📓 Documentation Update
**What part of documentation was unclear or wrong?**
It has to be clear that the image size for the efficientdet model has to be divisible by 128
**Describe the solution you'd like**
Add this information to the `efficientdet/model.py` docstring and also make it explicit in the tutorials that use efficientdet.
</issue>
<code>
[start of icevision/models/efficientdet/model.py]
1 __all__ = ["model"]
2
3 from icevision.imports import *
4 from icevision.utils import *
5 from effdet import get_efficientdet_config, EfficientDet, DetBenchTrain, unwrap_bench
6 from effdet import create_model_from_config
7 from effdet.efficientdet import HeadNet
8
9
10 def model(
11 model_name: str, num_classes: int, img_size: int, pretrained: bool = True
12 ) -> nn.Module:
13 """Creates the efficientdet model specified by `model_name`.
14
15 The model implementation is by Ross Wightman, original repo
16 [here](https://github.com/rwightman/efficientdet-pytorch).
17
18 # Arguments
19 model_name: Specifies the model to create. For pretrained models, check
20 [this](https://github.com/rwightman/efficientdet-pytorch#models) table.
21 num_classes: Number of classes of your dataset (including background).
22 img_size: Image size that will be fed to the model. Must be squared and
23 divisible by 64.
24 pretrained: If True, use a pretrained backbone (on COCO).
25
26 # Returns
27 A PyTorch model.
28 """
29 config = get_efficientdet_config(model_name=model_name)
30 config.image_size = (img_size, img_size) if isinstance(img_size, int) else img_size
31
32 model_bench = create_model_from_config(
33 config,
34 bench_task="train",
35 bench_labeler=True,
36 num_classes=num_classes,
37 pretrained=pretrained,
38 )
39
40 # TODO: Break down param groups for backbone
41 def param_groups_fn(model: nn.Module) -> List[List[nn.Parameter]]:
42 unwrapped = unwrap_bench(model)
43
44 layers = [
45 unwrapped.backbone,
46 unwrapped.fpn,
47 nn.Sequential(unwrapped.class_net, unwrapped.box_net),
48 ]
49 param_groups = [list(layer.parameters()) for layer in layers]
50 check_all_model_params_in_groups2(model, param_groups)
51
52 return param_groups
53
54 model_bench.param_groups = MethodType(param_groups_fn, model_bench)
55
56 return model_bench
57
[end of icevision/models/efficientdet/model.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/icevision/models/efficientdet/model.py b/icevision/models/efficientdet/model.py
--- a/icevision/models/efficientdet/model.py
+++ b/icevision/models/efficientdet/model.py
@@ -20,7 +20,7 @@
[this](https://github.com/rwightman/efficientdet-pytorch#models) table.
num_classes: Number of classes of your dataset (including background).
img_size: Image size that will be fed to the model. Must be squared and
- divisible by 64.
+ divisible by 128.
pretrained: If True, use a pretrained backbone (on COCO).
# Returns
| {"golden_diff": "diff --git a/icevision/models/efficientdet/model.py b/icevision/models/efficientdet/model.py\n--- a/icevision/models/efficientdet/model.py\n+++ b/icevision/models/efficientdet/model.py\n@@ -20,7 +20,7 @@\n [this](https://github.com/rwightman/efficientdet-pytorch#models) table.\n num_classes: Number of classes of your dataset (including background).\n img_size: Image size that will be fed to the model. Must be squared and\n- divisible by 64.\n+ divisible by 128.\n pretrained: If True, use a pretrained backbone (on COCO).\n \n # Returns\n", "issue": "EfficientDet images_size has to be divisible by 128\n## \ud83d\udcd3 Documentation Update\r\n**What part of documentation was unclear or wrong?**\r\nIt has to be clear that the image size for the efficientdet model has to be divisible by 128\r\n\r\n**Describe the solution you'd like**\r\nAdd this information to the `efficientdet/model.py` docstring and also make it explicit in the tutorials that use efficientdet.\r\n\r\n\r\n\n", "before_files": [{"content": "__all__ = [\"model\"]\n\nfrom icevision.imports import *\nfrom icevision.utils import *\nfrom effdet import get_efficientdet_config, EfficientDet, DetBenchTrain, unwrap_bench\nfrom effdet import create_model_from_config\nfrom effdet.efficientdet import HeadNet\n\n\ndef model(\n model_name: str, num_classes: int, img_size: int, pretrained: bool = True\n) -> nn.Module:\n \"\"\"Creates the efficientdet model specified by `model_name`.\n\n The model implementation is by Ross Wightman, original repo\n [here](https://github.com/rwightman/efficientdet-pytorch).\n\n # Arguments\n model_name: Specifies the model to create. For pretrained models, check\n [this](https://github.com/rwightman/efficientdet-pytorch#models) table.\n num_classes: Number of classes of your dataset (including background).\n img_size: Image size that will be fed to the model. Must be squared and\n divisible by 64.\n pretrained: If True, use a pretrained backbone (on COCO).\n\n # Returns\n A PyTorch model.\n \"\"\"\n config = get_efficientdet_config(model_name=model_name)\n config.image_size = (img_size, img_size) if isinstance(img_size, int) else img_size\n\n model_bench = create_model_from_config(\n config,\n bench_task=\"train\",\n bench_labeler=True,\n num_classes=num_classes,\n pretrained=pretrained,\n )\n\n # TODO: Break down param groups for backbone\n def param_groups_fn(model: nn.Module) -> List[List[nn.Parameter]]:\n unwrapped = unwrap_bench(model)\n\n layers = [\n unwrapped.backbone,\n unwrapped.fpn,\n nn.Sequential(unwrapped.class_net, unwrapped.box_net),\n ]\n param_groups = [list(layer.parameters()) for layer in layers]\n check_all_model_params_in_groups2(model, param_groups)\n\n return param_groups\n\n model_bench.param_groups = MethodType(param_groups_fn, model_bench)\n\n return model_bench\n", "path": "icevision/models/efficientdet/model.py"}]} | 1,199 | 154 |
gh_patches_debug_17672 | rasdani/github-patches | git_diff | facebookresearch__hydra-234 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Better error message if config structure passed to instantiate is invalid.
# 🐛 Bug
In [common patterns#creating objects](https://cli.dev/docs/patterns/objects), if the `params` are mistakenly valued with a single string, the [`instantiate`](https://github.com/facebookresearch/hydra/blob/cb7904c666b34a53e09194993a0dd7b10fd97c99/hydra/utils.py#L44-L53) function exists with the error mentioned in "Stack trace/error messages":
This is fine to some extent, which is do you (Hydra team) aim at providing this kind of output messages?
<!-- A clear and concise description of what the bug is. -->
## To reproduce
** Minimal Code/Config snippet to reproduce **
Set `params` to `bug_report` as in:
```YAML
class: ...
params: bug_report
```
** Stack trace/error message **
```
> AttributeError: 'str' object has no attribute 'merge_with'
[2019-10-16 02:25:21,592][hydra.utils][ERROR] - Error instantiating [[OBJECT_NAME]] : 'str' object has no attribute 'merge_with'
```
**Optional video**
If applicable, record a short [video](https://asciinema.org) with to help explain your problem.
You can also
## Expected Behavior
<!-- A clear and concise description of what you expected to happen. -->
I expect to have some sort of an error message explaining that `params` should be a collection of values where [[TYPE]] was provided.
## System information
- <!-- Hydra Version --> Hydra Version: 0.9.0
- <!-- Operating system --> Operating System: Arch Linux
## Additional context
If you confirm that this should be fixed, I've already implemented the solution (IMO).
Better error message if config structure passed to instantiate is invalid.
# 🐛 Bug
In [common patterns#creating objects](https://cli.dev/docs/patterns/objects), if the `params` are mistakenly valued with a single string, the [`instantiate`](https://github.com/facebookresearch/hydra/blob/cb7904c666b34a53e09194993a0dd7b10fd97c99/hydra/utils.py#L44-L53) function exists with the error mentioned in "Stack trace/error messages":
This is fine to some extent, which is do you (Hydra team) aim at providing this kind of output messages?
<!-- A clear and concise description of what the bug is. -->
## To reproduce
** Minimal Code/Config snippet to reproduce **
Set `params` to `bug_report` as in:
```YAML
class: ...
params: bug_report
```
** Stack trace/error message **
```
> AttributeError: 'str' object has no attribute 'merge_with'
[2019-10-16 02:25:21,592][hydra.utils][ERROR] - Error instantiating [[OBJECT_NAME]] : 'str' object has no attribute 'merge_with'
```
**Optional video**
If applicable, record a short [video](https://asciinema.org) with to help explain your problem.
You can also
## Expected Behavior
<!-- A clear and concise description of what you expected to happen. -->
I expect to have some sort of an error message explaining that `params` should be a collection of values where [[TYPE]] was provided.
## System information
- <!-- Hydra Version --> Hydra Version: 0.9.0
- <!-- Operating system --> Operating System: Arch Linux
## Additional context
If you confirm that this should be fixed, I've already implemented the solution (IMO).
</issue>
<code>
[start of hydra/utils.py]
1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2 import logging.config
3
4 from omegaconf import OmegaConf
5
6 # pylint: disable=C0103
7 log = logging.getLogger(__name__)
8
9
10 def get_method(path):
11 return get_class(path)
12
13
14 def get_class(path):
15 try:
16 from importlib import import_module
17
18 module_path, _, class_name = path.rpartition(".")
19 mod = import_module(module_path)
20 try:
21 klass = getattr(mod, class_name)
22 except AttributeError:
23 raise ImportError(
24 "Class {} is not in module {}".format(class_name, module_path)
25 )
26 return klass
27 except ValueError as e:
28 log.error("Error initializing class " + path)
29 raise e
30
31
32 def get_static_method(full_method_name):
33 try:
34 spl = full_method_name.split(".")
35 method_name = spl.pop()
36 class_name = ".".join(spl)
37 clz = get_class(class_name)
38 return getattr(clz, method_name)
39 except Exception as e:
40 log.error("Error getting static method {} : {}".format(full_method_name, e))
41 raise e
42
43
44 def instantiate(config, *args, **kwargs):
45 assert config is not None, "Input config is None"
46 try:
47 clazz = get_class(config["class"])
48 params = config.params if "params" in config else OmegaConf.create()
49 params.merge_with(OmegaConf.create(kwargs))
50 return clazz(*args, **params)
51 except Exception as e:
52 log.error("Error instantiating {} : {}".format(config["class"], e))
53 raise e
54
[end of hydra/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/hydra/utils.py b/hydra/utils.py
--- a/hydra/utils.py
+++ b/hydra/utils.py
@@ -1,7 +1,7 @@
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging.config
-from omegaconf import OmegaConf
+from omegaconf import OmegaConf, DictConfig
# pylint: disable=C0103
log = logging.getLogger(__name__)
@@ -46,6 +46,11 @@
try:
clazz = get_class(config["class"])
params = config.params if "params" in config else OmegaConf.create()
+ assert isinstance(
+ params, DictConfig
+ ), "Input config params are expected to be a mapping, found {}".format(
+ type(config.params)
+ )
params.merge_with(OmegaConf.create(kwargs))
return clazz(*args, **params)
except Exception as e:
| {"golden_diff": "diff --git a/hydra/utils.py b/hydra/utils.py\n--- a/hydra/utils.py\n+++ b/hydra/utils.py\n@@ -1,7 +1,7 @@\n # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n import logging.config\n \n-from omegaconf import OmegaConf\n+from omegaconf import OmegaConf, DictConfig\n \n # pylint: disable=C0103\n log = logging.getLogger(__name__)\n@@ -46,6 +46,11 @@\n try:\n clazz = get_class(config[\"class\"])\n params = config.params if \"params\" in config else OmegaConf.create()\n+ assert isinstance(\n+ params, DictConfig\n+ ), \"Input config params are expected to be a mapping, found {}\".format(\n+ type(config.params)\n+ )\n params.merge_with(OmegaConf.create(kwargs))\n return clazz(*args, **params)\n except Exception as e:\n", "issue": "Better error message if config structure passed to instantiate is invalid.\n# \ud83d\udc1b Bug\r\n\r\nIn [common patterns#creating objects](https://cli.dev/docs/patterns/objects), if the `params` are mistakenly valued with a single string, the [`instantiate`](https://github.com/facebookresearch/hydra/blob/cb7904c666b34a53e09194993a0dd7b10fd97c99/hydra/utils.py#L44-L53) function exists with the error mentioned in \"Stack trace/error messages\":\r\n\r\nThis is fine to some extent, which is do you (Hydra team) aim at providing this kind of output messages? \r\n\r\n<!-- A clear and concise description of what the bug is. -->\r\n\r\n## To reproduce\r\n\r\n** Minimal Code/Config snippet to reproduce **\r\n\r\nSet `params` to `bug_report` as in:\r\n\r\n```YAML\r\nclass: ...\r\nparams: bug_report\r\n```\r\n\r\n** Stack trace/error message **\r\n```\r\n> AttributeError: 'str' object has no attribute 'merge_with'\r\n[2019-10-16 02:25:21,592][hydra.utils][ERROR] - Error instantiating [[OBJECT_NAME]] : 'str' object has no attribute 'merge_with'\r\n```\r\n**Optional video**\r\nIf applicable, record a short [video](https://asciinema.org) with to help explain your problem.\r\nYou can also \r\n\r\n## Expected Behavior\r\n<!-- A clear and concise description of what you expected to happen. -->\r\n\r\nI expect to have some sort of an error message explaining that `params` should be a collection of values where [[TYPE]] was provided. \r\n \r\n## System information\r\n- <!-- Hydra Version --> Hydra Version: 0.9.0\r\n\r\n- <!-- Operating system --> Operating System: Arch Linux\r\n\r\n## Additional context\r\nIf you confirm that this should be fixed, I've already implemented the solution (IMO). \r\n\nBetter error message if config structure passed to instantiate is invalid.\n# \ud83d\udc1b Bug\r\n\r\nIn [common patterns#creating objects](https://cli.dev/docs/patterns/objects), if the `params` are mistakenly valued with a single string, the [`instantiate`](https://github.com/facebookresearch/hydra/blob/cb7904c666b34a53e09194993a0dd7b10fd97c99/hydra/utils.py#L44-L53) function exists with the error mentioned in \"Stack trace/error messages\":\r\n\r\nThis is fine to some extent, which is do you (Hydra team) aim at providing this kind of output messages? \r\n\r\n<!-- A clear and concise description of what the bug is. 
-->\r\n\r\n## To reproduce\r\n\r\n** Minimal Code/Config snippet to reproduce **\r\n\r\nSet `params` to `bug_report` as in:\r\n\r\n```YAML\r\nclass: ...\r\nparams: bug_report\r\n```\r\n\r\n** Stack trace/error message **\r\n```\r\n> AttributeError: 'str' object has no attribute 'merge_with'\r\n[2019-10-16 02:25:21,592][hydra.utils][ERROR] - Error instantiating [[OBJECT_NAME]] : 'str' object has no attribute 'merge_with'\r\n```\r\n**Optional video**\r\nIf applicable, record a short [video](https://asciinema.org) with to help explain your problem.\r\nYou can also \r\n\r\n## Expected Behavior\r\n<!-- A clear and concise description of what you expected to happen. -->\r\n\r\nI expect to have some sort of an error message explaining that `params` should be a collection of values where [[TYPE]] was provided. \r\n \r\n## System information\r\n- <!-- Hydra Version --> Hydra Version: 0.9.0\r\n\r\n- <!-- Operating system --> Operating System: Arch Linux\r\n\r\n## Additional context\r\nIf you confirm that this should be fixed, I've already implemented the solution (IMO). \r\n\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nimport logging.config\n\nfrom omegaconf import OmegaConf\n\n# pylint: disable=C0103\nlog = logging.getLogger(__name__)\n\n\ndef get_method(path):\n return get_class(path)\n\n\ndef get_class(path):\n try:\n from importlib import import_module\n\n module_path, _, class_name = path.rpartition(\".\")\n mod = import_module(module_path)\n try:\n klass = getattr(mod, class_name)\n except AttributeError:\n raise ImportError(\n \"Class {} is not in module {}\".format(class_name, module_path)\n )\n return klass\n except ValueError as e:\n log.error(\"Error initializing class \" + path)\n raise e\n\n\ndef get_static_method(full_method_name):\n try:\n spl = full_method_name.split(\".\")\n method_name = spl.pop()\n class_name = \".\".join(spl)\n clz = get_class(class_name)\n return getattr(clz, method_name)\n except Exception as e:\n log.error(\"Error getting static method {} : {}\".format(full_method_name, e))\n raise e\n\n\ndef instantiate(config, *args, **kwargs):\n assert config is not None, \"Input config is None\"\n try:\n clazz = get_class(config[\"class\"])\n params = config.params if \"params\" in config else OmegaConf.create()\n params.merge_with(OmegaConf.create(kwargs))\n return clazz(*args, **params)\n except Exception as e:\n log.error(\"Error instantiating {} : {}\".format(config[\"class\"], e))\n raise e\n", "path": "hydra/utils.py"}]} | 1,815 | 204 |
gh_patches_debug_29306 | rasdani/github-patches | git_diff | saleor__saleor-2827 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
CustomJsonEncoder drops currency information from Money instance
Usually, we are using `settings.DEFAULT_CURRENCY` all over the place, however, there can be a bug, where `Money` is saved into `JSONField`, and `settings.DEFAULT_CURRENCY` changed meanwhile.
</issue>
<code>
[start of saleor/core/utils/json_serializer.py]
1 from django.core.serializers.json import (
2 Deserializer as JsonDeserializer, DjangoJSONEncoder,
3 Serializer as JsonSerializer)
4 from prices import Money
5
6
7 class Serializer(JsonSerializer):
8 def _init_options(self):
9 super()._init_options()
10 self.json_kwargs['cls'] = CustomJsonEncoder
11
12
13 class CustomJsonEncoder(DjangoJSONEncoder):
14 def default(self, obj):
15 if isinstance(obj, Money):
16 return obj.amount
17 return super().default(obj)
18
19
20 Deserializer = JsonDeserializer
21
[end of saleor/core/utils/json_serializer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/saleor/core/utils/json_serializer.py b/saleor/core/utils/json_serializer.py
--- a/saleor/core/utils/json_serializer.py
+++ b/saleor/core/utils/json_serializer.py
@@ -1,8 +1,12 @@
+import json
+
+from django.core.serializers.base import DeserializationError
from django.core.serializers.json import (
- Deserializer as JsonDeserializer, DjangoJSONEncoder,
- Serializer as JsonSerializer)
+ DjangoJSONEncoder, PythonDeserializer, Serializer as JsonSerializer)
from prices import Money
+MONEY_TYPE = 'Money'
+
class Serializer(JsonSerializer):
def _init_options(self):
@@ -13,8 +17,32 @@
class CustomJsonEncoder(DjangoJSONEncoder):
def default(self, obj):
if isinstance(obj, Money):
- return obj.amount
+ return {
+ '_type': MONEY_TYPE, 'amount': obj.amount,
+ 'currency': obj.currency}
return super().default(obj)
-Deserializer = JsonDeserializer
+def object_hook(obj):
+ if '_type' in obj and obj['_type'] == MONEY_TYPE:
+ return Money(obj['amount'], obj['currency'])
+ return obj
+
+
+def Deserializer(stream_or_string, **options):
+ """Deserialize a stream or string of JSON data. This is a slightly modified
+ copy of Django implementation with additional argument <object_hook> in
+ json.loads"""
+ if not isinstance(stream_or_string, (bytes, str)):
+ stream_or_string = stream_or_string.read()
+ if isinstance(stream_or_string, bytes):
+ stream_or_string = stream_or_string.decode()
+ try:
+ objects = json.loads(stream_or_string, object_hook=object_hook)
+ yield from PythonDeserializer(objects, **options)
+ except Exception as exc:
+ # ugly construction to overcome pylint's warning
+ # "The except handler raises immediately"
+ if isinstance(exc, (GeneratorExit, DeserializationError)):
+ raise
+ raise DeserializationError() from exc
| {"golden_diff": "diff --git a/saleor/core/utils/json_serializer.py b/saleor/core/utils/json_serializer.py\n--- a/saleor/core/utils/json_serializer.py\n+++ b/saleor/core/utils/json_serializer.py\n@@ -1,8 +1,12 @@\n+import json\n+\n+from django.core.serializers.base import DeserializationError\n from django.core.serializers.json import (\n- Deserializer as JsonDeserializer, DjangoJSONEncoder,\n- Serializer as JsonSerializer)\n+ DjangoJSONEncoder, PythonDeserializer, Serializer as JsonSerializer)\n from prices import Money\n \n+MONEY_TYPE = 'Money'\n+\n \n class Serializer(JsonSerializer):\n def _init_options(self):\n@@ -13,8 +17,32 @@\n class CustomJsonEncoder(DjangoJSONEncoder):\n def default(self, obj):\n if isinstance(obj, Money):\n- return obj.amount\n+ return {\n+ '_type': MONEY_TYPE, 'amount': obj.amount,\n+ 'currency': obj.currency}\n return super().default(obj)\n \n \n-Deserializer = JsonDeserializer\n+def object_hook(obj):\n+ if '_type' in obj and obj['_type'] == MONEY_TYPE:\n+ return Money(obj['amount'], obj['currency'])\n+ return obj\n+\n+\n+def Deserializer(stream_or_string, **options):\n+ \"\"\"Deserialize a stream or string of JSON data. This is a slightly modified\n+ copy of Django implementation with additional argument <object_hook> in\n+ json.loads\"\"\"\n+ if not isinstance(stream_or_string, (bytes, str)):\n+ stream_or_string = stream_or_string.read()\n+ if isinstance(stream_or_string, bytes):\n+ stream_or_string = stream_or_string.decode()\n+ try:\n+ objects = json.loads(stream_or_string, object_hook=object_hook)\n+ yield from PythonDeserializer(objects, **options)\n+ except Exception as exc:\n+ # ugly construction to overcome pylint's warning\n+ # \"The except handler raises immediately\"\n+ if isinstance(exc, (GeneratorExit, DeserializationError)):\n+ raise\n+ raise DeserializationError() from exc\n", "issue": "CustomJsonEncoder drops currency information from Money instance\nUsually, we are using `settings.DEFAULT_CURRENCY` all over the place, however, there can be a bug, where `Money` is saved into `JSONField`, and `settings.DEFAULT_CURRENCY` changed meanwhile.\r\n\r\n\n", "before_files": [{"content": "from django.core.serializers.json import (\n Deserializer as JsonDeserializer, DjangoJSONEncoder,\n Serializer as JsonSerializer)\nfrom prices import Money\n\n\nclass Serializer(JsonSerializer):\n def _init_options(self):\n super()._init_options()\n self.json_kwargs['cls'] = CustomJsonEncoder\n\n\nclass CustomJsonEncoder(DjangoJSONEncoder):\n def default(self, obj):\n if isinstance(obj, Money):\n return obj.amount\n return super().default(obj)\n\n\nDeserializer = JsonDeserializer\n", "path": "saleor/core/utils/json_serializer.py"}]} | 736 | 445 |
gh_patches_debug_6987 | rasdani/github-patches | git_diff | rasterio__rasterio-750 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
-f does not work as expected in rio-warp
Running on a preexisting file
```
» rio warp tests/data/RGB.byte.tif /tmp/hi.tif
Error: Could not open file : file exists and won't be overwritten without use of the `-f` or `-o` options.
```
Then
```
» rio warp tests/data/RGB.byte.tif /tmp/hi.tif -f
Error: -f option requires an argument
```
But
```
rio warp tests/data/RGB.byte.tif /tmp/hi.tif --force-overwrite
```
Works.
</issue>
<code>
[start of rasterio/rio/helpers.py]
1 """
2 Helper objects used by multiple CLI commands.
3 """
4
5 import json
6 import os
7
8 from rasterio.errors import FileOverwriteError
9
10
11 def coords(obj):
12 """Yield all coordinate coordinate tuples from a geometry or feature.
13 From python-geojson package."""
14 if isinstance(obj, (tuple, list)):
15 coordinates = obj
16 elif 'geometry' in obj:
17 coordinates = obj['geometry']['coordinates']
18 else:
19 coordinates = obj.get('coordinates', obj)
20 for e in coordinates:
21 if isinstance(e, (float, int)):
22 yield tuple(coordinates)
23 break
24 else:
25 for f in coords(e):
26 yield f
27
28
29 def write_features(
30 fobj, collection, sequence=False, geojson_type='feature', use_rs=False,
31 **dump_kwds):
32 """Read an iterator of (feat, bbox) pairs and write to file using
33 the selected modes."""
34 # Sequence of features expressed as bbox, feature, or collection.
35 if sequence:
36 for feat in collection():
37 xs, ys = zip(*coords(feat))
38 bbox = (min(xs), min(ys), max(xs), max(ys))
39 if use_rs:
40 fobj.write(u'\u001e')
41 if geojson_type == 'feature':
42 fobj.write(json.dumps(feat, **dump_kwds))
43 elif geojson_type == 'bbox':
44 fobj.write(json.dumps(bbox, **dump_kwds))
45 else:
46 fobj.write(
47 json.dumps({
48 'type': 'FeatureCollection',
49 'bbox': bbox,
50 'features': [feat]}, **dump_kwds))
51 fobj.write('\n')
52 # Aggregate all features into a single object expressed as
53 # bbox or collection.
54 else:
55 features = list(collection())
56 if geojson_type == 'bbox':
57 fobj.write(json.dumps(collection.bbox, **dump_kwds))
58 elif geojson_type == 'feature':
59 fobj.write(json.dumps(features[0], **dump_kwds))
60 else:
61 fobj.write(json.dumps({
62 'bbox': collection.bbox,
63 'type': 'FeatureCollection',
64 'features': features},
65 **dump_kwds))
66 fobj.write('\n')
67
68
69 def resolve_inout(input=None, output=None, files=None, force_overwrite=False):
70 """Resolves inputs and outputs from standard args and options.
71
72 :param input: a single input filename, optional.
73 :param output: a single output filename, optional.
74 :param files: a sequence of filenames in which the last is the
75 output filename.
76 :param force_overwrite: whether to force overwriting the output
77 file, bool.
78 :return: the resolved output filename and input filenames as a
79 tuple of length 2.
80
81 If provided, the :param:`output` file may be overwritten. An output
82 file extracted from :param:`files` will not be overwritten unless
83 :param:`force_overwrite` is `True`.
84 """
85 resolved_output = output or (files[-1] if files else None)
86 force_overwrite = output is not None or force_overwrite
87 if not force_overwrite and resolved_output and os.path.exists(
88 resolved_output):
89 raise FileOverwriteError(
90 "file exists and won't be overwritten without use of the "
91 "`-f` or `-o` options.")
92 resolved_inputs = (
93 [input] if input else [] +
94 list(files[:-1 if not output else None]) if files else [])
95 return resolved_output, resolved_inputs
96
97
98 def to_lower(ctx, param, value):
99 """Click callback, converts values to lowercase."""
100 return value.lower()
101
[end of rasterio/rio/helpers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/rasterio/rio/helpers.py b/rasterio/rio/helpers.py
--- a/rasterio/rio/helpers.py
+++ b/rasterio/rio/helpers.py
@@ -88,7 +88,7 @@
resolved_output):
raise FileOverwriteError(
"file exists and won't be overwritten without use of the "
- "`-f` or `-o` options.")
+ "`--force-overwrite` or `--output` options.")
resolved_inputs = (
[input] if input else [] +
list(files[:-1 if not output else None]) if files else [])
| {"golden_diff": "diff --git a/rasterio/rio/helpers.py b/rasterio/rio/helpers.py\n--- a/rasterio/rio/helpers.py\n+++ b/rasterio/rio/helpers.py\n@@ -88,7 +88,7 @@\n resolved_output):\n raise FileOverwriteError(\n \"file exists and won't be overwritten without use of the \"\n- \"`-f` or `-o` options.\")\n+ \"`--force-overwrite` or `--output` options.\")\n resolved_inputs = (\n [input] if input else [] +\n list(files[:-1 if not output else None]) if files else [])\n", "issue": "-f does not work as expected in rio-warp\nRunning on a preexisting file\n\n```\n\u00bb rio warp tests/data/RGB.byte.tif /tmp/hi.tif\nError: Could not open file : file exists and won't be overwritten without use of the `-f` or `-o` options.\n```\n\nThen\n\n```\n\u00bb rio warp tests/data/RGB.byte.tif /tmp/hi.tif -f\nError: -f option requires an argument\n```\n\nBut\n\n```\nrio warp tests/data/RGB.byte.tif /tmp/hi.tif --force-overwrite\n```\n\nWorks.\n\n", "before_files": [{"content": "\"\"\"\nHelper objects used by multiple CLI commands.\n\"\"\"\n\nimport json\nimport os\n\nfrom rasterio.errors import FileOverwriteError\n\n\ndef coords(obj):\n \"\"\"Yield all coordinate coordinate tuples from a geometry or feature.\n From python-geojson package.\"\"\"\n if isinstance(obj, (tuple, list)):\n coordinates = obj\n elif 'geometry' in obj:\n coordinates = obj['geometry']['coordinates']\n else:\n coordinates = obj.get('coordinates', obj)\n for e in coordinates:\n if isinstance(e, (float, int)):\n yield tuple(coordinates)\n break\n else:\n for f in coords(e):\n yield f\n\n\ndef write_features(\n fobj, collection, sequence=False, geojson_type='feature', use_rs=False,\n **dump_kwds):\n \"\"\"Read an iterator of (feat, bbox) pairs and write to file using\n the selected modes.\"\"\"\n # Sequence of features expressed as bbox, feature, or collection.\n if sequence:\n for feat in collection():\n xs, ys = zip(*coords(feat))\n bbox = (min(xs), min(ys), max(xs), max(ys))\n if use_rs:\n fobj.write(u'\\u001e')\n if geojson_type == 'feature':\n fobj.write(json.dumps(feat, **dump_kwds))\n elif geojson_type == 'bbox':\n fobj.write(json.dumps(bbox, **dump_kwds))\n else:\n fobj.write(\n json.dumps({\n 'type': 'FeatureCollection',\n 'bbox': bbox,\n 'features': [feat]}, **dump_kwds))\n fobj.write('\\n')\n # Aggregate all features into a single object expressed as\n # bbox or collection.\n else:\n features = list(collection())\n if geojson_type == 'bbox':\n fobj.write(json.dumps(collection.bbox, **dump_kwds))\n elif geojson_type == 'feature':\n fobj.write(json.dumps(features[0], **dump_kwds))\n else:\n fobj.write(json.dumps({\n 'bbox': collection.bbox,\n 'type': 'FeatureCollection',\n 'features': features},\n **dump_kwds))\n fobj.write('\\n')\n\n\ndef resolve_inout(input=None, output=None, files=None, force_overwrite=False):\n \"\"\"Resolves inputs and outputs from standard args and options.\n\n :param input: a single input filename, optional.\n :param output: a single output filename, optional.\n :param files: a sequence of filenames in which the last is the\n output filename.\n :param force_overwrite: whether to force overwriting the output\n file, bool.\n :return: the resolved output filename and input filenames as a\n tuple of length 2.\n\n If provided, the :param:`output` file may be overwritten. 
An output\n file extracted from :param:`files` will not be overwritten unless\n :param:`force_overwrite` is `True`.\n \"\"\"\n resolved_output = output or (files[-1] if files else None)\n force_overwrite = output is not None or force_overwrite\n if not force_overwrite and resolved_output and os.path.exists(\n resolved_output):\n raise FileOverwriteError(\n \"file exists and won't be overwritten without use of the \"\n \"`-f` or `-o` options.\")\n resolved_inputs = (\n [input] if input else [] +\n list(files[:-1 if not output else None]) if files else [])\n return resolved_output, resolved_inputs\n\n\ndef to_lower(ctx, param, value):\n \"\"\"Click callback, converts values to lowercase.\"\"\"\n return value.lower()\n", "path": "rasterio/rio/helpers.py"}]} | 1,638 | 134 |
gh_patches_debug_8115 | rasdani/github-patches | git_diff | ocadotechnology__aimmo-51 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Submitting empty code leads to server error
When sending empty program, we are getting:
Traceback:
File "/Library/Python/2.7/site-packages/django/core/handlers/base.py" in get_response
1. response = wrapped_callback(request, _callback_args, *_callback_kwargs)
File "/Library/Python/2.7/site-packages/django/contrib/auth/decorators.py" in _wrapped_view
2. return view_func(request, _args, *_kwargs)
File "/Users/paulina.koch/Documents/C4L/forks/aimmo/ui/players/views.py" in code
3. world.player_changed_code(request.user.id, request.user.player.code)
File "/Users/paulina.koch/Documents/C4L/forks/aimmo/simulation/game_state.py" in player_changed_code
4. avatar.set_code(code)
File "/Users/paulina.koch/Documents/C4L/forks/aimmo/simulation/avatar/avatar_wrapper.py" in set_code
5. self.avatar = Avatar()
Exception Type: TypeError at /api/code/
Exception Value: 'NoneType' object is not callable
Simple check should be enough, but we could use it as a chance to implement an extension of validation or error handling/printing when submitting code.
</issue>
<code>
[start of simulation/avatar/avatar_wrapper.py]
1 import traceback
2 import sys
3
4 from simulation.action import WaitAction
5
6
7 # This class will be implemented by the player
8 Avatar = None
9
10
11 class UserCodeException(Exception):
12 def __init__(self, *args, **kwargs):
13 super(Exception, self).__init__(*args, **kwargs)
14 self.exc_type, self.exc_value, self.exc_traceback = sys.exc_info()
15
16 def to_user_string(self):
17 lines = traceback.format_exception(self.exc_type, self.exc_value, self.exc_traceback)
18 return '<br/>'.join(lines)
19
20
21 class AvatarWrapper(object):
22 """
23 The application's view of a character, not to be confused with "Avatar", the player-supplied code.
24 """
25
26 def __init__(self, initial_location, initial_code, player_id, avatar_appearance):
27 self.location = initial_location
28 self.health = 5
29 self.score = 0
30 self.events = []
31 self.player_id = player_id
32 self.avatar_appearance = avatar_appearance
33 self.avatar = None
34
35 self.set_code(initial_code)
36
37 def handle_turn(self, state):
38 try:
39 next_action = self.avatar.handle_turn(state, self.events)
40 except Exception as e:
41 # TODO: tell user their program threw an exception during execution somehow...
42 print('avatar threw exception during handle_turn:', e)
43 traceback.print_exc()
44 next_action = WaitAction()
45 # Reset event log
46 self.events = []
47
48 return next_action
49
50 def die(self, respawn_location):
51 # TODO: extract settings for health and score loss on death
52 self.health = 5
53 self.score = max(0, self.score - 2)
54 self.location = respawn_location
55
56 def add_event(self, event):
57 self.events.append(event)
58
59 def set_code(self, code):
60 self.code = code
61 try:
62 exec(code)
63 except Exception as ex:
64 raise UserCodeException("Exception in user code", ex)
65 self.avatar = Avatar()
66
67 def __repr__(self):
68 return 'Avatar(id={}, location={}, health={}, score={})'.format(self.player_id, self.location,
69 self.health, self.score)
70
71
[end of simulation/avatar/avatar_wrapper.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/simulation/avatar/avatar_wrapper.py b/simulation/avatar/avatar_wrapper.py
--- a/simulation/avatar/avatar_wrapper.py
+++ b/simulation/avatar/avatar_wrapper.py
@@ -60,9 +60,9 @@
self.code = code
try:
exec(code)
+ self.avatar = Avatar()
except Exception as ex:
raise UserCodeException("Exception in user code", ex)
- self.avatar = Avatar()
def __repr__(self):
return 'Avatar(id={}, location={}, health={}, score={})'.format(self.player_id, self.location,
| {"golden_diff": "diff --git a/simulation/avatar/avatar_wrapper.py b/simulation/avatar/avatar_wrapper.py\n--- a/simulation/avatar/avatar_wrapper.py\n+++ b/simulation/avatar/avatar_wrapper.py\n@@ -60,9 +60,9 @@\n self.code = code\n try:\n exec(code)\n+ self.avatar = Avatar()\n except Exception as ex:\n raise UserCodeException(\"Exception in user code\", ex)\n- self.avatar = Avatar()\n \n def __repr__(self):\n return 'Avatar(id={}, location={}, health={}, score={})'.format(self.player_id, self.location,\n", "issue": "Submitting empty code leads to server error\nWhen sending empty program, we are getting:\n\nTraceback:\nFile \"/Library/Python/2.7/site-packages/django/core/handlers/base.py\" in get_response\n1. response = wrapped_callback(request, _callback_args, *_callback_kwargs)\n File \"/Library/Python/2.7/site-packages/django/contrib/auth/decorators.py\" in _wrapped_view\n2. return view_func(request, _args, *_kwargs)\n File \"/Users/paulina.koch/Documents/C4L/forks/aimmo/ui/players/views.py\" in code\n3. world.player_changed_code(request.user.id, request.user.player.code)\n File \"/Users/paulina.koch/Documents/C4L/forks/aimmo/simulation/game_state.py\" in player_changed_code\n4. avatar.set_code(code)\n File \"/Users/paulina.koch/Documents/C4L/forks/aimmo/simulation/avatar/avatar_wrapper.py\" in set_code\n5. self.avatar = Avatar()\n\nException Type: TypeError at /api/code/\nException Value: 'NoneType' object is not callable\n\nSimple check should be enough, but we could use it as a chance to implement an extension of validation or error handling/printing when submitting code.\n\n", "before_files": [{"content": "import traceback\nimport sys\n\nfrom simulation.action import WaitAction\n\n\n# This class will be implemented by the player\nAvatar = None\n\n\nclass UserCodeException(Exception):\n def __init__(self, *args, **kwargs):\n super(Exception, self).__init__(*args, **kwargs)\n self.exc_type, self.exc_value, self.exc_traceback = sys.exc_info()\n\n def to_user_string(self):\n lines = traceback.format_exception(self.exc_type, self.exc_value, self.exc_traceback)\n return '<br/>'.join(lines)\n\n\nclass AvatarWrapper(object):\n \"\"\"\n The application's view of a character, not to be confused with \"Avatar\", the player-supplied code.\n \"\"\"\n\n def __init__(self, initial_location, initial_code, player_id, avatar_appearance):\n self.location = initial_location\n self.health = 5\n self.score = 0\n self.events = []\n self.player_id = player_id\n self.avatar_appearance = avatar_appearance\n self.avatar = None\n\n self.set_code(initial_code)\n\n def handle_turn(self, state):\n try:\n next_action = self.avatar.handle_turn(state, self.events)\n except Exception as e:\n # TODO: tell user their program threw an exception during execution somehow...\n print('avatar threw exception during handle_turn:', e)\n traceback.print_exc()\n next_action = WaitAction()\n # Reset event log\n self.events = []\n\n return next_action\n\n def die(self, respawn_location):\n # TODO: extract settings for health and score loss on death\n self.health = 5\n self.score = max(0, self.score - 2)\n self.location = respawn_location\n\n def add_event(self, event):\n self.events.append(event)\n\n def set_code(self, code):\n self.code = code\n try:\n exec(code)\n except Exception as ex:\n raise UserCodeException(\"Exception in user code\", ex)\n self.avatar = Avatar()\n\n def __repr__(self):\n return 'Avatar(id={}, location={}, health={}, score={})'.format(self.player_id, self.location,\n self.health, self.score)\n\n", "path": 
"simulation/avatar/avatar_wrapper.py"}]} | 1,407 | 125 |
gh_patches_debug_1611 | rasdani/github-patches | git_diff | google__mobly-311 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Exceptions in `setup_test` should leave the test in `ERROR` status
Regardless of the type of the exception, `setup_test` error should cause `ERROR` status.
This is different from a test method.
In a test method, an exception based on signals.TestFailure should cause the test to exit with `FAILED` status.
This is to be consistent with pyunit's behavior.
</issue>
<code>
[start of mobly/signals.py]
1 # Copyright 2016 Google Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """This module is where all the test signal classes and related utilities live.
15 """
16
17 import functools
18 import json
19
20
21 class TestSignalError(Exception):
22 """Raised when an error occurs inside a test signal."""
23
24
25 class TestSignal(Exception):
26 """Base class for all test result control signals. This is used to signal
27 the result of a test.
28
29 Attribute:
30 details: A string that describes the reason for raising this signal.
31 extras: A json-serializable data type to convey extra information about
32 a test result.
33 """
34
35 def __init__(self, details, extras=None):
36 super(TestSignal, self).__init__(details)
37 self.details = str(details)
38 try:
39 json.dumps(extras)
40 self.extras = extras
41 except TypeError:
42 raise TestSignalError('Extras must be json serializable. %s '
43 'is not.' % extras)
44
45 def __str__(self):
46 return 'Details=%s, Extras=%s' % (self.details, self.extras)
47
48
49 class TestFailure(TestSignal):
50 """Raised when a test has failed."""
51
52
53 class TestPass(TestSignal):
54 """Raised when a test has passed."""
55
56
57 class TestSkip(TestSignal):
58 """Raised when a test has been skipped."""
59
60
61 class TestAbortClass(TestSignal):
62 """Raised when all subsequent tests within the same test class should
63 be aborted.
64 """
65
66
67 class TestAbortAll(TestSignal):
68 """Raised when all subsequent tests should be aborted."""
69
70
71 class ControllerError(Exception):
72 """Raised when an error occured in controller classes."""
73
[end of mobly/signals.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mobly/signals.py b/mobly/signals.py
--- a/mobly/signals.py
+++ b/mobly/signals.py
@@ -46,6 +46,10 @@
return 'Details=%s, Extras=%s' % (self.details, self.extras)
+class TestError(TestSignal):
+ """Raised when a test has an unexpected error."""
+
+
class TestFailure(TestSignal):
"""Raised when a test has failed."""
| {"golden_diff": "diff --git a/mobly/signals.py b/mobly/signals.py\n--- a/mobly/signals.py\n+++ b/mobly/signals.py\n@@ -46,6 +46,10 @@\n return 'Details=%s, Extras=%s' % (self.details, self.extras)\n \n \n+class TestError(TestSignal):\n+ \"\"\"Raised when a test has an unexpected error.\"\"\"\n+\n+\n class TestFailure(TestSignal):\n \"\"\"Raised when a test has failed.\"\"\"\n", "issue": "Exceptions in `setup_test` should leave the test in `ERROR` status\nRegardless of the type of the exception, `setup_test` error should cause `ERROR` status.\r\n\r\nThis is different from a test method.\r\nIn a test method, an exception based on signals.TestFailure should cause the test to exit with `FAILED` status.\r\n\r\nThis is to be consistent with pyunit's behavior.\n", "before_files": [{"content": "# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"This module is where all the test signal classes and related utilities live.\n\"\"\"\n\nimport functools\nimport json\n\n\nclass TestSignalError(Exception):\n \"\"\"Raised when an error occurs inside a test signal.\"\"\"\n\n\nclass TestSignal(Exception):\n \"\"\"Base class for all test result control signals. This is used to signal\n the result of a test.\n\n Attribute:\n details: A string that describes the reason for raising this signal.\n extras: A json-serializable data type to convey extra information about\n a test result.\n \"\"\"\n\n def __init__(self, details, extras=None):\n super(TestSignal, self).__init__(details)\n self.details = str(details)\n try:\n json.dumps(extras)\n self.extras = extras\n except TypeError:\n raise TestSignalError('Extras must be json serializable. %s '\n 'is not.' % extras)\n\n def __str__(self):\n return 'Details=%s, Extras=%s' % (self.details, self.extras)\n\n\nclass TestFailure(TestSignal):\n \"\"\"Raised when a test has failed.\"\"\"\n\n\nclass TestPass(TestSignal):\n \"\"\"Raised when a test has passed.\"\"\"\n\n\nclass TestSkip(TestSignal):\n \"\"\"Raised when a test has been skipped.\"\"\"\n\n\nclass TestAbortClass(TestSignal):\n \"\"\"Raised when all subsequent tests within the same test class should\n be aborted.\n \"\"\"\n\n\nclass TestAbortAll(TestSignal):\n \"\"\"Raised when all subsequent tests should be aborted.\"\"\"\n\n\nclass ControllerError(Exception):\n \"\"\"Raised when an error occured in controller classes.\"\"\"\n", "path": "mobly/signals.py"}]} | 1,219 | 107 |
gh_patches_debug_37219 | rasdani/github-patches | git_diff | streamlink__streamlink-3290 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
bfmtv - No playable streams found on this URL
<!--
Thanks for reporting a plugin issue!
USE THE TEMPLATE. Otherwise your plugin issue may be rejected.
First, see the contribution guidelines:
https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink
Also check the list of open and closed plugin issues:
https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22
Please see the text preview to avoid unnecessary formatting errors.
-->
## Plugin Issue
<!-- Replace [ ] with [x] in order to check the box -->
- [x] This is a plugin issue and I have read the contribution guidelines.
### Description
<!-- Explain the plugin issue as thoroughly as you can. -->
There are no playable streams for bfmtv
### Reproduction steps / Explicit stream URLs to test
<!-- How can we reproduce this? Please note the exact steps below using the list format supplied. If you need more steps please add them. -->
`streamlink https://www.bfmtv.com/en-direct/ best`
or `streamlink https://www.bfmtv.com/paris/en-direct/ best`
or any other channels supported by this plugin
### Log output
<!--
TEXT LOG OUTPUT IS REQUIRED for a plugin issue!
Use the `--loglevel debug` parameter and avoid using parameters which suppress log output.
https://streamlink.github.io/cli.html#cmdoption-l
Make sure to **remove usernames and passwords**
You can copy the output to https://gist.github.com/ or paste it below.
Don't post screenshots of the log output and instead copy the text from your terminal application.
-->
```
streamlink --loglevel debug https://www.bfmtv.com/en-direct/ best
[cli][debug] OS: Linux-5.8.15-201.fc32.x86_64-x86_64-with-glibc2.2.5
[cli][debug] Python: 3.8.6
[cli][debug] Streamlink: 1.7.0
[cli][debug] Requests(2.24.0), Socks(1.7.1), Websocket(0.57.0)
[cli][info] Found matching plugin bfmtv for URL https://www.bfmtv.com/en-direct/
error: No playable streams found on this URL: https://www.bfmtv.com/en-direct/
```
### Additional comments, etc.
As a workaround you can use their dailymotion stream: `streamlink https://www.dailymotion.com/embed/video/xgz4t1 best`
</issue>
<code>
[start of src/streamlink/plugins/bfmtv.py]
1 import re
2
3 from streamlink.plugin import Plugin
4 from streamlink.plugins.brightcove import BrightcovePlayer
5 from streamlink.stream import HLSStream
6
7
8 class BFMTV(Plugin):
9 _url_re = re.compile(r'https://.+\.(?:bfmtv|01net)\.com')
10 _brightcove_video_re = re.compile(
11 r'data-holder="video(?P<video_id>[0-9]+)" data-account="(?P<account_id>[0-9]+)"'
12 )
13 _brightcove_video_alt_re = re.compile(
14 r'data-account="(?P<account_id>[0-9]+).*?data-video-id="(?P<video_id>[0-9]+)"',
15 re.DOTALL
16 )
17 _embed_video_url_re = re.compile(
18 r"\$YOPLAYER\('liveStitching', {.+?file: '(?P<video_url>[^\"]+?)'.+?}\);",
19 re.DOTALL
20 )
21
22 @classmethod
23 def can_handle_url(cls, url):
24 return BFMTV._url_re.match(url)
25
26 def _get_streams(self):
27 # Retrieve URL page and search for Brightcove video data
28 res = self.session.http.get(self.url)
29 match = self._brightcove_video_re.search(res.text) or self._brightcove_video_alt_re.search(res.text)
30 if match is not None:
31 account_id = match.group('account_id')
32 video_id = match.group('video_id')
33 player = BrightcovePlayer(self.session, account_id)
34 for stream in player.get_streams(video_id):
35 yield stream
36 else:
37 # Try to get the stream URL in the page
38 match = self._embed_video_url_re.search(res.text)
39 if match is not None:
40 video_url = match.group('video_url')
41 if '.m3u8' in video_url:
42 yield from HLSStream.parse_variant_playlist(self.session, video_url).items()
43
44
45 __plugin__ = BFMTV
46
[end of src/streamlink/plugins/bfmtv.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/streamlink/plugins/bfmtv.py b/src/streamlink/plugins/bfmtv.py
--- a/src/streamlink/plugins/bfmtv.py
+++ b/src/streamlink/plugins/bfmtv.py
@@ -1,45 +1,46 @@
+import logging
import re
from streamlink.plugin import Plugin
from streamlink.plugins.brightcove import BrightcovePlayer
-from streamlink.stream import HLSStream
+
+log = logging.getLogger(__name__)
class BFMTV(Plugin):
_url_re = re.compile(r'https://.+\.(?:bfmtv|01net)\.com')
+ _dailymotion_url = 'https://www.dailymotion.com/embed/video/{}'
_brightcove_video_re = re.compile(
- r'data-holder="video(?P<video_id>[0-9]+)" data-account="(?P<account_id>[0-9]+)"'
- )
- _brightcove_video_alt_re = re.compile(
- r'data-account="(?P<account_id>[0-9]+).*?data-video-id="(?P<video_id>[0-9]+)"',
+ r'accountid="(?P<account_id>[0-9]+).*?videoid="(?P<video_id>[0-9]+)"',
re.DOTALL
)
- _embed_video_url_re = re.compile(
- r"\$YOPLAYER\('liveStitching', {.+?file: '(?P<video_url>[^\"]+?)'.+?}\);",
+ _embed_video_id_re = re.compile(
+ r'<iframe.*?src=".*?/(?P<video_id>\w+)"',
re.DOTALL
)
@classmethod
def can_handle_url(cls, url):
- return BFMTV._url_re.match(url)
+ return cls._url_re.match(url) is not None
def _get_streams(self):
# Retrieve URL page and search for Brightcove video data
res = self.session.http.get(self.url)
- match = self._brightcove_video_re.search(res.text) or self._brightcove_video_alt_re.search(res.text)
+ match = self._brightcove_video_re.search(res.text)
if match is not None:
account_id = match.group('account_id')
+ log.debug(f'Account ID: {account_id}')
video_id = match.group('video_id')
+ log.debug(f'Video ID: {video_id}')
player = BrightcovePlayer(self.session, account_id)
- for stream in player.get_streams(video_id):
- yield stream
+ yield from player.get_streams(video_id)
else:
- # Try to get the stream URL in the page
- match = self._embed_video_url_re.search(res.text)
+ # Try to find the Dailymotion video ID
+ match = self._embed_video_id_re.search(res.text)
if match is not None:
- video_url = match.group('video_url')
- if '.m3u8' in video_url:
- yield from HLSStream.parse_variant_playlist(self.session, video_url).items()
+ video_id = match.group('video_id')
+ log.debug(f'Video ID: {video_id}')
+ yield from self.session.streams(self._dailymotion_url.format(video_id)).items()
__plugin__ = BFMTV
| {"golden_diff": "diff --git a/src/streamlink/plugins/bfmtv.py b/src/streamlink/plugins/bfmtv.py\n--- a/src/streamlink/plugins/bfmtv.py\n+++ b/src/streamlink/plugins/bfmtv.py\n@@ -1,45 +1,46 @@\n+import logging\n import re\n \n from streamlink.plugin import Plugin\n from streamlink.plugins.brightcove import BrightcovePlayer\n-from streamlink.stream import HLSStream\n+\n+log = logging.getLogger(__name__)\n \n \n class BFMTV(Plugin):\n _url_re = re.compile(r'https://.+\\.(?:bfmtv|01net)\\.com')\n+ _dailymotion_url = 'https://www.dailymotion.com/embed/video/{}'\n _brightcove_video_re = re.compile(\n- r'data-holder=\"video(?P<video_id>[0-9]+)\" data-account=\"(?P<account_id>[0-9]+)\"'\n- )\n- _brightcove_video_alt_re = re.compile(\n- r'data-account=\"(?P<account_id>[0-9]+).*?data-video-id=\"(?P<video_id>[0-9]+)\"',\n+ r'accountid=\"(?P<account_id>[0-9]+).*?videoid=\"(?P<video_id>[0-9]+)\"',\n re.DOTALL\n )\n- _embed_video_url_re = re.compile(\n- r\"\\$YOPLAYER\\('liveStitching', {.+?file: '(?P<video_url>[^\\\"]+?)'.+?}\\);\",\n+ _embed_video_id_re = re.compile(\n+ r'<iframe.*?src=\".*?/(?P<video_id>\\w+)\"',\n re.DOTALL\n )\n \n @classmethod\n def can_handle_url(cls, url):\n- return BFMTV._url_re.match(url)\n+ return cls._url_re.match(url) is not None\n \n def _get_streams(self):\n # Retrieve URL page and search for Brightcove video data\n res = self.session.http.get(self.url)\n- match = self._brightcove_video_re.search(res.text) or self._brightcove_video_alt_re.search(res.text)\n+ match = self._brightcove_video_re.search(res.text)\n if match is not None:\n account_id = match.group('account_id')\n+ log.debug(f'Account ID: {account_id}')\n video_id = match.group('video_id')\n+ log.debug(f'Video ID: {video_id}')\n player = BrightcovePlayer(self.session, account_id)\n- for stream in player.get_streams(video_id):\n- yield stream\n+ yield from player.get_streams(video_id)\n else:\n- # Try to get the stream URL in the page\n- match = self._embed_video_url_re.search(res.text)\n+ # Try to find the Dailymotion video ID\n+ match = self._embed_video_id_re.search(res.text)\n if match is not None:\n- video_url = match.group('video_url')\n- if '.m3u8' in video_url:\n- yield from HLSStream.parse_variant_playlist(self.session, video_url).items()\n+ video_id = match.group('video_id')\n+ log.debug(f'Video ID: {video_id}')\n+ yield from self.session.streams(self._dailymotion_url.format(video_id)).items()\n \n \n __plugin__ = BFMTV\n", "issue": "bfmtv - No playable streams found on this URL\n<!--\r\nThanks for reporting a plugin issue!\r\nUSE THE TEMPLATE. Otherwise your plugin issue may be rejected.\r\n\r\nFirst, see the contribution guidelines:\r\nhttps://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink\r\n\r\nAlso check the list of open and closed plugin issues:\r\nhttps://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22\r\n\r\nPlease see the text preview to avoid unnecessary formatting errors.\r\n-->\r\n\r\n\r\n## Plugin Issue\r\n\r\n<!-- Replace [ ] with [x] in order to check the box -->\r\n- [x] This is a plugin issue and I have read the contribution guidelines.\r\n\r\n\r\n### Description\r\n\r\n<!-- Explain the plugin issue as thoroughly as you can. -->\r\nThere is no playable streams for bfmtv\r\n\r\n### Reproduction steps / Explicit stream URLs to test\r\n\r\n<!-- How can we reproduce this? Please note the exact steps below using the list format supplied. If you need more steps please add them. 
-->\r\n\r\n`streamlink https://www.bfmtv.com/en-direct/ best`\r\n\r\nor `streamlink https://www.bfmtv.com/paris/en-direct/ best`\r\nor any other channels supported by this plugin\r\n\r\n\r\n### Log output\r\n\r\n<!--\r\nTEXT LOG OUTPUT IS REQUIRED for a plugin issue!\r\nUse the `--loglevel debug` parameter and avoid using parameters which suppress log output.\r\nhttps://streamlink.github.io/cli.html#cmdoption-l\r\n\r\nMake sure to **remove usernames and passwords**\r\nYou can copy the output to https://gist.github.com/ or paste it below.\r\n\r\nDon't post screenshots of the log output and instead copy the text from your terminal application.\r\n-->\r\n\r\n```\r\nstreamlink --loglevel debug https://www.bfmtv.com/en-direct/ best\r\n[cli][debug] OS: Linux-5.8.15-201.fc32.x86_64-x86_64-with-glibc2.2.5\r\n[cli][debug] Python: 3.8.6\r\n[cli][debug] Streamlink: 1.7.0\r\n[cli][debug] Requests(2.24.0), Socks(1.7.1), Websocket(0.57.0)\r\n[cli][info] Found matching plugin bfmtv for URL https://www.bfmtv.com/en-direct/\r\nerror: No playable streams found on this URL: https://www.bfmtv.com/en-direct/\r\n```\r\n\r\n\r\n### Additional comments, etc.\r\n\r\nAs a workaround you can use their dailymotion stream: `streamlink https://www.dailymotion.com/embed/video/xgz4t1 best`\n", "before_files": [{"content": "import re\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugins.brightcove import BrightcovePlayer\nfrom streamlink.stream import HLSStream\n\n\nclass BFMTV(Plugin):\n _url_re = re.compile(r'https://.+\\.(?:bfmtv|01net)\\.com')\n _brightcove_video_re = re.compile(\n r'data-holder=\"video(?P<video_id>[0-9]+)\" data-account=\"(?P<account_id>[0-9]+)\"'\n )\n _brightcove_video_alt_re = re.compile(\n r'data-account=\"(?P<account_id>[0-9]+).*?data-video-id=\"(?P<video_id>[0-9]+)\"',\n re.DOTALL\n )\n _embed_video_url_re = re.compile(\n r\"\\$YOPLAYER\\('liveStitching', {.+?file: '(?P<video_url>[^\\\"]+?)'.+?}\\);\",\n re.DOTALL\n )\n\n @classmethod\n def can_handle_url(cls, url):\n return BFMTV._url_re.match(url)\n\n def _get_streams(self):\n # Retrieve URL page and search for Brightcove video data\n res = self.session.http.get(self.url)\n match = self._brightcove_video_re.search(res.text) or self._brightcove_video_alt_re.search(res.text)\n if match is not None:\n account_id = match.group('account_id')\n video_id = match.group('video_id')\n player = BrightcovePlayer(self.session, account_id)\n for stream in player.get_streams(video_id):\n yield stream\n else:\n # Try to get the stream URL in the page\n match = self._embed_video_url_re.search(res.text)\n if match is not None:\n video_url = match.group('video_url')\n if '.m3u8' in video_url:\n yield from HLSStream.parse_variant_playlist(self.session, video_url).items()\n\n\n__plugin__ = BFMTV\n", "path": "src/streamlink/plugins/bfmtv.py"}]} | 1,633 | 749 |
gh_patches_debug_27707 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-8345 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
puregym_gb spider broken
It looks like the PureGym spider (puregym_gb.py) is broken. For the last few weeks it was consistently returning 303 results, but then on 2024-05-04 the number dropped to 1. Looking at the Spider stats, I think it's found 401 branches, but then dropped all but one of them for having duplicate refs.
If I've understood what's going on correctly, the spider code is obtaining a ref from a "gymId" parameter on each individual branch page. However, I don't think this parameter exists any more on those pages. Hence each branch gets the same null ref value, and then all the duplicates are dropped.
Hopefully removing the custom ref code will fix the spider. I think there should be around 400 branches in total. (There's currently 399 unique https://www.puregym.com/gyms/*/ URLs in the sitemap.) So probably even the 303 results being returned consistently before were a result of incorrectly dropping ~100 gyms.
I've just checked a sample of 15 of the URLs from the sitemap. I found one "temporarily closed" https://www.puregym.com/gyms/tonbridge/ and one "coming soon" https://www.puregym.com/gyms/washington/ . I don't know if it's worth trying to catch these cases or not.
</issue>
<code>
[start of locations/spiders/puregym_gb.py]
1 from scrapy.spiders import SitemapSpider
2
3 from locations.google_url import extract_google_position
4 from locations.structured_data_spider import StructuredDataSpider
5
6
7 class PureGymGBSpider(SitemapSpider, StructuredDataSpider):
8 name = "puregym_gb"
9 item_attributes = {
10 "brand": "PureGym",
11 "brand_wikidata": "Q18345898",
12 "country": "GB",
13 }
14 allowed_domains = ["www.puregym.com"]
15 sitemap_urls = ["https://www.puregym.com/sitemap.xml"]
16 sitemap_rules = [
17 (
18 r"https:\/\/www\.puregym\.com\/gyms\/([\w-]+)\/$",
19 "parse_sd",
20 ),
21 ]
22 wanted_types = ["HealthClub"]
23
24 def inspect_item(self, item, response):
25 item["ref"] = response.xpath('//meta[@itemprop="gymId"]/@content').get()
26 extract_google_position(item, response)
27
28 yield item
29
[end of locations/spiders/puregym_gb.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/locations/spiders/puregym_gb.py b/locations/spiders/puregym_gb.py
--- a/locations/spiders/puregym_gb.py
+++ b/locations/spiders/puregym_gb.py
@@ -1,28 +1,28 @@
+from urllib.parse import parse_qs, urlparse
+
from scrapy.spiders import SitemapSpider
-from locations.google_url import extract_google_position
from locations.structured_data_spider import StructuredDataSpider
class PureGymGBSpider(SitemapSpider, StructuredDataSpider):
name = "puregym_gb"
- item_attributes = {
- "brand": "PureGym",
- "brand_wikidata": "Q18345898",
- "country": "GB",
- }
+ item_attributes = {"brand": "PureGym", "brand_wikidata": "Q18345898", "country": "GB"}
allowed_domains = ["www.puregym.com"]
sitemap_urls = ["https://www.puregym.com/sitemap.xml"]
- sitemap_rules = [
- (
- r"https:\/\/www\.puregym\.com\/gyms\/([\w-]+)\/$",
- "parse_sd",
- ),
- ]
+ sitemap_rules = [(r"/gyms/([^/]+)/$", "parse_sd")]
wanted_types = ["HealthClub"]
- def inspect_item(self, item, response):
- item["ref"] = response.xpath('//meta[@itemprop="gymId"]/@content').get()
- extract_google_position(item, response)
+ def pre_process_data(self, ld_data, **kwargs):
+ ld_data["address"] = ld_data.get("location", {}).get("address")
+
+ def post_process_item(self, item, response, ld_data, **kwargs):
+ item["branch"] = item.pop("name")
+ item["image"] = None
+
+ if img := response.xpath('//img[contains(@src, "tiles.stadiamaps.com")]/@src').get():
+ q = parse_qs(urlparse(img)[4])
+ if "center" in q:
+ item["lat"], item["lon"] = q["center"][0].split(",", 1)
yield item
| {"golden_diff": "diff --git a/locations/spiders/puregym_gb.py b/locations/spiders/puregym_gb.py\n--- a/locations/spiders/puregym_gb.py\n+++ b/locations/spiders/puregym_gb.py\n@@ -1,28 +1,28 @@\n+from urllib.parse import parse_qs, urlparse\n+\n from scrapy.spiders import SitemapSpider\n \n-from locations.google_url import extract_google_position\n from locations.structured_data_spider import StructuredDataSpider\n \n \n class PureGymGBSpider(SitemapSpider, StructuredDataSpider):\n name = \"puregym_gb\"\n- item_attributes = {\n- \"brand\": \"PureGym\",\n- \"brand_wikidata\": \"Q18345898\",\n- \"country\": \"GB\",\n- }\n+ item_attributes = {\"brand\": \"PureGym\", \"brand_wikidata\": \"Q18345898\", \"country\": \"GB\"}\n allowed_domains = [\"www.puregym.com\"]\n sitemap_urls = [\"https://www.puregym.com/sitemap.xml\"]\n- sitemap_rules = [\n- (\n- r\"https:\\/\\/www\\.puregym\\.com\\/gyms\\/([\\w-]+)\\/$\",\n- \"parse_sd\",\n- ),\n- ]\n+ sitemap_rules = [(r\"/gyms/([^/]+)/$\", \"parse_sd\")]\n wanted_types = [\"HealthClub\"]\n \n- def inspect_item(self, item, response):\n- item[\"ref\"] = response.xpath('//meta[@itemprop=\"gymId\"]/@content').get()\n- extract_google_position(item, response)\n+ def pre_process_data(self, ld_data, **kwargs):\n+ ld_data[\"address\"] = ld_data.get(\"location\", {}).get(\"address\")\n+\n+ def post_process_item(self, item, response, ld_data, **kwargs):\n+ item[\"branch\"] = item.pop(\"name\")\n+ item[\"image\"] = None\n+\n+ if img := response.xpath('//img[contains(@src, \"tiles.stadiamaps.com\")]/@src').get():\n+ q = parse_qs(urlparse(img)[4])\n+ if \"center\" in q:\n+ item[\"lat\"], item[\"lon\"] = q[\"center\"][0].split(\",\", 1)\n \n yield item\n", "issue": "puregym_gb spider broken\nIt looks like the PureGym spider (puregym_gb.py) is broken. For the last few weeks it was consistently returning 303 results, but then on 2024-05-04 the number dropped to 1. Looking at the Spider stats, I think it's found 401 branches, but then dropped all but one of them for having duplicate refs.\r\n\r\nIf I've understood what's going on correctly, the spider code is obtaining a ref from a \"gymId\" parameter on each individual branch page. However, I don't think this parameter exists any more on those pages. Hence each branch gets the same null ref value, and then all the duplicates are dropped.\r\n\r\nHopefully removing the custom ref code will fix the spider. I think there should be around 400 branches in total. (There's currently 399 unique https://www.puregym.com/gyms/*/ URLs in the sitemap.) So probably even the 303 results being returned consistently before was as a result of incorrectly dropping ~100 gyms.\r\n\r\nI've just checked a sample of 15 of the URLs from the sitemap. I found one \"temporarily closed\" https://www.puregym.com/gyms/tonbridge/ and one \"coming soon\" https://www.puregym.com/gyms/washington/ . 
I don't know if it's worth trying to catch these cases or not.\n", "before_files": [{"content": "from scrapy.spiders import SitemapSpider\n\nfrom locations.google_url import extract_google_position\nfrom locations.structured_data_spider import StructuredDataSpider\n\n\nclass PureGymGBSpider(SitemapSpider, StructuredDataSpider):\n name = \"puregym_gb\"\n item_attributes = {\n \"brand\": \"PureGym\",\n \"brand_wikidata\": \"Q18345898\",\n \"country\": \"GB\",\n }\n allowed_domains = [\"www.puregym.com\"]\n sitemap_urls = [\"https://www.puregym.com/sitemap.xml\"]\n sitemap_rules = [\n (\n r\"https:\\/\\/www\\.puregym\\.com\\/gyms\\/([\\w-]+)\\/$\",\n \"parse_sd\",\n ),\n ]\n wanted_types = [\"HealthClub\"]\n\n def inspect_item(self, item, response):\n item[\"ref\"] = response.xpath('//meta[@itemprop=\"gymId\"]/@content').get()\n extract_google_position(item, response)\n\n yield item\n", "path": "locations/spiders/puregym_gb.py"}]} | 1,129 | 512 |
gh_patches_debug_34298 | rasdani/github-patches | git_diff | electricitymaps__electricitymaps-contrib-1398 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add price to Turkey
I am not sure which of the ticker [prices](https://www.epias.com.tr/en) to use, but section 6.7.3 BLOCK ORDER BOOK in [this guide](https://www.epias.com.tr/wp-content/uploads/2017/06/INTRADAY_MARKET_USER_GUIDE_ENG_18.09.2017.pdf) may help.
</issue>
<code>
[start of parsers/TR.py]
1 #!/usr/bin/env python3
2
3 import requests
4 import re
5 import json
6 import arrow
7
8 SEARCH_DATA = re.compile(r'var gunlukUretimEgrisiData = (?P<data>.*);')
9 TIMEZONE = 'Europe/Istanbul'
10 URL = 'https://ytbs.teias.gov.tr/ytbs/frm_login.jsf'
11 EMPTY_DAY = -1
12
13 MAP_GENERATION = {
14 'akarsu': 'hydro',
15 'barajli': 'hydro',
16 'dogalgaz': 'gas',
17 'lng': 'gas',
18 'lpg': 'gas',
19 'jeotermal': 'geothermal',
20 'taskomur': 'coal',
21 'asfaltitkomur': 'coal',
22 'linyit': 'coal',
23 'ithalkomur': 'coal',
24 'ruzgar': 'wind',
25 'fueloil': 'oil',
26 'biyokutle': 'biomass',
27 'nafta': 'oil',
28 'gunes': 'solar',
29 'nukleer': 'nuclear',
30 'kojenerasyon': 'unknown',
31 'motorin': 'oil',
32 }
33
34
35 def as_float(prod):
36 """Convert json values to float and sum all production for a further use"""
37 prod['total'] = 0.0
38 if isinstance(prod, dict) and 'yuk' not in prod.keys():
39 for prod_type, prod_val in prod.items():
40 prod[prod_type] = float(prod_val)
41 prod['total'] += prod[prod_type]
42 return prod
43
44
45 def get_last_data_idx(productions):
46 """
47 Find index of the last production
48 :param productions: list of 24 production dict objects
49 :return: (int) index of the newest data or -1 if no data (empty day)
50 """
51 for i in range(len(productions)):
52 if productions[i]['total'] < 1000:
53 return i - 1
54 return len(productions) - 1 # full day
55
56
57 def fetch_production(zone_key='TR', session=None, target_datetime=None, logger=None):
58 """
59 Requests the last known production mix (in MW) of a given country
60 Arguments:
61 zone_key (optional) -- used in case a parser is able to fetch multiple countries
62 session (optional) -- request session passed in order to re-use an existing session
63 Return:
64 A list of dictionaries in the form:
65 {
66 'zoneKey': 'FR',
67 'datetime': '2017-01-01T00:00:00Z',
68 'production': {
69 'biomass': 0.0,
70 'coal': 0.0,
71 'gas': 0.0,
72 'hydro': 0.0,
73 'nuclear': null,
74 'oil': 0.0,
75 'solar': 0.0,
76 'wind': 0.0,
77 'geothermal': 0.0,
78 'unknown': 0.0
79 },
80 'storage': {
81 'hydro': -10.0,
82 },
83 'source': 'mysource.com'
84 }
85 """
86 if target_datetime:
87 raise NotImplementedError('This parser is not yet able to parse past dates')
88
89 session = None # Explicitely make a new session to avoid caching from their server...
90 r = session or requests.session()
91 tr_datetime = arrow.now().to('Europe/Istanbul').floor('day')
92 response = r.get(URL, verify=False)
93 str_data = re.search(SEARCH_DATA, response.text)
94
95 production_by_hour = []
96 if str_data:
97 productions = json.loads(str_data.group('data'), object_hook=as_float)
98 last_data_index = get_last_data_idx(productions)
99 valid_production = productions[:last_data_index + 1]
100 if last_data_index != EMPTY_DAY:
101 for datapoint in valid_production:
102 data = {
103 'zoneKey': zone_key,
104 'production': {},
105 'storage': {},
106 'source': 'ytbs.teias.gov.tr',
107 'datetime': None
108 }
109 data['production'] = dict(zip(MAP_GENERATION.values(), [0] * len(MAP_GENERATION)))
110 for prod_type, prod_val in datapoint.items():
111 if prod_type in MAP_GENERATION.keys():
112 data['production'][MAP_GENERATION[prod_type]] += prod_val
113 elif prod_type not in ['total', 'uluslarasi', 'saat']:
114 logger.warning('Warning: %s (%d) is missing in mapping!' % (prod_type, prod_val))
115
116 try:
117 data['datetime'] = tr_datetime.replace(hour=int(datapoint['saat'])).datetime
118 except ValueError:
119 # 24 is not a valid hour!
120 data['datetime'] = tr_datetime.datetime
121
122 production_by_hour.append(data)
123 else:
124 raise Exception('Extracted data was None')
125
126 return production_by_hour
127
128
129 if __name__ == '__main__':
130 """Main method, never used by the Electricity Map backend, but handy for testing."""
131
132 print('fetch_production() ->')
133 print(fetch_production())
134
[end of parsers/TR.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/parsers/TR.py b/parsers/TR.py
--- a/parsers/TR.py
+++ b/parsers/TR.py
@@ -4,12 +4,17 @@
import re
import json
import arrow
+from bs4 import BeautifulSoup
+import datetime as dt
SEARCH_DATA = re.compile(r'var gunlukUretimEgrisiData = (?P<data>.*);')
TIMEZONE = 'Europe/Istanbul'
URL = 'https://ytbs.teias.gov.tr/ytbs/frm_login.jsf'
EMPTY_DAY = -1
+PRICE_URL = 'https://seffaflik.epias.com.tr/transparency/piyasalar/' \
+ 'gop/ptf.xhtml'
+
MAP_GENERATION = {
'akarsu': 'hydro',
'barajli': 'hydro',
@@ -54,6 +59,35 @@
return len(productions) - 1 # full day
+def fetch_price():
+ soup = BeautifulSoup(requests.get(PRICE_URL).text, 'html.parser')
+ cells = soup.select('.TexAlCenter')
+
+ # data is in td elements with class "TexAlCenter" and role "gridcell"
+ data = list()
+ for cell in cells:
+ if cell.attrs.get('role', '') != 'gridcell':
+ continue
+ data.append(cell.text)
+
+ dates = [dt.datetime.strptime(val, '%d/%m/%Y').date()
+ for i, val in enumerate(data) if i % 3 == 0]
+ times = [dt.datetime.strptime(val, '%H:%M').time()
+ for i, val in enumerate(data) if i % 3 == 1]
+ prices = [float(val.replace(',', '.'))
+ for i, val in enumerate(data) if i % 3 == 2]
+
+ datapoints = [{
+ 'zoneKey': 'TR',
+ 'currency': 'TRY',
+ 'datetime': arrow.get(
+ dt.datetime.combine(date, time)).to('Europe/Istanbul').datetime,
+ 'price': price,
+ 'source': 'epias.com.tr'
+ } for date, time, price in zip(dates, times, prices)]
+ return datapoints
+
+
def fetch_production(zone_key='TR', session=None, target_datetime=None, logger=None):
"""
Requests the last known production mix (in MW) of a given country
@@ -127,7 +161,10 @@
if __name__ == '__main__':
- """Main method, never used by the Electricity Map backend, but handy for testing."""
+ """Main method, never used by the Electricity Map backend, but handy for
+ testing."""
print('fetch_production() ->')
print(fetch_production())
+ print('fetch_price() ->')
+ print(fetch_price())
| {"golden_diff": "diff --git a/parsers/TR.py b/parsers/TR.py\n--- a/parsers/TR.py\n+++ b/parsers/TR.py\n@@ -4,12 +4,17 @@\n import re\n import json\n import arrow\n+from bs4 import BeautifulSoup\n+import datetime as dt\n \n SEARCH_DATA = re.compile(r'var gunlukUretimEgrisiData = (?P<data>.*);')\n TIMEZONE = 'Europe/Istanbul'\n URL = 'https://ytbs.teias.gov.tr/ytbs/frm_login.jsf'\n EMPTY_DAY = -1\n \n+PRICE_URL = 'https://seffaflik.epias.com.tr/transparency/piyasalar/' \\\n+ 'gop/ptf.xhtml'\n+\n MAP_GENERATION = {\n 'akarsu': 'hydro',\n 'barajli': 'hydro',\n@@ -54,6 +59,35 @@\n return len(productions) - 1 # full day\n \n \n+def fetch_price():\n+ soup = BeautifulSoup(requests.get(PRICE_URL).text, 'html.parser')\n+ cells = soup.select('.TexAlCenter')\n+\n+ # data is in td elements with class \"TexAlCenter\" and role \"gridcell\"\n+ data = list()\n+ for cell in cells:\n+ if cell.attrs.get('role', '') != 'gridcell':\n+ continue\n+ data.append(cell.text)\n+\n+ dates = [dt.datetime.strptime(val, '%d/%m/%Y').date()\n+ for i, val in enumerate(data) if i % 3 == 0]\n+ times = [dt.datetime.strptime(val, '%H:%M').time()\n+ for i, val in enumerate(data) if i % 3 == 1]\n+ prices = [float(val.replace(',', '.'))\n+ for i, val in enumerate(data) if i % 3 == 2]\n+\n+ datapoints = [{\n+ 'zoneKey': 'TR',\n+ 'currency': 'TRY',\n+ 'datetime': arrow.get(\n+ dt.datetime.combine(date, time)).to('Europe/Istanbul').datetime,\n+ 'price': price,\n+ 'source': 'epias.com.tr'\n+ } for date, time, price in zip(dates, times, prices)]\n+ return datapoints\n+\n+\n def fetch_production(zone_key='TR', session=None, target_datetime=None, logger=None):\n \"\"\"\n Requests the last known production mix (in MW) of a given country\n@@ -127,7 +161,10 @@\n \n \n if __name__ == '__main__':\n- \"\"\"Main method, never used by the Electricity Map backend, but handy for testing.\"\"\"\n+ \"\"\"Main method, never used by the Electricity Map backend, but handy for\n+ testing.\"\"\"\n \n print('fetch_production() ->')\n print(fetch_production())\n+ print('fetch_price() ->')\n+ print(fetch_price())\n", "issue": "Add price to Turkey\nI am not sure which of the ticker [prices](https://www.epias.com.tr/en) to use but section 6.7.3 BLOCK ORDER BOOK in [this guide](https://www.epias.com.tr/wp-content/uploads/2017/06/INTRADAY_MARKET_USER_GUIDE_ENG_18.09.2017.pdf) may help.\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nimport requests\nimport re\nimport json\nimport arrow\n\nSEARCH_DATA = re.compile(r'var gunlukUretimEgrisiData = (?P<data>.*);')\nTIMEZONE = 'Europe/Istanbul'\nURL = 'https://ytbs.teias.gov.tr/ytbs/frm_login.jsf'\nEMPTY_DAY = -1\n\nMAP_GENERATION = {\n 'akarsu': 'hydro',\n 'barajli': 'hydro',\n 'dogalgaz': 'gas',\n 'lng': 'gas',\n 'lpg': 'gas',\n 'jeotermal': 'geothermal',\n 'taskomur': 'coal',\n 'asfaltitkomur': 'coal',\n 'linyit': 'coal',\n 'ithalkomur': 'coal',\n 'ruzgar': 'wind',\n 'fueloil': 'oil',\n 'biyokutle': 'biomass',\n 'nafta': 'oil',\n 'gunes': 'solar',\n 'nukleer': 'nuclear',\n 'kojenerasyon': 'unknown',\n 'motorin': 'oil',\n}\n\n\ndef as_float(prod):\n \"\"\"Convert json values to float and sum all production for a further use\"\"\"\n prod['total'] = 0.0\n if isinstance(prod, dict) and 'yuk' not in prod.keys():\n for prod_type, prod_val in prod.items():\n prod[prod_type] = float(prod_val)\n prod['total'] += prod[prod_type]\n return prod\n\n\ndef get_last_data_idx(productions):\n \"\"\"\n Find index of the last production\n :param productions: list of 24 production dict objects\n :return: (int) index of 
the newest data or -1 if no data (empty day)\n \"\"\"\n for i in range(len(productions)):\n if productions[i]['total'] < 1000:\n return i - 1\n return len(productions) - 1 # full day\n\n\ndef fetch_production(zone_key='TR', session=None, target_datetime=None, logger=None):\n \"\"\"\n Requests the last known production mix (in MW) of a given country\n Arguments:\n zone_key (optional) -- used in case a parser is able to fetch multiple countries\n session (optional) -- request session passed in order to re-use an existing session\n Return:\n A list of dictionaries in the form:\n {\n 'zoneKey': 'FR',\n 'datetime': '2017-01-01T00:00:00Z',\n 'production': {\n 'biomass': 0.0,\n 'coal': 0.0,\n 'gas': 0.0,\n 'hydro': 0.0,\n 'nuclear': null,\n 'oil': 0.0,\n 'solar': 0.0,\n 'wind': 0.0,\n 'geothermal': 0.0,\n 'unknown': 0.0\n },\n 'storage': {\n 'hydro': -10.0,\n },\n 'source': 'mysource.com'\n }\n \"\"\"\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n session = None # Explicitely make a new session to avoid caching from their server...\n r = session or requests.session()\n tr_datetime = arrow.now().to('Europe/Istanbul').floor('day')\n response = r.get(URL, verify=False)\n str_data = re.search(SEARCH_DATA, response.text)\n\n production_by_hour = []\n if str_data:\n productions = json.loads(str_data.group('data'), object_hook=as_float)\n last_data_index = get_last_data_idx(productions)\n valid_production = productions[:last_data_index + 1]\n if last_data_index != EMPTY_DAY:\n for datapoint in valid_production:\n data = {\n 'zoneKey': zone_key,\n 'production': {},\n 'storage': {},\n 'source': 'ytbs.teias.gov.tr',\n 'datetime': None\n }\n data['production'] = dict(zip(MAP_GENERATION.values(), [0] * len(MAP_GENERATION)))\n for prod_type, prod_val in datapoint.items():\n if prod_type in MAP_GENERATION.keys():\n data['production'][MAP_GENERATION[prod_type]] += prod_val\n elif prod_type not in ['total', 'uluslarasi', 'saat']:\n logger.warning('Warning: %s (%d) is missing in mapping!' % (prod_type, prod_val))\n\n try:\n data['datetime'] = tr_datetime.replace(hour=int(datapoint['saat'])).datetime\n except ValueError:\n # 24 is not a valid hour!\n data['datetime'] = tr_datetime.datetime\n\n production_by_hour.append(data)\n else:\n raise Exception('Extracted data was None')\n\n return production_by_hour\n\n\nif __name__ == '__main__':\n \"\"\"Main method, never used by the Electricity Map backend, but handy for testing.\"\"\"\n\n print('fetch_production() ->')\n print(fetch_production())\n", "path": "parsers/TR.py"}]} | 2,047 | 631 |
gh_patches_debug_477 | rasdani/github-patches | git_diff | scrapy__scrapy-5786 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
pypy3-pinned OpenSSL error
Yet another CI issue, visible in https://github.com/scrapy/scrapy/actions/runs/3849823417/jobs/6559259481
> /home/runner/work/scrapy/scrapy/.tox/pypy3-pinned/site-packages/cryptography/hazmat/bindings/_openssl.pypy37-pp73-x86_64-linux-gnu.so: undefined symbol: FIPS_mode
This may be a problem with that specific binary distribution (it's `cryptography==3.3`); I'm not sure why it worked before. Maybe something was rebuilt recently.
</issue>
<code>
[start of setup.py]
1 from pathlib import Path
2 from pkg_resources import parse_version
3 from setuptools import setup, find_packages, __version__ as setuptools_version
4
5
6 version = (Path(__file__).parent / 'scrapy/VERSION').read_text('ascii').strip()
7
8
9 def has_environment_marker_platform_impl_support():
10 """Code extracted from 'pytest/setup.py'
11 https://github.com/pytest-dev/pytest/blob/7538680c/setup.py#L31
12
13 The first known release to support environment marker with range operators
14 it is 18.5, see:
15 https://setuptools.readthedocs.io/en/latest/history.html#id235
16 """
17 return parse_version(setuptools_version) >= parse_version('18.5')
18
19
20 install_requires = [
21 'Twisted>=18.9.0',
22 'cryptography>=3.3',
23 'cssselect>=0.9.1',
24 'itemloaders>=1.0.1',
25 'parsel>=1.5.0',
26 'pyOpenSSL>=21.0.0',
27 'queuelib>=1.4.2',
28 'service_identity>=18.1.0',
29 'w3lib>=1.17.0',
30 'zope.interface>=5.1.0',
31 'protego>=0.1.15',
32 'itemadapter>=0.1.0',
33 'setuptools',
34 'packaging',
35 'tldextract',
36 'lxml>=4.3.0',
37 ]
38 extras_require = {}
39 cpython_dependencies = [
40 'PyDispatcher>=2.0.5',
41 ]
42 if has_environment_marker_platform_impl_support():
43 extras_require[':platform_python_implementation == "CPython"'] = cpython_dependencies
44 extras_require[':platform_python_implementation == "PyPy"'] = [
45 'PyPyDispatcher>=2.1.0',
46 ]
47 else:
48 install_requires.extend(cpython_dependencies)
49
50
51 setup(
52 name='Scrapy',
53 version=version,
54 url='https://scrapy.org',
55 project_urls={
56 'Documentation': 'https://docs.scrapy.org/',
57 'Source': 'https://github.com/scrapy/scrapy',
58 'Tracker': 'https://github.com/scrapy/scrapy/issues',
59 },
60 description='A high-level Web Crawling and Web Scraping framework',
61 long_description=open('README.rst', encoding="utf-8").read(),
62 author='Scrapy developers',
63 author_email='[email protected]',
64 maintainer='Pablo Hoffman',
65 maintainer_email='[email protected]',
66 license='BSD',
67 packages=find_packages(exclude=('tests', 'tests.*')),
68 include_package_data=True,
69 zip_safe=False,
70 entry_points={
71 'console_scripts': ['scrapy = scrapy.cmdline:execute']
72 },
73 classifiers=[
74 'Framework :: Scrapy',
75 'Development Status :: 5 - Production/Stable',
76 'Environment :: Console',
77 'Intended Audience :: Developers',
78 'License :: OSI Approved :: BSD License',
79 'Operating System :: OS Independent',
80 'Programming Language :: Python',
81 'Programming Language :: Python :: 3',
82 'Programming Language :: Python :: 3.7',
83 'Programming Language :: Python :: 3.8',
84 'Programming Language :: Python :: 3.9',
85 'Programming Language :: Python :: 3.10',
86 'Programming Language :: Python :: 3.11',
87 'Programming Language :: Python :: Implementation :: CPython',
88 'Programming Language :: Python :: Implementation :: PyPy',
89 'Topic :: Internet :: WWW/HTTP',
90 'Topic :: Software Development :: Libraries :: Application Frameworks',
91 'Topic :: Software Development :: Libraries :: Python Modules',
92 ],
93 python_requires='>=3.7',
94 install_requires=install_requires,
95 extras_require=extras_require,
96 )
97
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -19,7 +19,7 @@
install_requires = [
'Twisted>=18.9.0',
- 'cryptography>=3.3',
+ 'cryptography>=3.4.6',
'cssselect>=0.9.1',
'itemloaders>=1.0.1',
'parsel>=1.5.0',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -19,7 +19,7 @@\n \n install_requires = [\n 'Twisted>=18.9.0',\n- 'cryptography>=3.3',\n+ 'cryptography>=3.4.6',\n 'cssselect>=0.9.1',\n 'itemloaders>=1.0.1',\n 'parsel>=1.5.0',\n", "issue": "pypy3-pinned OpenSSL error\nYet another CI issue, visible in https://github.com/scrapy/scrapy/actions/runs/3849823417/jobs/6559259481\r\n\r\n> /home/runner/work/scrapy/scrapy/.tox/pypy3-pinned/site-packages/cryptography/hazmat/bindings/_openssl.pypy37-pp73-x86_64-linux-gnu.so: undefined symbol: FIPS_mode\r\n\r\nThis may be a problem with that specific binary distribution (it's `cryptography==3.3`), not sure why it worked before, maybe something was rebuilt recently. \n", "before_files": [{"content": "from pathlib import Path\nfrom pkg_resources import parse_version\nfrom setuptools import setup, find_packages, __version__ as setuptools_version\n\n\nversion = (Path(__file__).parent / 'scrapy/VERSION').read_text('ascii').strip()\n\n\ndef has_environment_marker_platform_impl_support():\n \"\"\"Code extracted from 'pytest/setup.py'\n https://github.com/pytest-dev/pytest/blob/7538680c/setup.py#L31\n\n The first known release to support environment marker with range operators\n it is 18.5, see:\n https://setuptools.readthedocs.io/en/latest/history.html#id235\n \"\"\"\n return parse_version(setuptools_version) >= parse_version('18.5')\n\n\ninstall_requires = [\n 'Twisted>=18.9.0',\n 'cryptography>=3.3',\n 'cssselect>=0.9.1',\n 'itemloaders>=1.0.1',\n 'parsel>=1.5.0',\n 'pyOpenSSL>=21.0.0',\n 'queuelib>=1.4.2',\n 'service_identity>=18.1.0',\n 'w3lib>=1.17.0',\n 'zope.interface>=5.1.0',\n 'protego>=0.1.15',\n 'itemadapter>=0.1.0',\n 'setuptools',\n 'packaging',\n 'tldextract',\n 'lxml>=4.3.0',\n]\nextras_require = {}\ncpython_dependencies = [\n 'PyDispatcher>=2.0.5',\n]\nif has_environment_marker_platform_impl_support():\n extras_require[':platform_python_implementation == \"CPython\"'] = cpython_dependencies\n extras_require[':platform_python_implementation == \"PyPy\"'] = [\n 'PyPyDispatcher>=2.1.0',\n ]\nelse:\n install_requires.extend(cpython_dependencies)\n\n\nsetup(\n name='Scrapy',\n version=version,\n url='https://scrapy.org',\n project_urls={\n 'Documentation': 'https://docs.scrapy.org/',\n 'Source': 'https://github.com/scrapy/scrapy',\n 'Tracker': 'https://github.com/scrapy/scrapy/issues',\n },\n description='A high-level Web Crawling and Web Scraping framework',\n long_description=open('README.rst', encoding=\"utf-8\").read(),\n author='Scrapy developers',\n author_email='[email protected]',\n maintainer='Pablo Hoffman',\n maintainer_email='[email protected]',\n license='BSD',\n packages=find_packages(exclude=('tests', 'tests.*')),\n include_package_data=True,\n zip_safe=False,\n entry_points={\n 'console_scripts': ['scrapy = scrapy.cmdline:execute']\n },\n classifiers=[\n 'Framework :: Scrapy',\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: 3.10',\n 'Programming Language :: Python :: 3.11',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Topic :: 
Internet :: WWW/HTTP',\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n python_requires='>=3.7',\n install_requires=install_requires,\n extras_require=extras_require,\n)\n", "path": "setup.py"}]} | 1,695 | 106 |
gh_patches_debug_28254 | rasdani/github-patches | git_diff | airctic__icevision-722 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Rename EfficientDet Backbones
Rename the EfficientDet backbones by deleting the `efficientdet_` part from the names: there is no need to reference it, because the model name already suggests it.
</issue>
<code>
[start of icevision/models/ross/efficientdet/backbones.py]
1 __all__ = [
2 "tf_efficientdet_lite0",
3 "efficientdet_d0",
4 "efficientdet_d1",
5 "efficientdet_d2",
6 "efficientdet_d3",
7 "efficientdet_d4",
8 "efficientdet_d5",
9 "efficientdet_d6",
10 "efficientdet_d7",
11 "efficientdet_d7x",
12 ]
13
14 from icevision.models.ross.efficientdet.utils import *
15
16
17 tf_efficientdet_lite0 = EfficientDetBackboneConfig(model_name="tf_efficientdet_lite0")
18
19 efficientdet_d0 = EfficientDetBackboneConfig(model_name="efficientdet_d0")
20
21 efficientdet_d1 = EfficientDetBackboneConfig(model_name="efficientdet_d1")
22
23 efficientdet_d2 = EfficientDetBackboneConfig(model_name="efficientdet_d2")
24
25 efficientdet_d3 = EfficientDetBackboneConfig(model_name="efficientdet_d3")
26
27 efficientdet_d4 = EfficientDetBackboneConfig(model_name="efficientdet_d4")
28
29 efficientdet_d5 = EfficientDetBackboneConfig(model_name="efficientdet_d5")
30
31 efficientdet_d6 = EfficientDetBackboneConfig(model_name="efficientdet_d6")
32
33 efficientdet_d7 = EfficientDetBackboneConfig(model_name="efficientdet_d7")
34
35 efficientdet_d7x = EfficientDetBackboneConfig(model_name="efficientdet_d7x")
36
[end of icevision/models/ross/efficientdet/backbones.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/icevision/models/ross/efficientdet/backbones.py b/icevision/models/ross/efficientdet/backbones.py
--- a/icevision/models/ross/efficientdet/backbones.py
+++ b/icevision/models/ross/efficientdet/backbones.py
@@ -1,35 +1,35 @@
__all__ = [
- "tf_efficientdet_lite0",
- "efficientdet_d0",
- "efficientdet_d1",
- "efficientdet_d2",
- "efficientdet_d3",
- "efficientdet_d4",
- "efficientdet_d5",
- "efficientdet_d6",
- "efficientdet_d7",
- "efficientdet_d7x",
+ "tf_lite0",
+ "d0",
+ "d1",
+ "d2",
+ "d3",
+ "d4",
+ "d5",
+ "d6",
+ "d7",
+ "d7x",
]
from icevision.models.ross.efficientdet.utils import *
-tf_efficientdet_lite0 = EfficientDetBackboneConfig(model_name="tf_efficientdet_lite0")
+tf_lite0 = EfficientDetBackboneConfig(model_name="tf_efficientdet_lite0")
-efficientdet_d0 = EfficientDetBackboneConfig(model_name="efficientdet_d0")
+d0 = EfficientDetBackboneConfig(model_name="efficientdet_d0")
-efficientdet_d1 = EfficientDetBackboneConfig(model_name="efficientdet_d1")
+d1 = EfficientDetBackboneConfig(model_name="efficientdet_d1")
-efficientdet_d2 = EfficientDetBackboneConfig(model_name="efficientdet_d2")
+d2 = EfficientDetBackboneConfig(model_name="efficientdet_d2")
-efficientdet_d3 = EfficientDetBackboneConfig(model_name="efficientdet_d3")
+d3 = EfficientDetBackboneConfig(model_name="efficientdet_d3")
-efficientdet_d4 = EfficientDetBackboneConfig(model_name="efficientdet_d4")
+d4 = EfficientDetBackboneConfig(model_name="efficientdet_d4")
-efficientdet_d5 = EfficientDetBackboneConfig(model_name="efficientdet_d5")
+d5 = EfficientDetBackboneConfig(model_name="efficientdet_d5")
-efficientdet_d6 = EfficientDetBackboneConfig(model_name="efficientdet_d6")
+d6 = EfficientDetBackboneConfig(model_name="efficientdet_d6")
-efficientdet_d7 = EfficientDetBackboneConfig(model_name="efficientdet_d7")
+d7 = EfficientDetBackboneConfig(model_name="efficientdet_d7")
-efficientdet_d7x = EfficientDetBackboneConfig(model_name="efficientdet_d7x")
+d7x = EfficientDetBackboneConfig(model_name="efficientdet_d7x")
| {"golden_diff": "diff --git a/icevision/models/ross/efficientdet/backbones.py b/icevision/models/ross/efficientdet/backbones.py\n--- a/icevision/models/ross/efficientdet/backbones.py\n+++ b/icevision/models/ross/efficientdet/backbones.py\n@@ -1,35 +1,35 @@\n __all__ = [\n- \"tf_efficientdet_lite0\",\n- \"efficientdet_d0\",\n- \"efficientdet_d1\",\n- \"efficientdet_d2\",\n- \"efficientdet_d3\",\n- \"efficientdet_d4\",\n- \"efficientdet_d5\",\n- \"efficientdet_d6\",\n- \"efficientdet_d7\",\n- \"efficientdet_d7x\",\n+ \"tf_lite0\",\n+ \"d0\",\n+ \"d1\",\n+ \"d2\",\n+ \"d3\",\n+ \"d4\",\n+ \"d5\",\n+ \"d6\",\n+ \"d7\",\n+ \"d7x\",\n ]\n \n from icevision.models.ross.efficientdet.utils import *\n \n \n-tf_efficientdet_lite0 = EfficientDetBackboneConfig(model_name=\"tf_efficientdet_lite0\")\n+tf_lite0 = EfficientDetBackboneConfig(model_name=\"tf_efficientdet_lite0\")\n \n-efficientdet_d0 = EfficientDetBackboneConfig(model_name=\"efficientdet_d0\")\n+d0 = EfficientDetBackboneConfig(model_name=\"efficientdet_d0\")\n \n-efficientdet_d1 = EfficientDetBackboneConfig(model_name=\"efficientdet_d1\")\n+d1 = EfficientDetBackboneConfig(model_name=\"efficientdet_d1\")\n \n-efficientdet_d2 = EfficientDetBackboneConfig(model_name=\"efficientdet_d2\")\n+d2 = EfficientDetBackboneConfig(model_name=\"efficientdet_d2\")\n \n-efficientdet_d3 = EfficientDetBackboneConfig(model_name=\"efficientdet_d3\")\n+d3 = EfficientDetBackboneConfig(model_name=\"efficientdet_d3\")\n \n-efficientdet_d4 = EfficientDetBackboneConfig(model_name=\"efficientdet_d4\")\n+d4 = EfficientDetBackboneConfig(model_name=\"efficientdet_d4\")\n \n-efficientdet_d5 = EfficientDetBackboneConfig(model_name=\"efficientdet_d5\")\n+d5 = EfficientDetBackboneConfig(model_name=\"efficientdet_d5\")\n \n-efficientdet_d6 = EfficientDetBackboneConfig(model_name=\"efficientdet_d6\")\n+d6 = EfficientDetBackboneConfig(model_name=\"efficientdet_d6\")\n \n-efficientdet_d7 = EfficientDetBackboneConfig(model_name=\"efficientdet_d7\")\n+d7 = EfficientDetBackboneConfig(model_name=\"efficientdet_d7\")\n \n-efficientdet_d7x = EfficientDetBackboneConfig(model_name=\"efficientdet_d7x\")\n+d7x = EfficientDetBackboneConfig(model_name=\"efficientdet_d7x\")\n", "issue": "Rename EfficientDet Backbones\nrename EfficientDet Backbones by deleting the `efficientdet_` part from the names: no need to reference that because the model already suggests that\r\n\n", "before_files": [{"content": "__all__ = [\n \"tf_efficientdet_lite0\",\n \"efficientdet_d0\",\n \"efficientdet_d1\",\n \"efficientdet_d2\",\n \"efficientdet_d3\",\n \"efficientdet_d4\",\n \"efficientdet_d5\",\n \"efficientdet_d6\",\n \"efficientdet_d7\",\n \"efficientdet_d7x\",\n]\n\nfrom icevision.models.ross.efficientdet.utils import *\n\n\ntf_efficientdet_lite0 = EfficientDetBackboneConfig(model_name=\"tf_efficientdet_lite0\")\n\nefficientdet_d0 = EfficientDetBackboneConfig(model_name=\"efficientdet_d0\")\n\nefficientdet_d1 = EfficientDetBackboneConfig(model_name=\"efficientdet_d1\")\n\nefficientdet_d2 = EfficientDetBackboneConfig(model_name=\"efficientdet_d2\")\n\nefficientdet_d3 = EfficientDetBackboneConfig(model_name=\"efficientdet_d3\")\n\nefficientdet_d4 = EfficientDetBackboneConfig(model_name=\"efficientdet_d4\")\n\nefficientdet_d5 = EfficientDetBackboneConfig(model_name=\"efficientdet_d5\")\n\nefficientdet_d6 = EfficientDetBackboneConfig(model_name=\"efficientdet_d6\")\n\nefficientdet_d7 = EfficientDetBackboneConfig(model_name=\"efficientdet_d7\")\n\nefficientdet_d7x = 
EfficientDetBackboneConfig(model_name=\"efficientdet_d7x\")\n", "path": "icevision/models/ross/efficientdet/backbones.py"}]} | 938 | 601 |
gh_patches_debug_32768 | rasdani/github-patches | git_diff | cisagov__manage.get.gov-972 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fixtures loads 8 times on docker-compose up
### Current Behavior
When starting up the application, the console log shows that the fixtures file is executed 8 times in a row. However, the current logic in the fixtures file prevents duplicate entries in the database.
### Expected Behavior
Data should just be loaded into the db once when using fixtures.
### Steps to Reproduce
1. locally run docker-compose up
2. observe the logs
3. see that the logs for loading domain applications, users, etc. are repeated 8 times each. Also note the warning messages showing that an attempt was made to enter data that already exists in the db.
### Environment
local & on sandbox (see the deploy logs)
### Additional Context
see this [slack thread](https://cisa-corp.slack.com/archives/C05BGB4L5NF/p1687988129781299)
### Issue Links
_No response_
</issue>
<code>
[start of src/registrar/management/commands/load.py]
1 import logging
2
3 from django.core.management.base import BaseCommand
4 from auditlog.context import disable_auditlog # type: ignore
5
6 from registrar.fixtures import UserFixture, DomainApplicationFixture, DomainFixture
7
8 logger = logging.getLogger(__name__)
9
10
11 class Command(BaseCommand):
12 def handle(self, *args, **options):
13 # django-auditlog has some bugs with fixtures
14 # https://github.com/jazzband/django-auditlog/issues/17
15 with disable_auditlog():
16 UserFixture.load()
17 DomainApplicationFixture.load()
18 DomainFixture.load()
19 logger.info("All fixtures loaded.")
20
[end of src/registrar/management/commands/load.py]
[start of src/registrar/signals.py]
1 import logging
2
3 from django.conf import settings
4 from django.core.management import call_command
5 from django.db.models.signals import post_save, post_migrate
6 from django.dispatch import receiver
7
8 from .models import User, Contact
9
10
11 logger = logging.getLogger(__name__)
12
13
14 @receiver(post_save, sender=User)
15 def handle_profile(sender, instance, **kwargs):
16 """Method for when a User is saved.
17
18 A first time registrant may have been invited, so we'll search for a matching
19 Contact record, by email address, and associate them, if possible.
20
21 A first time registrant may not have a matching Contact, so we'll create one,
22 copying the contact values we received from Login.gov in order to initialize it.
23
24 During subsequent login, a User record may be updated with new data from Login.gov,
25 but in no case will we update contact values on an existing Contact record.
26 """
27
28 first_name = getattr(instance, "first_name", "")
29 last_name = getattr(instance, "last_name", "")
30 email = getattr(instance, "email", "")
31 phone = getattr(instance, "phone", "")
32
33 is_new_user = kwargs.get("created", False)
34
35 if is_new_user:
36 contacts = Contact.objects.filter(email=email)
37 else:
38 contacts = Contact.objects.filter(user=instance)
39
40 if len(contacts) == 0: # no matching contact
41 Contact.objects.create(
42 user=instance,
43 first_name=first_name,
44 last_name=last_name,
45 email=email,
46 phone=phone,
47 )
48
49 if len(contacts) >= 1 and is_new_user: # a matching contact
50 contacts[0].user = instance
51 contacts[0].save()
52
53 if len(contacts) > 1: # multiple matches
54 logger.warning(
55 "There are multiple Contacts with the same email address."
56 f" Picking #{contacts[0].id} for User #{instance.id}."
57 )
58
59
60 @receiver(post_migrate)
61 def handle_loaddata(**kwargs):
62 """Attempt to load test fixtures when in DEBUG mode."""
63 if settings.DEBUG:
64 try:
65 call_command("load")
66 except Exception as e:
67 logger.warning(e)
68
[end of src/registrar/signals.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/registrar/management/commands/load.py b/src/registrar/management/commands/load.py
--- a/src/registrar/management/commands/load.py
+++ b/src/registrar/management/commands/load.py
@@ -2,6 +2,7 @@
from django.core.management.base import BaseCommand
from auditlog.context import disable_auditlog # type: ignore
+from django.conf import settings
from registrar.fixtures import UserFixture, DomainApplicationFixture, DomainFixture
@@ -12,8 +13,11 @@
def handle(self, *args, **options):
# django-auditlog has some bugs with fixtures
# https://github.com/jazzband/django-auditlog/issues/17
- with disable_auditlog():
- UserFixture.load()
- DomainApplicationFixture.load()
- DomainFixture.load()
- logger.info("All fixtures loaded.")
+ if settings.DEBUG:
+ with disable_auditlog():
+ UserFixture.load()
+ DomainApplicationFixture.load()
+ DomainFixture.load()
+ logger.info("All fixtures loaded.")
+ else:
+ logger.warn("Refusing to load fixture data in a non DEBUG env")
diff --git a/src/registrar/signals.py b/src/registrar/signals.py
--- a/src/registrar/signals.py
+++ b/src/registrar/signals.py
@@ -1,8 +1,6 @@
import logging
-from django.conf import settings
-from django.core.management import call_command
-from django.db.models.signals import post_save, post_migrate
+from django.db.models.signals import post_save
from django.dispatch import receiver
from .models import User, Contact
@@ -55,13 +53,3 @@
"There are multiple Contacts with the same email address."
f" Picking #{contacts[0].id} for User #{instance.id}."
)
-
-
-@receiver(post_migrate)
-def handle_loaddata(**kwargs):
- """Attempt to load test fixtures when in DEBUG mode."""
- if settings.DEBUG:
- try:
- call_command("load")
- except Exception as e:
- logger.warning(e)
| {"golden_diff": "diff --git a/src/registrar/management/commands/load.py b/src/registrar/management/commands/load.py\n--- a/src/registrar/management/commands/load.py\n+++ b/src/registrar/management/commands/load.py\n@@ -2,6 +2,7 @@\n \n from django.core.management.base import BaseCommand\n from auditlog.context import disable_auditlog # type: ignore\n+from django.conf import settings\n \n from registrar.fixtures import UserFixture, DomainApplicationFixture, DomainFixture\n \n@@ -12,8 +13,11 @@\n def handle(self, *args, **options):\n # django-auditlog has some bugs with fixtures\n # https://github.com/jazzband/django-auditlog/issues/17\n- with disable_auditlog():\n- UserFixture.load()\n- DomainApplicationFixture.load()\n- DomainFixture.load()\n- logger.info(\"All fixtures loaded.\")\n+ if settings.DEBUG:\n+ with disable_auditlog():\n+ UserFixture.load()\n+ DomainApplicationFixture.load()\n+ DomainFixture.load()\n+ logger.info(\"All fixtures loaded.\")\n+ else:\n+ logger.warn(\"Refusing to load fixture data in a non DEBUG env\")\ndiff --git a/src/registrar/signals.py b/src/registrar/signals.py\n--- a/src/registrar/signals.py\n+++ b/src/registrar/signals.py\n@@ -1,8 +1,6 @@\n import logging\n \n-from django.conf import settings\n-from django.core.management import call_command\n-from django.db.models.signals import post_save, post_migrate\n+from django.db.models.signals import post_save\n from django.dispatch import receiver\n \n from .models import User, Contact\n@@ -55,13 +53,3 @@\n \"There are multiple Contacts with the same email address.\"\n f\" Picking #{contacts[0].id} for User #{instance.id}.\"\n )\n-\n-\n-@receiver(post_migrate)\n-def handle_loaddata(**kwargs):\n- \"\"\"Attempt to load test fixtures when in DEBUG mode.\"\"\"\n- if settings.DEBUG:\n- try:\n- call_command(\"load\")\n- except Exception as e:\n- logger.warning(e)\n", "issue": "Fixtures loads 8 times on docker-compose up\n### Current Behavior\n\nWhen starting up the application, the console log shows that the fixtures file is executed 8 times in a row. However, current logic on the fixtures file is preventing duplicate entries in the database.\n\n### Expected Behavior\n\nData should just be loaded into the db once when using fixtures.\n\n### Steps to Reproduce\n\n1. locally run docker-compose up\r\n2. observe the logs\r\n3. see that the logs for loading domain application, users, etc are repeated 8 times each. 
Also note the warning messages that show an attempt was made to enter data that already exists in the db.\r\n\n\n### Environment\n\nlocal & on sandbox (see the deploy logs)\n\n### Additional Context\n\nsee this [slack thread](https://cisa-corp.slack.com/archives/C05BGB4L5NF/p1687988129781299)\n\n### Issue Links\n\n_No response_\n", "before_files": [{"content": "import logging\n\nfrom django.core.management.base import BaseCommand\nfrom auditlog.context import disable_auditlog # type: ignore\n\nfrom registrar.fixtures import UserFixture, DomainApplicationFixture, DomainFixture\n\nlogger = logging.getLogger(__name__)\n\n\nclass Command(BaseCommand):\n def handle(self, *args, **options):\n # django-auditlog has some bugs with fixtures\n # https://github.com/jazzband/django-auditlog/issues/17\n with disable_auditlog():\n UserFixture.load()\n DomainApplicationFixture.load()\n DomainFixture.load()\n logger.info(\"All fixtures loaded.\")\n", "path": "src/registrar/management/commands/load.py"}, {"content": "import logging\n\nfrom django.conf import settings\nfrom django.core.management import call_command\nfrom django.db.models.signals import post_save, post_migrate\nfrom django.dispatch import receiver\n\nfrom .models import User, Contact\n\n\nlogger = logging.getLogger(__name__)\n\n\n@receiver(post_save, sender=User)\ndef handle_profile(sender, instance, **kwargs):\n \"\"\"Method for when a User is saved.\n\n A first time registrant may have been invited, so we'll search for a matching\n Contact record, by email address, and associate them, if possible.\n\n A first time registrant may not have a matching Contact, so we'll create one,\n copying the contact values we received from Login.gov in order to initialize it.\n\n During subsequent login, a User record may be updated with new data from Login.gov,\n but in no case will we update contact values on an existing Contact record.\n \"\"\"\n\n first_name = getattr(instance, \"first_name\", \"\")\n last_name = getattr(instance, \"last_name\", \"\")\n email = getattr(instance, \"email\", \"\")\n phone = getattr(instance, \"phone\", \"\")\n\n is_new_user = kwargs.get(\"created\", False)\n\n if is_new_user:\n contacts = Contact.objects.filter(email=email)\n else:\n contacts = Contact.objects.filter(user=instance)\n\n if len(contacts) == 0: # no matching contact\n Contact.objects.create(\n user=instance,\n first_name=first_name,\n last_name=last_name,\n email=email,\n phone=phone,\n )\n\n if len(contacts) >= 1 and is_new_user: # a matching contact\n contacts[0].user = instance\n contacts[0].save()\n\n if len(contacts) > 1: # multiple matches\n logger.warning(\n \"There are multiple Contacts with the same email address.\"\n f\" Picking #{contacts[0].id} for User #{instance.id}.\"\n )\n\n\n@receiver(post_migrate)\ndef handle_loaddata(**kwargs):\n \"\"\"Attempt to load test fixtures when in DEBUG mode.\"\"\"\n if settings.DEBUG:\n try:\n call_command(\"load\")\n except Exception as e:\n logger.warning(e)\n", "path": "src/registrar/signals.py"}]} | 1,528 | 462 |
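A note on why the fixtures ran exactly 8 times in the row above: Django emits `post_migrate` once per installed app, so the deleted receiver, registered without a `sender`, fired for every app in `INSTALLED_APPS`. Had the signal been kept, the documented way to run it once is to bind it to a single `AppConfig` in `ready()`. A sketch under that assumption (the app label `"registrar"` is taken from the paths in the diff; the merged fix instead deletes the signal and gates the command on `settings.DEBUG`):

```
from django.apps import AppConfig
from django.db.models.signals import post_migrate


def handle_loaddata(sender, **kwargs):
    # Runs once per `migrate`, not once per installed app, because the
    # receiver below is bound to a single AppConfig as the sender.
    from django.core.management import call_command

    call_command("load")


class RegistrarConfig(AppConfig):
    name = "registrar"

    def ready(self):
        post_migrate.connect(handle_loaddata, sender=self)
```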
gh_patches_debug_1646 | rasdani/github-patches | git_diff | dotkom__onlineweb4-2553 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Can't delete mails through REST API endpoints
The endpoint to remove mails is fucked :)
</issue>
<code>
[start of apps/authentication/api/views.py]
1 from django.contrib.auth.models import Group
2 from rest_framework import mixins, status, viewsets
3 from rest_framework.decorators import action
4 from rest_framework.permissions import AllowAny, IsAuthenticated
5 from rest_framework.response import Response
6
7 from apps.authentication.models import Email, GroupMember, GroupRole, OnlineGroup
8 from apps.authentication.models import OnlineUser as User
9 from apps.authentication.models import Position, SpecialPosition
10 from apps.authentication.serializers import (
11 AnonymizeUserSerializer,
12 EmailCreateSerializer,
13 EmailReadOnlySerializer,
14 EmailUpdateSerializer,
15 GroupMemberCreateSerializer,
16 GroupMemberReadOnlySerializer,
17 GroupMemberUpdateSerializer,
18 GroupReadOnlySerializer,
19 GroupRoleReadOnlySerializer,
20 OnlineGroupCreateOrUpdateSerializer,
21 OnlineGroupReadOnlySerializer,
22 PasswordUpdateSerializer,
23 PositionCreateAndUpdateSerializer,
24 PositionReadOnlySerializer,
25 SpecialPositionSerializer,
26 UserCreateSerializer,
27 UserReadOnlySerializer,
28 UserUpdateSerializer,
29 )
30 from apps.common.rest_framework.mixins import MultiSerializerMixin
31 from apps.permissions.drf_permissions import DjangoObjectPermissionOrAnonReadOnly
32
33 from .filters import UserFilter
34 from .permissions import IsSelfOrSuperUser
35 from .serializers.user_data import UserDataSerializer
36
37
38 class UserViewSet(
39 MultiSerializerMixin,
40 viewsets.GenericViewSet,
41 mixins.ListModelMixin,
42 mixins.RetrieveModelMixin,
43 mixins.CreateModelMixin,
44 mixins.UpdateModelMixin,
45 ):
46 """
47 Viewset for User serializer. Supports filtering on 'first_name', 'last_name', 'email'
48 """
49
50 permission_classes = (IsSelfOrSuperUser,)
51 filterset_class = UserFilter
52 queryset = User.objects.all()
53 serializer_classes = {
54 "create": UserCreateSerializer,
55 "update": UserUpdateSerializer,
56 "read": UserReadOnlySerializer,
57 "change_password": PasswordUpdateSerializer,
58 "anonymize_user": AnonymizeUserSerializer,
59 "dump_data": UserDataSerializer,
60 }
61
62 @action(detail=True, methods=["put"])
63 def change_password(self, request, pk=None):
64 user: User = self.get_object()
65 serializer = self.get_serializer(user, data=request.data)
66 serializer.is_valid(raise_exception=True)
67 serializer.save()
68
69 return Response(data=None, status=status.HTTP_204_NO_CONTENT)
70
71 @action(detail=True, methods=["put"])
72 def anonymize_user(self, request, pk=None):
73 user: User = self.get_object()
74 serializer = self.get_serializer(user, data=request.data)
75 serializer.is_valid(raise_exception=True)
76 serializer.save()
77
78 return Response(data=None, status=status.HTTP_204_NO_CONTENT)
79
80 @action(detail=True, methods=["get"], url_path="dump-data")
81 def dump_data(self, request, pk: int):
82 user: User = self.get_object()
83 serializer = self.get_serializer(user)
84 return Response(data=serializer.data, status=status.HTTP_200_OK)
85
86
87 class EmailViewSet(MultiSerializerMixin, viewsets.ModelViewSet):
88 permission_classes = (IsAuthenticated,)
89 serializer_classes = {
90 "create": EmailCreateSerializer,
91 "update": EmailUpdateSerializer,
92 "read": EmailReadOnlySerializer,
93 }
94
95 def get_queryset(self):
96 return Email.objects.filter(user=self.request.user)
97
98 def destroy(self, request, *args, **kwargs):
99 instance: Email = self.get_object()
100 if instance.primary:
101 return Response(
102 {
103 "message": "Du kan ikke slette en primær-epost. Du må først velge en annen epost som "
104 "primær for å kunne slette denne."
105 },
106 status=status.HTTP_400_BAD_REQUEST,
107 )
108
109
110 class PositionViewSet(MultiSerializerMixin, viewsets.ModelViewSet):
111 permission_classes = (IsAuthenticated,)
112 serializer_classes = {
113 "read": PositionReadOnlySerializer,
114 "write": PositionCreateAndUpdateSerializer,
115 }
116
117 def get_queryset(self):
118 user = self.request.user
119 return Position.objects.filter(user=user)
120
121
122 class SpecialPositionViewSet(viewsets.ReadOnlyModelViewSet):
123 serializer_class = SpecialPositionSerializer
124 permission_classes = (IsAuthenticated,)
125
126 def get_queryset(self):
127 user = self.request.user
128 return SpecialPosition.objects.filter(user=user)
129
130
131 class GroupViewSet(viewsets.ReadOnlyModelViewSet):
132 permission_classes = (AllowAny,)
133 queryset = Group.objects.all()
134 serializer_class = GroupReadOnlySerializer
135 ordering = ("name",)
136
137
138 class OnlineGroupViewSet(MultiSerializerMixin, viewsets.ModelViewSet):
139 permission_classes = (DjangoObjectPermissionOrAnonReadOnly,)
140 queryset = OnlineGroup.objects.all()
141 serializer_classes = {
142 "write": OnlineGroupCreateOrUpdateSerializer,
143 "read": OnlineGroupReadOnlySerializer,
144 }
145
146
147 class GroupMemberViewSet(MultiSerializerMixin, viewsets.ModelViewSet):
148 permission_classes = (DjangoObjectPermissionOrAnonReadOnly,)
149 queryset = GroupMember.objects.all()
150 serializer_classes = {
151 "create": GroupMemberCreateSerializer,
152 "update": GroupMemberUpdateSerializer,
153 "read": GroupMemberReadOnlySerializer,
154 }
155
156
157 class GroupRoleViewSet(viewsets.ReadOnlyModelViewSet):
158 permission_classes = (AllowAny,)
159 serializer_class = GroupRoleReadOnlySerializer
160 queryset = GroupRole.objects.all()
161
[end of apps/authentication/api/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/apps/authentication/api/views.py b/apps/authentication/api/views.py
--- a/apps/authentication/api/views.py
+++ b/apps/authentication/api/views.py
@@ -105,6 +105,7 @@
},
status=status.HTTP_400_BAD_REQUEST,
)
+ super().destroy(request, *args, **kwargs)
class PositionViewSet(MultiSerializerMixin, viewsets.ModelViewSet):
| {"golden_diff": "diff --git a/apps/authentication/api/views.py b/apps/authentication/api/views.py\n--- a/apps/authentication/api/views.py\n+++ b/apps/authentication/api/views.py\n@@ -105,6 +105,7 @@\n },\n status=status.HTTP_400_BAD_REQUEST,\n )\n+ super().destroy(request, *args, **kwargs)\n \n \n class PositionViewSet(MultiSerializerMixin, viewsets.ModelViewSet):\n", "issue": "Cant delete mails through REST API endpoints\nThe endpoint to remove mails are fucked :)\n", "before_files": [{"content": "from django.contrib.auth.models import Group\nfrom rest_framework import mixins, status, viewsets\nfrom rest_framework.decorators import action\nfrom rest_framework.permissions import AllowAny, IsAuthenticated\nfrom rest_framework.response import Response\n\nfrom apps.authentication.models import Email, GroupMember, GroupRole, OnlineGroup\nfrom apps.authentication.models import OnlineUser as User\nfrom apps.authentication.models import Position, SpecialPosition\nfrom apps.authentication.serializers import (\n AnonymizeUserSerializer,\n EmailCreateSerializer,\n EmailReadOnlySerializer,\n EmailUpdateSerializer,\n GroupMemberCreateSerializer,\n GroupMemberReadOnlySerializer,\n GroupMemberUpdateSerializer,\n GroupReadOnlySerializer,\n GroupRoleReadOnlySerializer,\n OnlineGroupCreateOrUpdateSerializer,\n OnlineGroupReadOnlySerializer,\n PasswordUpdateSerializer,\n PositionCreateAndUpdateSerializer,\n PositionReadOnlySerializer,\n SpecialPositionSerializer,\n UserCreateSerializer,\n UserReadOnlySerializer,\n UserUpdateSerializer,\n)\nfrom apps.common.rest_framework.mixins import MultiSerializerMixin\nfrom apps.permissions.drf_permissions import DjangoObjectPermissionOrAnonReadOnly\n\nfrom .filters import UserFilter\nfrom .permissions import IsSelfOrSuperUser\nfrom .serializers.user_data import UserDataSerializer\n\n\nclass UserViewSet(\n MultiSerializerMixin,\n viewsets.GenericViewSet,\n mixins.ListModelMixin,\n mixins.RetrieveModelMixin,\n mixins.CreateModelMixin,\n mixins.UpdateModelMixin,\n):\n \"\"\"\n Viewset for User serializer. 
Supports filtering on 'first_name', 'last_name', 'email'\n \"\"\"\n\n permission_classes = (IsSelfOrSuperUser,)\n filterset_class = UserFilter\n queryset = User.objects.all()\n serializer_classes = {\n \"create\": UserCreateSerializer,\n \"update\": UserUpdateSerializer,\n \"read\": UserReadOnlySerializer,\n \"change_password\": PasswordUpdateSerializer,\n \"anonymize_user\": AnonymizeUserSerializer,\n \"dump_data\": UserDataSerializer,\n }\n\n @action(detail=True, methods=[\"put\"])\n def change_password(self, request, pk=None):\n user: User = self.get_object()\n serializer = self.get_serializer(user, data=request.data)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n\n return Response(data=None, status=status.HTTP_204_NO_CONTENT)\n\n @action(detail=True, methods=[\"put\"])\n def anonymize_user(self, request, pk=None):\n user: User = self.get_object()\n serializer = self.get_serializer(user, data=request.data)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n\n return Response(data=None, status=status.HTTP_204_NO_CONTENT)\n\n @action(detail=True, methods=[\"get\"], url_path=\"dump-data\")\n def dump_data(self, request, pk: int):\n user: User = self.get_object()\n serializer = self.get_serializer(user)\n return Response(data=serializer.data, status=status.HTTP_200_OK)\n\n\nclass EmailViewSet(MultiSerializerMixin, viewsets.ModelViewSet):\n permission_classes = (IsAuthenticated,)\n serializer_classes = {\n \"create\": EmailCreateSerializer,\n \"update\": EmailUpdateSerializer,\n \"read\": EmailReadOnlySerializer,\n }\n\n def get_queryset(self):\n return Email.objects.filter(user=self.request.user)\n\n def destroy(self, request, *args, **kwargs):\n instance: Email = self.get_object()\n if instance.primary:\n return Response(\n {\n \"message\": \"Du kan ikke slette en prim\u00e6r-epost. 
Du m\u00e5 f\u00f8rst velge en annen epost som \"\n \"prim\u00e6r for \u00e5 kunne slette denne.\"\n },\n status=status.HTTP_400_BAD_REQUEST,\n )\n\n\nclass PositionViewSet(MultiSerializerMixin, viewsets.ModelViewSet):\n permission_classes = (IsAuthenticated,)\n serializer_classes = {\n \"read\": PositionReadOnlySerializer,\n \"write\": PositionCreateAndUpdateSerializer,\n }\n\n def get_queryset(self):\n user = self.request.user\n return Position.objects.filter(user=user)\n\n\nclass SpecialPositionViewSet(viewsets.ReadOnlyModelViewSet):\n serializer_class = SpecialPositionSerializer\n permission_classes = (IsAuthenticated,)\n\n def get_queryset(self):\n user = self.request.user\n return SpecialPosition.objects.filter(user=user)\n\n\nclass GroupViewSet(viewsets.ReadOnlyModelViewSet):\n permission_classes = (AllowAny,)\n queryset = Group.objects.all()\n serializer_class = GroupReadOnlySerializer\n ordering = (\"name\",)\n\n\nclass OnlineGroupViewSet(MultiSerializerMixin, viewsets.ModelViewSet):\n permission_classes = (DjangoObjectPermissionOrAnonReadOnly,)\n queryset = OnlineGroup.objects.all()\n serializer_classes = {\n \"write\": OnlineGroupCreateOrUpdateSerializer,\n \"read\": OnlineGroupReadOnlySerializer,\n }\n\n\nclass GroupMemberViewSet(MultiSerializerMixin, viewsets.ModelViewSet):\n permission_classes = (DjangoObjectPermissionOrAnonReadOnly,)\n queryset = GroupMember.objects.all()\n serializer_classes = {\n \"create\": GroupMemberCreateSerializer,\n \"update\": GroupMemberUpdateSerializer,\n \"read\": GroupMemberReadOnlySerializer,\n }\n\n\nclass GroupRoleViewSet(viewsets.ReadOnlyModelViewSet):\n permission_classes = (AllowAny,)\n serializer_class = GroupRoleReadOnlySerializer\n queryset = GroupRole.objects.all()\n", "path": "apps/authentication/api/views.py"}]} | 2,033 | 89 |
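The one-line fix in the row above follows the usual DRF shape for vetoing a delete: guard, then delegate to the parent implementation. A minimal sketch of that pattern (error message shortened and translated from the Norwegian original; the conventional form returns the parent's 204 response rather than falling through):

```
from rest_framework import status, viewsets
from rest_framework.response import Response


class EmailViewSet(viewsets.ModelViewSet):
    def destroy(self, request, *args, **kwargs):
        instance = self.get_object()
        if instance.primary:
            # Refuse to delete the primary address with an explicit 400.
            return Response(
                {"message": "A primary email cannot be deleted."},
                status=status.HTTP_400_BAD_REQUEST,
            )
        # ModelViewSet.destroy deletes the instance and returns 204.
        return super().destroy(request, *args, **kwargs)
```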
gh_patches_debug_5467 | rasdani/github-patches | git_diff | openstates__openstates-scrapers-1173 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
TN: 109th Session, Legislators, Upper - Senator name extraction fails for district 19
Trying to run the scraper locally, I encountered the following exception for TN's [19th senate district in the 109th general assembly](http://www.capitol.tn.gov/senate/archives/109GA/members/s19.html) (Sen. Thelma Hale):
```
File "/usr/local/bin/billy-update", line 9, in <module>
load_entry_point('billy==1.8.4', 'console_scripts', 'billy-update')()
File "/opt/sunlightfoundation.com/billy/billy/bin/update.py", line 377, in main
run_record += _run_scraper(stype, args, metadata)
File "/opt/sunlightfoundation.com/billy/billy/bin/update.py", line 101, in _run_scraper
scraper.scrape(chamber, time)
File "/srv/openstates-web/openstates/tn/legislators.py", line 81, in scrape
name = member_page.xpath('//div[@id="membertitle"]/h2/text()')[0]
IndexError: list index out of range
```
</issue>
<code>
[start of openstates/tn/legislators.py]
1 import HTMLParser
2
3 from billy.scrape.legislators import LegislatorScraper, Legislator
4 import lxml.html
5 from scrapelib import HTTPError
6 from openstates.utils import LXMLMixin
7
8 class TNLegislatorScraper(LegislatorScraper, LXMLMixin):
9 jurisdiction = 'tn'
10
11 def scrape(self, chamber, term):
12 self.validate_term(term, latest_only=False)
13 root_url = 'http://www.capitol.tn.gov/'
14 parties = {'D': 'Democratic', 'R': 'Republican',
15 'CCR': 'Carter County Republican',
16 'I': 'Independent'}
17
18 #testing for chamber
19 if chamber == 'upper':
20 url_chamber_name = 'senate'
21 abbr = 's'
22 else:
23 url_chamber_name = 'house'
24 abbr = 'h'
25 if term != self.metadata["terms"][-1]["sessions"][0]:
26 chamber_url = root_url + url_chamber_name
27 chamber_url += '/archives/' + term + 'GA/Members/index.html'
28 else:
29 chamber_url = root_url + url_chamber_name + '/members/'
30
31 page = self.lxmlize(chamber_url)
32
33 for row in page.xpath("//tr"):
34
35 # Skip any a header row.
36 if set(child.tag for child in row) == set(['th']):
37 continue
38
39 vacancy_check = row.xpath('./td/text()')[1]
40 if 'Vacant' in vacancy_check:
41 self.logger.warning("Vacant Seat")
42 continue
43
44 partyInit = row.xpath('td[3]')[0].text.split()[0]
45 party = parties[partyInit]
46 district = row.xpath('td[5]/a')[0].text.split()[1]
47 address = row.xpath('td[6]')[0].text_content()
48 # 301 6th Avenue North Suite
49 address = address.replace('LP',
50 'Legislative Plaza\nNashville, TN 37243')
51 address = address.replace('WMB',
52 'War Memorial Building\nNashville, TN 37243')
53 address = '301 6th Avenue North\nSuite ' + address
54 phone = [
55 x.strip() for x in
56 row.xpath('td[7]//text()')
57 if x.strip()
58 ][0]
59
60 email = HTMLParser.HTMLParser().unescape(
61 row.xpath('td[1]/a/@href')[0][len("mailto:"): ])
62 member_url = (root_url + url_chamber_name + '/members/' + abbr +
63 district + '.html')
64 member_photo_url = (root_url + url_chamber_name +
65 '/members/images/' + abbr + district + '.jpg')
66
67 try:
68 member_page = self.get(member_url, allow_redirects=False).text
69 except (TypeError, HTTPError):
70 try:
71 member_url = row.xpath('td[2]/a/@href')[0]
72 member_page = self.get(member_url, allow_redirects=False).text
73 except (TypeError, HTTPError):
74 self.logger.warning("Valid member page does not exist.")
75 continue
76
77 member_page = lxml.html.fromstring(member_page)
78 try:
79 name = member_page.xpath('body/div/div/h1/text()')[0]
80 except IndexError:
81 name = member_page.xpath('//div[@id="membertitle"]/h2/text()')[0]
82
83 if 'Speaker' in name:
84 full_name = name[8:len(name)]
85 elif 'Lt.' in name:
86 full_name = name[13:len(name)]
87 elif abbr == 'h':
88 full_name = name[len("Representative "): len(name)]
89 else:
90 full_name = name[8:len(name)]
91
92 leg = Legislator(term, chamber, district, full_name.strip(),
93 party=party, url=member_url,
94 photo_url=member_photo_url)
95 leg.add_source(chamber_url)
96 leg.add_source(member_url)
97
98 # TODO: add district address from this page
99
100 leg.add_office('capitol', 'Nashville Address',
101 address=address, phone=phone, email=email)
102
103 self.save_legislator(leg)
104
[end of openstates/tn/legislators.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/openstates/tn/legislators.py b/openstates/tn/legislators.py
--- a/openstates/tn/legislators.py
+++ b/openstates/tn/legislators.py
@@ -76,7 +76,7 @@
member_page = lxml.html.fromstring(member_page)
try:
- name = member_page.xpath('body/div/div/h1/text()')[0]
+ name = member_page.xpath('//div/div/h1/text()')[0]
except IndexError:
name = member_page.xpath('//div[@id="membertitle"]/h2/text()')[0]
| {"golden_diff": "diff --git a/openstates/tn/legislators.py b/openstates/tn/legislators.py\n--- a/openstates/tn/legislators.py\n+++ b/openstates/tn/legislators.py\n@@ -76,7 +76,7 @@\n \n member_page = lxml.html.fromstring(member_page)\n try:\n- name = member_page.xpath('body/div/div/h1/text()')[0]\n+ name = member_page.xpath('//div/div/h1/text()')[0]\n except IndexError:\n name = member_page.xpath('//div[@id=\"membertitle\"]/h2/text()')[0]\n", "issue": "TN: 109th Session, Legislators, Upper - Senator name extraction fails for district 19\nTrying to run the scraper locally, I encountered the following exception for TN's [19th senate district in the 109th general assembly](http://www.capitol.tn.gov/senate/archives/109GA/members/s19.html) (Sen. Thelma Hale):\r\n\r\n```\r\nFile \"/usr/local/bin/billy-update\", line 9, in <module>\r\n load_entry_point('billy==1.8.4', 'console_scripts', 'billy-update')()\r\n File \"/opt/sunlightfoundation.com/billy/billy/bin/update.py\", line 377, in main\r\n run_record += _run_scraper(stype, args, metadata)\r\n File \"/opt/sunlightfoundation.com/billy/billy/bin/update.py\", line 101, in _run_scraper\r\n scraper.scrape(chamber, time)\r\n File \"/srv/openstates-web/openstates/tn/legislators.py\", line 81, in scrape\r\n name = member_page.xpath('//div[@id=\"membertitle\"]/h2/text()')[0]\r\nIndexError: list index out of range\r\n```\n", "before_files": [{"content": "import HTMLParser\n\nfrom billy.scrape.legislators import LegislatorScraper, Legislator\nimport lxml.html\nfrom scrapelib import HTTPError\nfrom openstates.utils import LXMLMixin\n\nclass TNLegislatorScraper(LegislatorScraper, LXMLMixin):\n jurisdiction = 'tn'\n\n def scrape(self, chamber, term):\n self.validate_term(term, latest_only=False)\n root_url = 'http://www.capitol.tn.gov/'\n parties = {'D': 'Democratic', 'R': 'Republican',\n 'CCR': 'Carter County Republican',\n 'I': 'Independent'}\n\n #testing for chamber\n if chamber == 'upper':\n url_chamber_name = 'senate'\n abbr = 's'\n else:\n url_chamber_name = 'house'\n abbr = 'h'\n if term != self.metadata[\"terms\"][-1][\"sessions\"][0]:\n chamber_url = root_url + url_chamber_name\n chamber_url += '/archives/' + term + 'GA/Members/index.html'\n else:\n chamber_url = root_url + url_chamber_name + '/members/'\n\n page = self.lxmlize(chamber_url)\n\n for row in page.xpath(\"//tr\"):\n\n # Skip any a header row.\n if set(child.tag for child in row) == set(['th']):\n continue\n\n vacancy_check = row.xpath('./td/text()')[1]\n if 'Vacant' in vacancy_check:\n self.logger.warning(\"Vacant Seat\")\n continue\n\n partyInit = row.xpath('td[3]')[0].text.split()[0]\n party = parties[partyInit]\n district = row.xpath('td[5]/a')[0].text.split()[1]\n address = row.xpath('td[6]')[0].text_content()\n # 301 6th Avenue North Suite\n address = address.replace('LP',\n 'Legislative Plaza\\nNashville, TN 37243')\n address = address.replace('WMB',\n 'War Memorial Building\\nNashville, TN 37243')\n address = '301 6th Avenue North\\nSuite ' + address\n phone = [\n x.strip() for x in\n row.xpath('td[7]//text()')\n if x.strip()\n ][0]\n\n email = HTMLParser.HTMLParser().unescape(\n row.xpath('td[1]/a/@href')[0][len(\"mailto:\"): ])\n member_url = (root_url + url_chamber_name + '/members/' + abbr +\n district + '.html')\n member_photo_url = (root_url + url_chamber_name +\n '/members/images/' + abbr + district + '.jpg')\n\n try:\n member_page = self.get(member_url, allow_redirects=False).text\n except (TypeError, HTTPError):\n try:\n member_url = row.xpath('td[2]/a/@href')[0]\n 
member_page = self.get(member_url, allow_redirects=False).text\n except (TypeError, HTTPError):\n self.logger.warning(\"Valid member page does not exist.\")\n continue\n\n member_page = lxml.html.fromstring(member_page)\n try:\n name = member_page.xpath('body/div/div/h1/text()')[0]\n except IndexError:\n name = member_page.xpath('//div[@id=\"membertitle\"]/h2/text()')[0]\n \n if 'Speaker' in name:\n full_name = name[8:len(name)]\n elif 'Lt.' in name:\n full_name = name[13:len(name)]\n elif abbr == 'h':\n full_name = name[len(\"Representative \"): len(name)]\n else:\n full_name = name[8:len(name)]\n\n leg = Legislator(term, chamber, district, full_name.strip(),\n party=party, url=member_url,\n photo_url=member_photo_url)\n leg.add_source(chamber_url)\n leg.add_source(member_url)\n\n # TODO: add district address from this page\n\n leg.add_office('capitol', 'Nashville Address',\n address=address, phone=phone, email=email)\n\n self.save_legislator(leg)\n", "path": "openstates/tn/legislators.py"}]} | 1,934 | 138 |
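The merged fix above only loosens the XPath from `body/div/div/h1` to `//div/div/h1`; the scraper still falls back to `//div[@id="membertitle"]/h2` for archived pages. A sketch of the same idea factored into a helper that tries each known layout in turn (both expressions come from the file itself; the function name is made up for illustration):

```
import lxml.html


def extract_member_name(html):
    page = lxml.html.fromstring(html)
    candidates = (
        '//div/div/h1/text()',                  # current member pages
        '//div[@id="membertitle"]/h2/text()',   # archived pages, e.g. 109th GA
    )
    for expression in candidates:
        matches = page.xpath(expression)
        if matches:
            return matches[0].strip()
    raise ValueError("no recognizable name element on member page")
```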
gh_patches_debug_3144 | rasdani/github-patches | git_diff | cupy__cupy-2588 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Stream in the context-manager form is not used in `ElementwiseKernel` or `ReductionKernel`
This is actually a bug reported back in #1695 that unfortunately went unnoticed.
In `examples/stream/map_reduce.py`, a list of streams was created for executing `cupy.matmul()` in parallel, which is backed by a `ReductionKernel` in this case: https://github.com/cupy/cupy/blob/1af22f57fda92ae35bde806d0c4d110faf4fed52/cupy/core/core.pyx#L2513-L2516
However, inspecting the implementation I found that `ReductionKernel` only accepts an explicit `stream` argument; it does not pick up any current stream: https://github.com/cupy/cupy/blob/32718607a7808ec6bc3a24cf9231a9351f8fc95e/cupy/core/reduction.pxi#L396
In other words, that example was misleading because those streams were not used at all and so all executions were serialized, as can be checked from nvprof + nvvp (see the circle in red):
<img width="972" alt="螢幕快照 2019-10-03 上午11 24 27" src="https://user-images.githubusercontent.com/5534781/66140715-978bf180-e5d0-11e9-8228-f613a3eba6fd.png">
The same bug also appears in `ElementwiseKernel`:
https://github.com/cupy/cupy/blob/1af22f57fda92ae35bde806d0c4d110faf4fed52/cupy/core/_kernel.pyx#L537
In my opinion, unlike `RawKernel` which is not used by any CuPy core functionalities, `ElementwiseKernel` and `ReductionKernel` should honor the current stream by checking the current stream pointer if no stream argument is explicitly given, since many CuPy functions like `cupy.matmul()` do not support passing in a stream. A similar approach is already adopted in the FFT module, see #2362.
</issue>
<code>
[start of examples/stream/map_reduce.py]
1 import cupy
2 import time
3
4 device = cupy.cuda.Device()
5 memory_pool = cupy.cuda.MemoryPool()
6 cupy.cuda.set_allocator(memory_pool.malloc)
7 rand = cupy.random.generator.RandomState(seed=1)
8
9 n = 10
10 zs = []
11 map_streams = []
12 stop_events = []
13 reduce_stream = cupy.cuda.stream.Stream()
14 for i in range(n):
15 map_streams.append(cupy.cuda.stream.Stream())
16
17 start_time = time.time()
18
19 # Map
20 for stream in map_streams:
21 with stream:
22 x = rand.normal(size=(1, 1024 * 256))
23 y = rand.normal(size=(1024 * 256, 1))
24 z = cupy.matmul(x, y)
25 zs.append(z)
26 stop_event = stream.record()
27 stop_events.append(stop_event)
28
29 # Block the `reduce_stream` until all events occur. This does not block host.
30 # This is not required when reduction is performed in the default (Stream.null)
31 # stream unless streams are created with `non_blocking=True` flag.
32 for i in range(n):
33 reduce_stream.wait_event(stop_events[i])
34
35 # Reduce
36 with reduce_stream:
37 z = sum(zs)
38
39 device.synchronize()
40 elapsed_time = time.time() - start_time
41 print('elapsed time', elapsed_time)
42 print('total bytes', memory_pool.total_bytes())
43
44 # Free all blocks in the memory pool of streams
45 for stream in map_streams:
46 memory_pool.free_all_blocks(stream=stream)
47 print('total bytes', memory_pool.total_bytes())
48
[end of examples/stream/map_reduce.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/stream/map_reduce.py b/examples/stream/map_reduce.py
--- a/examples/stream/map_reduce.py
+++ b/examples/stream/map_reduce.py
@@ -19,8 +19,8 @@
# Map
for stream in map_streams:
with stream:
- x = rand.normal(size=(1, 1024 * 256))
- y = rand.normal(size=(1024 * 256, 1))
+ x = rand.normal(size=(1, 1024**2))
+ y = rand.normal(size=(1024**2, 1))
z = cupy.matmul(x, y)
zs.append(z)
stop_event = stream.record()
| {"golden_diff": "diff --git a/examples/stream/map_reduce.py b/examples/stream/map_reduce.py\n--- a/examples/stream/map_reduce.py\n+++ b/examples/stream/map_reduce.py\n@@ -19,8 +19,8 @@\n # Map\n for stream in map_streams:\n with stream:\n- x = rand.normal(size=(1, 1024 * 256))\n- y = rand.normal(size=(1024 * 256, 1))\n+ x = rand.normal(size=(1, 1024**2))\n+ y = rand.normal(size=(1024**2, 1))\n z = cupy.matmul(x, y)\n zs.append(z)\n stop_event = stream.record()\n", "issue": "Stream in the context-manager form is not used in `ElementwiseKernel` or `ReductionKernel`\nThis is actually a bug reported back in #1695 that unfortunately went unnoticed. \r\n\r\nIn `examples/stream/map_reduce.py`, a list of streams was created for executing `cupy.matmul()` in parallel, which is backed by a `ReductionKernel` in this case: https://github.com/cupy/cupy/blob/1af22f57fda92ae35bde806d0c4d110faf4fed52/cupy/core/core.pyx#L2513-L2516\r\nHowever, inspecting the implementation I found that `ReductionKernel` only accepts an explicit `stream` argument; it does not pick up any current stream: https://github.com/cupy/cupy/blob/32718607a7808ec6bc3a24cf9231a9351f8fc95e/cupy/core/reduction.pxi#L396\r\nIn other words, that example was misleading because those streams were not used at all and so all executions were serialized, as can be checked from nvprof + nvvp (see the circle in red):\r\n<img width=\"972\" alt=\"\u87a2\u5e55\u5feb\u7167 2019-10-03 \u4e0a\u534811 24 27\" src=\"https://user-images.githubusercontent.com/5534781/66140715-978bf180-e5d0-11e9-8228-f613a3eba6fd.png\">\r\n\r\nThe same bug also appears in `ElementwiseKernel`:\r\nhttps://github.com/cupy/cupy/blob/1af22f57fda92ae35bde806d0c4d110faf4fed52/cupy/core/_kernel.pyx#L537\r\n\r\nIn my opinion, unlike `RawKernel` which is not used by any CuPy core functionalities, `ElementwiseKernel` and `ReductionKernel` should honor the current stream by checking the current stream pointer if no stream argument is explicitly given, since many CuPy functions like `cupy.matmul()` do not support passing in a stream. A similar approach is already adopted in the FFT module, see #2362.\n", "before_files": [{"content": "import cupy\nimport time\n\ndevice = cupy.cuda.Device()\nmemory_pool = cupy.cuda.MemoryPool()\ncupy.cuda.set_allocator(memory_pool.malloc)\nrand = cupy.random.generator.RandomState(seed=1)\n\nn = 10\nzs = []\nmap_streams = []\nstop_events = []\nreduce_stream = cupy.cuda.stream.Stream()\nfor i in range(n):\n map_streams.append(cupy.cuda.stream.Stream())\n\nstart_time = time.time()\n\n# Map\nfor stream in map_streams:\n with stream:\n x = rand.normal(size=(1, 1024 * 256))\n y = rand.normal(size=(1024 * 256, 1))\n z = cupy.matmul(x, y)\n zs.append(z)\n stop_event = stream.record()\n stop_events.append(stop_event)\n\n# Block the `reduce_stream` until all events occur. This does not block host.\n# This is not required when reduction is performed in the default (Stream.null)\n# stream unless streams are created with `non_blocking=True` flag.\nfor i in range(n):\n reduce_stream.wait_event(stop_events[i])\n\n# Reduce\nwith reduce_stream:\n z = sum(zs)\n\ndevice.synchronize()\nelapsed_time = time.time() - start_time\nprint('elapsed time', elapsed_time)\nprint('total bytes', memory_pool.total_bytes())\n\n# Free all blocks in the memory pool of streams\nfor stream in map_streams:\n memory_pool.free_all_blocks(stream=stream)\nprint('total bytes', memory_pool.total_bytes())\n", "path": "examples/stream/map_reduce.py"}]} | 1,469 | 158 |
gh_patches_debug_38311 | rasdani/github-patches | git_diff | conan-io__conan-center-index-3991 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[package] resiprocate/1.12.0: Error in autotools configuration
There are several errors in the recipe, especially in the autotools configuration step.
https://github.com/conan-io/conan-center-index/blob/c68b60af3de84cf460c7512efce75290f1b395cb/recipes/resiprocate/all/conanfile.py#L50-L58
First, in lines 54~57, the value passed to each option is the inverse of what the consumer actually gave. For example, if `with_ssl` is true, the configuration option is passed as `--with-ssl=no`.
Second, the project's configure script doesn't understand value assignments for the following options: ssl, mysql, postgresql. In other words, `--with-ssl=yes` and `--with-ssl=no` make no difference in the configuration step, and the library ends up built with the ssl feature on in both cases. You can check this either in the resiprocate project's [`configure.ac` file](https://github.com/resiprocate/resiprocate/blob/6b2756ba8516726cfb04e2b2fa5f4e3e67598a31/configure.ac#L113-L119) or in the `configure` file autogenerated from `configure.ac`, shown below.
```
# Check whether --with-ssl was given.
if test "${with_ssl+set}" = set; then :
withval=$with_ssl;
cat >>confdefs.h <<_ACEOF
#define USE_SSL /**/
_ACEOF
LIBSSL_LIBADD="-lssl -lcrypto"
if true; then
USE_SSL_TRUE=
USE_SSL_FALSE='#'
else
USE_SSL_TRUE='#'
USE_SSL_FALSE=
fi
else
LIBSSL_LIBADD=""
fi
```
And lastly, the project requires pthread, and the recipe didn't include it. When I turn off the ssl, mysql, and postgresql options, linking the library fails with a missing-pthread error. I guess pthread happened to be a secondary dependency of the ssl, mysql, or postgresql library, and that's why linking worked when those options were on even though the recipe didn't explicitly include pthread. I suggest we add pthread to `cpp_info.system_libs`.
I'm really thankful to those who have worked on this package. I didn't expect it to ever be on conan, but now it's so nice and easy to use. Happy holidays, guys!
</issue>
<code>
[start of recipes/resiprocate/all/conanfile.py]
1 import os
2 from conans import ConanFile, AutoToolsBuildEnvironment, tools
3 from conans.errors import ConanInvalidConfiguration
4
5
6 required_conan_version = ">=1.29.1"
7
8 class ResiprocateConan(ConanFile):
9 name = "resiprocate"
10 description = "The project is dedicated to maintaining a complete, correct, and commercially usable implementation of SIP and a few related protocols. "
11 topics = ("sip", "voip", "communication", "signaling")
12 url = "https://github.com/conan-io/conan-center-index"
13 homepage = "http://www.resiprocate.org"
14 license = "VSL-1.0"
15 settings = "os", "compiler", "build_type", "arch"
16 options = {"fPIC": [True, False],
17 "shared": [True, False],
18 "with_ssl": [True, False],
19 "with_postgresql": [True, False],
20 "with_mysql": [True, False]}
21 default_options = {"fPIC": True,
22 "shared": False,
23 "with_ssl": True,
24 "with_postgresql": True,
25 "with_mysql": True}
26 _autotools = None
27
28 @property
29 def _source_subfolder(self):
30 return "source_subfolder"
31
32 def requirements(self):
33 if self.settings.os in ("Windows", "Macos"):
34 raise ConanInvalidConfiguration("reSIProcate is not support on {}.".format(self.settings.os))
35 if self.options.with_ssl:
36 self.requires("openssl/1.1.1h")
37 if self.options.with_postgresql:
38 self.requires("libpq/11.5")
39 if self.options.with_mysql:
40 self.requires("libmysqlclient/8.0.17")
41
42 def source(self):
43 tools.get(**self.conan_data["sources"][self.version])
44 os.rename("{}-{}".format(self.name, self.version), self._source_subfolder)
45
46 def _configure_autotools(self):
47 if self._autotools:
48 return self._autotools
49 self._autotools = AutoToolsBuildEnvironment(self)
50 yes_no = lambda v: "yes" if v else "no"
51 configure_args = [
52 "--enable-shared={}".format(yes_no(self.options.shared)),
53 "--enable-static={}".format(yes_no(not self.options.shared)),
54 "--with-ssl={}".format(yes_no(not self.options.with_ssl)),
55 "--with-mysql={}".format(yes_no(not self.options.with_mysql)),
56 "--with-postgresql={}".format(yes_no(not self.options.with_postgresql)),
57 "--with-pic={}".format(yes_no(not self.options.fPIC))
58 ]
59
60 self._autotools.configure(configure_dir=self._source_subfolder, args=configure_args)
61 return self._autotools
62
63 def build(self):
64 autotools = self._configure_autotools()
65 autotools.make()
66
67 def package(self):
68 self.copy("COPYING", src=self._source_subfolder, dst="licenses")
69 autotools = self._configure_autotools()
70 autotools.install()
71 tools.rmdir(os.path.join(os.path.join(self.package_folder, "share")))
72 tools.remove_files_by_mask(os.path.join(self.package_folder), "*.la")
73
74 def package_info(self):
75 self.cpp_info.libs = ["resip", "rutil", "dum", "resipares"]
76 bin_path = os.path.join(self.package_folder, "bin")
77 self.output.info("Appending PATH environment variable: {}".format(bin_path))
78 self.env_info.PATH.append(os.path.join(self.package_folder, "bin"))
79
[end of recipes/resiprocate/all/conanfile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/recipes/resiprocate/all/conanfile.py b/recipes/resiprocate/all/conanfile.py
--- a/recipes/resiprocate/all/conanfile.py
+++ b/recipes/resiprocate/all/conanfile.py
@@ -29,13 +29,22 @@
def _source_subfolder(self):
return "source_subfolder"
- def requirements(self):
+ def config_options(self):
+ if self.settings.os == 'Windows':
+ del self.options.fPIC
+
+ def configure(self):
if self.settings.os in ("Windows", "Macos"):
- raise ConanInvalidConfiguration("reSIProcate is not support on {}.".format(self.settings.os))
+ # FIXME: Visual Studio project & Mac support seems available in resiprocate
+ raise ConanInvalidConfiguration("reSIProcate recipe does not currently support {}.".format(self.settings.os))
+ if self.options.shared:
+ del self.options.fPIC
+
+ def requirements(self):
if self.options.with_ssl:
- self.requires("openssl/1.1.1h")
+ self.requires("openssl/1.1.1i")
if self.options.with_postgresql:
- self.requires("libpq/11.5")
+ self.requires("libpq/11.9")
if self.options.with_mysql:
self.requires("libmysqlclient/8.0.17")
@@ -51,12 +60,17 @@
configure_args = [
"--enable-shared={}".format(yes_no(self.options.shared)),
"--enable-static={}".format(yes_no(not self.options.shared)),
- "--with-ssl={}".format(yes_no(not self.options.with_ssl)),
- "--with-mysql={}".format(yes_no(not self.options.with_mysql)),
- "--with-postgresql={}".format(yes_no(not self.options.with_postgresql)),
- "--with-pic={}".format(yes_no(not self.options.fPIC))
+ "--with-pic={}".format(yes_no(self.options.get_safe("fPIC", True)))
]
+ # These options do not support yes/no
+ if self.options.with_ssl:
+ configure_args.append("--with-ssl")
+ if self.options.with_mysql:
+ configure_args.append("--with-mysql")
+ if self.options.with_postgresql:
+ configure_args.append("--with-postgresql")
+
self._autotools.configure(configure_dir=self._source_subfolder, args=configure_args)
return self._autotools
@@ -73,6 +87,8 @@
def package_info(self):
self.cpp_info.libs = ["resip", "rutil", "dum", "resipares"]
+ if self.settings.os in ("Linux", "FreeBSD"):
+ self.cpp_info.system_libs = ["pthread"]
bin_path = os.path.join(self.package_folder, "bin")
self.output.info("Appending PATH environment variable: {}".format(bin_path))
self.env_info.PATH.append(os.path.join(self.package_folder, "bin"))
| {"golden_diff": "diff --git a/recipes/resiprocate/all/conanfile.py b/recipes/resiprocate/all/conanfile.py\n--- a/recipes/resiprocate/all/conanfile.py\n+++ b/recipes/resiprocate/all/conanfile.py\n@@ -29,13 +29,22 @@\n def _source_subfolder(self):\n return \"source_subfolder\"\n \n- def requirements(self):\n+ def config_options(self):\n+ if self.settings.os == 'Windows':\n+ del self.options.fPIC\n+\n+ def configure(self):\n if self.settings.os in (\"Windows\", \"Macos\"):\n- raise ConanInvalidConfiguration(\"reSIProcate is not support on {}.\".format(self.settings.os))\n+ # FIXME: Visual Studio project & Mac support seems available in resiprocate\n+ raise ConanInvalidConfiguration(\"reSIProcate recipe does not currently support {}.\".format(self.settings.os))\n+ if self.options.shared:\n+ del self.options.fPIC\n+\n+ def requirements(self):\n if self.options.with_ssl:\n- self.requires(\"openssl/1.1.1h\")\n+ self.requires(\"openssl/1.1.1i\")\n if self.options.with_postgresql:\n- self.requires(\"libpq/11.5\")\n+ self.requires(\"libpq/11.9\")\n if self.options.with_mysql:\n self.requires(\"libmysqlclient/8.0.17\")\n \n@@ -51,12 +60,17 @@\n configure_args = [\n \"--enable-shared={}\".format(yes_no(self.options.shared)),\n \"--enable-static={}\".format(yes_no(not self.options.shared)),\n- \"--with-ssl={}\".format(yes_no(not self.options.with_ssl)),\n- \"--with-mysql={}\".format(yes_no(not self.options.with_mysql)),\n- \"--with-postgresql={}\".format(yes_no(not self.options.with_postgresql)),\n- \"--with-pic={}\".format(yes_no(not self.options.fPIC))\n+ \"--with-pic={}\".format(yes_no(self.options.get_safe(\"fPIC\", True)))\n ]\n \n+ # These options do not support yes/no\n+ if self.options.with_ssl:\n+ configure_args.append(\"--with-ssl\")\n+ if self.options.with_mysql:\n+ configure_args.append(\"--with-mysql\")\n+ if self.options.with_postgresql:\n+ configure_args.append(\"--with-postgresql\")\n+ \n self._autotools.configure(configure_dir=self._source_subfolder, args=configure_args)\n return self._autotools\n \n@@ -73,6 +87,8 @@\n \n def package_info(self):\n self.cpp_info.libs = [\"resip\", \"rutil\", \"dum\", \"resipares\"]\n+ if self.settings.os in (\"Linux\", \"FreeBSD\"):\n+ self.cpp_info.system_libs = [\"pthread\"]\n bin_path = os.path.join(self.package_folder, \"bin\")\n self.output.info(\"Appending PATH environment variable: {}\".format(bin_path))\n self.env_info.PATH.append(os.path.join(self.package_folder, \"bin\"))\n", "issue": "[package] resiprocate/1.12.0: Error in autotools configuration\nThere are several errors in the recipe, especially in autotools configuration step.\r\n\r\nhttps://github.com/conan-io/conan-center-index/blob/c68b60af3de84cf460c7512efce75290f1b395cb/recipes/resiprocate/all/conanfile.py#L50-L58\r\n\r\nFirst, in the lines 54~57, values passed to each options are the inverse of what a consumer actually gave. For example, if `with_ssl` is true, then configuration option is passed as `--with-ssl=no`.\r\n\r\nSecond, the configure script of the project doesn't understand value assignments to the following options : ssl, mysql, postgresql. In other words, `--with-ssl=yes` and `--with-ssl=no` would make no difference in the configuration step and eventually the library is built with ssl feature on in both cases. You can check this out either from resiprocate project's [`configure.ac` file](https://github.com/resiprocate/resiprocate/blob/6b2756ba8516726cfb04e2b2fa5f4e3e67598a31/configure.ac#L113-L119), or from `configure` file autogenerated from `configure.ac` as below. 
\r\n\r\n```\r\n# Check whether --with-ssl was given.\r\nif test \"${with_ssl+set}\" = set; then :\r\n withval=$with_ssl;\r\ncat >>confdefs.h <<_ACEOF\r\n#define USE_SSL /**/\r\n_ACEOF\r\n\r\n LIBSSL_LIBADD=\"-lssl -lcrypto\"\r\n\r\n if true; then\r\n USE_SSL_TRUE=\r\n USE_SSL_FALSE='#'\r\nelse\r\n USE_SSL_TRUE='#'\r\n USE_SSL_FALSE=\r\nfi\r\n\r\nelse\r\n LIBSSL_LIBADD=\"\"\r\n\r\nfi\r\n```\r\n\r\nAnd lastly, the project requires pthread, and the recipe didn't include that. When I turn off ssl, mysql, postgresql options, linking the library causes pthread missing error. I guess pthread happened to be secondary dependency from ssl or mysql or postgresql library, and that's why it worked when the options were on even though the recipe didn't explicitly include pthread. I suggest we add pthread in `cpp_info.system_libs`\r\n\r\nI'm really thankful to those who had worked in this package. I didn't expect it to be on conan ever, but now it's so nice and easy to use. Happy holidays guys! \n", "before_files": [{"content": "import os\nfrom conans import ConanFile, AutoToolsBuildEnvironment, tools\nfrom conans.errors import ConanInvalidConfiguration\n\n\nrequired_conan_version = \">=1.29.1\"\n\nclass ResiprocateConan(ConanFile):\n name = \"resiprocate\"\n description = \"The project is dedicated to maintaining a complete, correct, and commercially usable implementation of SIP and a few related protocols. \"\n topics = (\"sip\", \"voip\", \"communication\", \"signaling\")\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"http://www.resiprocate.org\"\n license = \"VSL-1.0\"\n settings = \"os\", \"compiler\", \"build_type\", \"arch\"\n options = {\"fPIC\": [True, False],\n \"shared\": [True, False],\n \"with_ssl\": [True, False],\n \"with_postgresql\": [True, False],\n \"with_mysql\": [True, False]}\n default_options = {\"fPIC\": True,\n \"shared\": False,\n \"with_ssl\": True,\n \"with_postgresql\": True,\n \"with_mysql\": True}\n _autotools = None\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n def requirements(self):\n if self.settings.os in (\"Windows\", \"Macos\"):\n raise ConanInvalidConfiguration(\"reSIProcate is not support on {}.\".format(self.settings.os))\n if self.options.with_ssl:\n self.requires(\"openssl/1.1.1h\")\n if self.options.with_postgresql:\n self.requires(\"libpq/11.5\")\n if self.options.with_mysql:\n self.requires(\"libmysqlclient/8.0.17\")\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n os.rename(\"{}-{}\".format(self.name, self.version), self._source_subfolder)\n\n def _configure_autotools(self):\n if self._autotools:\n return self._autotools\n self._autotools = AutoToolsBuildEnvironment(self)\n yes_no = lambda v: \"yes\" if v else \"no\"\n configure_args = [\n \"--enable-shared={}\".format(yes_no(self.options.shared)),\n \"--enable-static={}\".format(yes_no(not self.options.shared)),\n \"--with-ssl={}\".format(yes_no(not self.options.with_ssl)),\n \"--with-mysql={}\".format(yes_no(not self.options.with_mysql)),\n \"--with-postgresql={}\".format(yes_no(not self.options.with_postgresql)),\n \"--with-pic={}\".format(yes_no(not self.options.fPIC))\n ]\n\n self._autotools.configure(configure_dir=self._source_subfolder, args=configure_args)\n return self._autotools\n\n def build(self):\n autotools = self._configure_autotools()\n autotools.make()\n\n def package(self):\n self.copy(\"COPYING\", src=self._source_subfolder, dst=\"licenses\")\n autotools = self._configure_autotools()\n 
autotools.install()\n tools.rmdir(os.path.join(os.path.join(self.package_folder, \"share\")))\n tools.remove_files_by_mask(os.path.join(self.package_folder), \"*.la\")\n\n def package_info(self):\n self.cpp_info.libs = [\"resip\", \"rutil\", \"dum\", \"resipares\"]\n bin_path = os.path.join(self.package_folder, \"bin\")\n self.output.info(\"Appending PATH environment variable: {}\".format(bin_path))\n self.env_info.PATH.append(os.path.join(self.package_folder, \"bin\"))\n", "path": "recipes/resiprocate/all/conanfile.py"}]} | 2,042 | 671 |
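The distinction the issue in this row draws, `--enable-*` flags that take `yes`/`no` versus `--with-ssl`/`--with-mysql`/`--with-postgresql` that are presence-only in this project's `configure.ac`, is exactly what the merged diff encodes. A sketch of that logic pulled out into a plain helper (illustrative only; the recipe itself builds the list inline in `_configure_autotools`):

```
def build_configure_args(shared, fpic, with_ssl, with_mysql, with_postgresql):
    yes_no = lambda v: "yes" if v else "no"
    args = [
        "--enable-shared={}".format(yes_no(shared)),
        "--enable-static={}".format(yes_no(not shared)),
        "--with-pic={}".format(yes_no(fpic)),
    ]
    # configure.ac only tests ${with_ssl+set} and friends, so a feature is
    # disabled by omitting the flag entirely; "--with-ssl=no" still enables it.
    if with_ssl:
        args.append("--with-ssl")
    if with_mysql:
        args.append("--with-mysql")
    if with_postgresql:
        args.append("--with-postgresql")
    return args
```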
gh_patches_debug_4573 | rasdani/github-patches | git_diff | pre-commit__pre-commit-235 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Some versions of git don't create .git/hooks directory
Noticed here: https://github.com/victorlin/bugbuzz-python/pull/1#issuecomment-104971132
</issue>
<code>
[start of pre_commit/commands/install_uninstall.py]
1 from __future__ import print_function
2 from __future__ import unicode_literals
3
4 import io
5 import logging
6 import os
7 import os.path
8 import stat
9 import sys
10
11 from pre_commit.logging_handler import LoggingHandler
12 from pre_commit.util import resource_filename
13
14
15 logger = logging.getLogger('pre_commit')
16
17
18 # This is used to identify the hook file we install
19 PREVIOUS_IDENTIFYING_HASHES = (
20 '4d9958c90bc262f47553e2c073f14cfe',
21 'd8ee923c46731b42cd95cc869add4062',
22 '49fd668cb42069aa1b6048464be5d395',
23 )
24
25
26 IDENTIFYING_HASH = '79f09a650522a87b0da915d0d983b2de'
27
28
29 def is_our_pre_commit(filename):
30 return IDENTIFYING_HASH in io.open(filename).read()
31
32
33 def is_previous_pre_commit(filename):
34 contents = io.open(filename).read()
35 return any(hash in contents for hash in PREVIOUS_IDENTIFYING_HASHES)
36
37
38 def make_executable(filename):
39 original_mode = os.stat(filename).st_mode
40 os.chmod(
41 filename,
42 original_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH,
43 )
44
45
46 def install(runner, overwrite=False, hooks=False, hook_type='pre-commit'):
47 """Install the pre-commit hooks."""
48 hook_path = runner.get_hook_path(hook_type)
49 legacy_path = hook_path + '.legacy'
50
51 # If we have an existing hook, move it to pre-commit.legacy
52 if (
53 os.path.exists(hook_path) and
54 not is_our_pre_commit(hook_path) and
55 not is_previous_pre_commit(hook_path)
56 ):
57 os.rename(hook_path, legacy_path)
58
59 # If we specify overwrite, we simply delete the legacy file
60 if overwrite and os.path.exists(legacy_path):
61 os.remove(legacy_path)
62 elif os.path.exists(legacy_path):
63 print(
64 'Running in migration mode with existing hooks at {0}\n'
65 'Use -f to use only pre-commit.'.format(
66 legacy_path,
67 )
68 )
69
70 with io.open(hook_path, 'w') as pre_commit_file_obj:
71 if hook_type == 'pre-push':
72 with io.open(resource_filename('pre-push-tmpl')) as fp:
73 pre_push_contents = fp.read()
74 else:
75 pre_push_contents = ''
76
77 contents = io.open(resource_filename('hook-tmpl')).read().format(
78 sys_executable=sys.executable,
79 hook_type=hook_type,
80 pre_push=pre_push_contents,
81 )
82 pre_commit_file_obj.write(contents)
83 make_executable(hook_path)
84
85 print('pre-commit installed at {0}'.format(hook_path))
86
87 # If they requested we install all of the hooks, do so.
88 if hooks:
89 # Set up our logging handler
90 logger.addHandler(LoggingHandler(False))
91 logger.setLevel(logging.INFO)
92 for repository in runner.repositories:
93 repository.require_installed()
94
95 return 0
96
97
98 def uninstall(runner, hook_type='pre-commit'):
99 """Uninstall the pre-commit hooks."""
100 hook_path = runner.get_hook_path(hook_type)
101 legacy_path = hook_path + '.legacy'
102 # If our file doesn't exist or it isn't ours, gtfo.
103 if (
104 not os.path.exists(hook_path) or (
105 not is_our_pre_commit(hook_path) and
106 not is_previous_pre_commit(hook_path)
107 )
108 ):
109 return 0
110
111 os.remove(hook_path)
112 print('{0} uninstalled'.format(hook_type))
113
114 if os.path.exists(legacy_path):
115 os.rename(legacy_path, hook_path)
116 print('Restored previous hooks to {0}'.format(hook_path))
117
118 return 0
119
[end of pre_commit/commands/install_uninstall.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pre_commit/commands/install_uninstall.py b/pre_commit/commands/install_uninstall.py
--- a/pre_commit/commands/install_uninstall.py
+++ b/pre_commit/commands/install_uninstall.py
@@ -48,6 +48,9 @@
hook_path = runner.get_hook_path(hook_type)
legacy_path = hook_path + '.legacy'
+ if not os.path.exists(os.path.dirname(hook_path)):
+ os.makedirs(os.path.dirname(hook_path))
+
# If we have an existing hook, move it to pre-commit.legacy
if (
os.path.exists(hook_path) and
| {"golden_diff": "diff --git a/pre_commit/commands/install_uninstall.py b/pre_commit/commands/install_uninstall.py\n--- a/pre_commit/commands/install_uninstall.py\n+++ b/pre_commit/commands/install_uninstall.py\n@@ -48,6 +48,9 @@\n hook_path = runner.get_hook_path(hook_type)\n legacy_path = hook_path + '.legacy'\n \n+ if not os.path.exists(os.path.dirname(hook_path)):\n+ os.makedirs(os.path.dirname(hook_path))\n+\n # If we have an existing hook, move it to pre-commit.legacy\n if (\n os.path.exists(hook_path) and\n", "issue": "Some versions of git don't create .git/hooks directory\nNoticed here: https://github.com/victorlin/bugbuzz-python/pull/1#issuecomment-104971132\n\n", "before_files": [{"content": "from __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport io\nimport logging\nimport os\nimport os.path\nimport stat\nimport sys\n\nfrom pre_commit.logging_handler import LoggingHandler\nfrom pre_commit.util import resource_filename\n\n\nlogger = logging.getLogger('pre_commit')\n\n\n# This is used to identify the hook file we install\nPREVIOUS_IDENTIFYING_HASHES = (\n '4d9958c90bc262f47553e2c073f14cfe',\n 'd8ee923c46731b42cd95cc869add4062',\n '49fd668cb42069aa1b6048464be5d395',\n)\n\n\nIDENTIFYING_HASH = '79f09a650522a87b0da915d0d983b2de'\n\n\ndef is_our_pre_commit(filename):\n return IDENTIFYING_HASH in io.open(filename).read()\n\n\ndef is_previous_pre_commit(filename):\n contents = io.open(filename).read()\n return any(hash in contents for hash in PREVIOUS_IDENTIFYING_HASHES)\n\n\ndef make_executable(filename):\n original_mode = os.stat(filename).st_mode\n os.chmod(\n filename,\n original_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH,\n )\n\n\ndef install(runner, overwrite=False, hooks=False, hook_type='pre-commit'):\n \"\"\"Install the pre-commit hooks.\"\"\"\n hook_path = runner.get_hook_path(hook_type)\n legacy_path = hook_path + '.legacy'\n\n # If we have an existing hook, move it to pre-commit.legacy\n if (\n os.path.exists(hook_path) and\n not is_our_pre_commit(hook_path) and\n not is_previous_pre_commit(hook_path)\n ):\n os.rename(hook_path, legacy_path)\n\n # If we specify overwrite, we simply delete the legacy file\n if overwrite and os.path.exists(legacy_path):\n os.remove(legacy_path)\n elif os.path.exists(legacy_path):\n print(\n 'Running in migration mode with existing hooks at {0}\\n'\n 'Use -f to use only pre-commit.'.format(\n legacy_path,\n )\n )\n\n with io.open(hook_path, 'w') as pre_commit_file_obj:\n if hook_type == 'pre-push':\n with io.open(resource_filename('pre-push-tmpl')) as fp:\n pre_push_contents = fp.read()\n else:\n pre_push_contents = ''\n\n contents = io.open(resource_filename('hook-tmpl')).read().format(\n sys_executable=sys.executable,\n hook_type=hook_type,\n pre_push=pre_push_contents,\n )\n pre_commit_file_obj.write(contents)\n make_executable(hook_path)\n\n print('pre-commit installed at {0}'.format(hook_path))\n\n # If they requested we install all of the hooks, do so.\n if hooks:\n # Set up our logging handler\n logger.addHandler(LoggingHandler(False))\n logger.setLevel(logging.INFO)\n for repository in runner.repositories:\n repository.require_installed()\n\n return 0\n\n\ndef uninstall(runner, hook_type='pre-commit'):\n \"\"\"Uninstall the pre-commit hooks.\"\"\"\n hook_path = runner.get_hook_path(hook_type)\n legacy_path = hook_path + '.legacy'\n # If our file doesn't exist or it isn't ours, gtfo.\n if (\n not os.path.exists(hook_path) or (\n not is_our_pre_commit(hook_path) and\n not 
is_previous_pre_commit(hook_path)\n )\n ):\n return 0\n\n os.remove(hook_path)\n print('{0} uninstalled'.format(hook_type))\n\n if os.path.exists(legacy_path):\n os.rename(legacy_path, hook_path)\n print('Restored previous hooks to {0}'.format(hook_path))\n\n return 0\n", "path": "pre_commit/commands/install_uninstall.py"}]} | 1,726 | 136 |
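The golden diff above is a three-line guard; here it is as a tiny runnable sketch, with a literal hook path standing in for the value the real code derives from the runner.

```python
import os

def ensure_hook_dir(hook_path):
    # Some git versions don't create .git/hooks, so create it before
    # writing the hook file (same check-then-makedirs shape as the diff).
    hook_dir = os.path.dirname(hook_path)
    if not os.path.exists(hook_dir):
        os.makedirs(hook_dir)

ensure_hook_dir(".git/hooks/pre-commit")  # creates .git/hooks/ if missing
```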
gh_patches_debug_2733 | rasdani/github-patches | git_diff | GeotrekCE__Geotrek-admin-805 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ADMIN - Path segment looping back on itself
Entering the CIRCUIT DES LACS correctly is impossible.
Saving often returns a 504 BAD GATEWAY. The route does get modified, but differently from the way it was entered. Needs further digging.
</issue>
<code>
[start of geotrek/core/forms.py]
1 from django.utils.translation import ugettext_lazy as _
2
3 import floppyforms as forms
4
5 from geotrek.common.forms import CommonForm
6 from .models import Path
7 from .helpers import PathHelper
8 from .fields import TopologyField, SnappedLineStringField
9
10
11 class TopologyForm(CommonForm):
12 """
13 This form is a bit specific :
14
15 We use a field (topology) in order to edit the whole instance.
16 Thus, at init, we load the instance into field, and at save, we
17 save the field into the instance.
18
19 The geom field is fully ignored, since we edit a topology.
20 """
21 topology = TopologyField(label="")
22
23 def __init__(self, *args, **kwargs):
24 super(TopologyForm, self).__init__(*args, **kwargs)
25 if self.instance and self.instance.pk:
26 self.fields['topology'].initial = self.instance
27
28 def clean(self, *args, **kwargs):
29 data = super(TopologyForm, self).clean()
30 # geom is computed at db-level and never edited
31 if 'geom' in self.errors:
32 del self.errors['geom']
33 return data
34
35 def save(self, *args, **kwargs):
36 topology = self.cleaned_data.pop('topology')
37 instance = super(TopologyForm, self).save(*args, **kwargs)
38 instance.mutate(topology)
39 return instance
40
41 geomfields = ['topology']
42
43 class Meta(CommonForm.Meta):
44 fields = CommonForm.Meta.fields + ['topology']
45
46 MEDIA_JS = ("core/dijkstra.js",
47 "core/leaflet-geomutils.js",
48 "core/multipath.js",
49 "core/topology_helper.js") + CommonForm.MEDIA_JS
50
51
52 class PathForm(CommonForm):
53 geom = SnappedLineStringField()
54
55 reverse_geom = forms.BooleanField(required=False,
56 label=_("Reverse path"),
57 help_text=_("The path will be reversed once saved"))
58
59 geomfields = ['geom']
60
61 class Meta(CommonForm.Meta):
62 model = Path
63 fields = CommonForm.Meta.fields + \
64 ['structure',
65 'name', 'stake', 'comfort', 'trail', 'departure', 'arrival', 'comments',
66 'datasource', 'networks', 'usages', 'valid', 'reverse_geom', 'geom']
67
68 def __init__(self, *args, **kwargs):
69 super(PathForm, self).__init__(*args, **kwargs)
70 self.fields['geom'].label = ''
71
72 def clean_geom(self):
73 geom = self.cleaned_data['geom']
74 if geom is None:
75 raise forms.ValidationError(_("Invalid snapped geometry."))
76 if not geom.simple:
77 raise forms.ValidationError(_("Geometry is not simple."))
78 if not PathHelper.disjoint(geom, self.cleaned_data.get('pk') or -1):
79 raise forms.ValidationError(_("Geometry overlaps another."))
80 return geom
81
82 def save(self, commit=True):
83 path = super(PathForm, self).save(commit=False)
84
85 if self.cleaned_data.get('reverse_geom'):
86 path.reverse()
87
88 if commit:
89 path.save()
90 self.save_m2m()
91
92 return path
93
[end of geotrek/core/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/geotrek/core/forms.py b/geotrek/core/forms.py
--- a/geotrek/core/forms.py
+++ b/geotrek/core/forms.py
@@ -44,7 +44,6 @@
fields = CommonForm.Meta.fields + ['topology']
MEDIA_JS = ("core/dijkstra.js",
- "core/leaflet-geomutils.js",
"core/multipath.js",
"core/topology_helper.js") + CommonForm.MEDIA_JS
| {"golden_diff": "diff --git a/geotrek/core/forms.py b/geotrek/core/forms.py\n--- a/geotrek/core/forms.py\n+++ b/geotrek/core/forms.py\n@@ -44,7 +44,6 @@\n fields = CommonForm.Meta.fields + ['topology']\n \n MEDIA_JS = (\"core/dijkstra.js\",\n- \"core/leaflet-geomutils.js\",\n \"core/multipath.js\",\n \"core/topology_helper.js\") + CommonForm.MEDIA_JS\n", "issue": "ADMIN - Tron\u00e7on bouclant sur lui-m\u00eame\nImpossible de saisir le CIRCUIT DES LACS correctement.\nRenvoie souvent une 504 BAD GATEWAY quand on enregistre. L'itin\u00e9raire a pourtant \u00e9t\u00e9 modifi\u00e9 mais diff\u00e9remment de la fa\u00e7on dont il a \u00e9t\u00e9 saisi. A creuser.\n\n", "before_files": [{"content": "from django.utils.translation import ugettext_lazy as _\n\nimport floppyforms as forms\n\nfrom geotrek.common.forms import CommonForm\nfrom .models import Path\nfrom .helpers import PathHelper\nfrom .fields import TopologyField, SnappedLineStringField\n\n\nclass TopologyForm(CommonForm):\n \"\"\"\n This form is a bit specific :\n\n We use a field (topology) in order to edit the whole instance.\n Thus, at init, we load the instance into field, and at save, we\n save the field into the instance.\n\n The geom field is fully ignored, since we edit a topology.\n \"\"\"\n topology = TopologyField(label=\"\")\n\n def __init__(self, *args, **kwargs):\n super(TopologyForm, self).__init__(*args, **kwargs)\n if self.instance and self.instance.pk:\n self.fields['topology'].initial = self.instance\n\n def clean(self, *args, **kwargs):\n data = super(TopologyForm, self).clean()\n # geom is computed at db-level and never edited\n if 'geom' in self.errors:\n del self.errors['geom']\n return data\n\n def save(self, *args, **kwargs):\n topology = self.cleaned_data.pop('topology')\n instance = super(TopologyForm, self).save(*args, **kwargs)\n instance.mutate(topology)\n return instance\n\n geomfields = ['topology']\n\n class Meta(CommonForm.Meta):\n fields = CommonForm.Meta.fields + ['topology']\n\n MEDIA_JS = (\"core/dijkstra.js\",\n \"core/leaflet-geomutils.js\",\n \"core/multipath.js\",\n \"core/topology_helper.js\") + CommonForm.MEDIA_JS\n\n\nclass PathForm(CommonForm):\n geom = SnappedLineStringField()\n\n reverse_geom = forms.BooleanField(required=False,\n label=_(\"Reverse path\"),\n help_text=_(\"The path will be reversed once saved\"))\n\n geomfields = ['geom']\n\n class Meta(CommonForm.Meta):\n model = Path\n fields = CommonForm.Meta.fields + \\\n ['structure',\n 'name', 'stake', 'comfort', 'trail', 'departure', 'arrival', 'comments',\n 'datasource', 'networks', 'usages', 'valid', 'reverse_geom', 'geom']\n\n def __init__(self, *args, **kwargs):\n super(PathForm, self).__init__(*args, **kwargs)\n self.fields['geom'].label = ''\n\n def clean_geom(self):\n geom = self.cleaned_data['geom']\n if geom is None:\n raise forms.ValidationError(_(\"Invalid snapped geometry.\"))\n if not geom.simple:\n raise forms.ValidationError(_(\"Geometry is not simple.\"))\n if not PathHelper.disjoint(geom, self.cleaned_data.get('pk') or -1):\n raise forms.ValidationError(_(\"Geometry overlaps another.\"))\n return geom\n\n def save(self, commit=True):\n path = super(PathForm, self).save(commit=False)\n\n if self.cleaned_data.get('reverse_geom'):\n path.reverse()\n\n if commit:\n path.save()\n self.save_m2m()\n\n return path\n", "path": "geotrek/core/forms.py"}]} | 1,472 | 105 |
gh_patches_debug_943 | rasdani/github-patches | git_diff | microsoft__ptvsd-259 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Debugging of modules using -m is broken
Used to be able to debug Flask & other code using the `-m` flag.
</issue>
<code>
[start of ptvsd/debugger.py]
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License. See LICENSE in the project root
3 # for license information.
4
5 import sys
6
7 # import the wrapper first, so that it gets a chance
8 # to detour pydevd socket functionality.
9 import ptvsd.wrapper
10
11
12 __author__ = "Microsoft Corporation <[email protected]>"
13 __version__ = "4.0.0a4"
14
15 DONT_DEBUG = []
16
17
18 def debug(filename, port_num, debug_id, debug_options, run_as):
19 # TODO: docstring
20 address = (None, port_num)
21 if run_as == 'module':
22 _run_module(address, filename)
23 else:
24 _run_file(address, filename)
25
26
27 def _run_module(address, modname):
28 filename = modname + ':'
29 argv = _run_argv(address, filename)
30 argv.append('--module')
31 _run(argv)
32
33
34 def _run_file(address, filename):
35 argv = _run_argv(address, filename)
36 _run(argv)
37
38
39 def _run_argv(address, filename):
40 host, port = address
41 if host is None:
42 host = '127.0.0.1'
43 return [
44 '--port', str(port),
45 '--client', host,
46 '--file', filename,
47 ]
48
49
50 def _run(argv):
51 import pydevd
52 sys.argv[1:0] = argv
53 try:
54 pydevd.main()
55 except SystemExit as ex:
56 ptvsd.wrapper.ptvsd_sys_exit_code = int(ex.code)
57 raise
58
[end of ptvsd/debugger.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ptvsd/debugger.py b/ptvsd/debugger.py
--- a/ptvsd/debugger.py
+++ b/ptvsd/debugger.py
@@ -27,7 +27,7 @@
def _run_module(address, modname):
filename = modname + ':'
argv = _run_argv(address, filename)
- argv.append('--module')
+ argv.insert(argv.index('--file'), '--module')
_run(argv)
| {"golden_diff": "diff --git a/ptvsd/debugger.py b/ptvsd/debugger.py\n--- a/ptvsd/debugger.py\n+++ b/ptvsd/debugger.py\n@@ -27,7 +27,7 @@\n def _run_module(address, modname):\n filename = modname + ':'\n argv = _run_argv(address, filename)\n- argv.append('--module')\n+ argv.insert(argv.index('--file'), '--module')\n _run(argv)\n", "issue": "Debugging of modules using -m is broken\nUsed to be able to debug Flask & other code using the `-m` flag.\r\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See LICENSE in the project root\n# for license information.\n\nimport sys\n\n# import the wrapper first, so that it gets a chance\n# to detour pydevd socket functionality.\nimport ptvsd.wrapper\n\n\n__author__ = \"Microsoft Corporation <[email protected]>\"\n__version__ = \"4.0.0a4\"\n\nDONT_DEBUG = []\n\n\ndef debug(filename, port_num, debug_id, debug_options, run_as):\n # TODO: docstring\n address = (None, port_num)\n if run_as == 'module':\n _run_module(address, filename)\n else:\n _run_file(address, filename)\n\n\ndef _run_module(address, modname):\n filename = modname + ':'\n argv = _run_argv(address, filename)\n argv.append('--module')\n _run(argv)\n\n\ndef _run_file(address, filename):\n argv = _run_argv(address, filename)\n _run(argv)\n\n\ndef _run_argv(address, filename):\n host, port = address\n if host is None:\n host = '127.0.0.1'\n return [\n '--port', str(port),\n '--client', host,\n '--file', filename,\n ]\n\n\ndef _run(argv):\n import pydevd\n sys.argv[1:0] = argv\n try:\n pydevd.main()\n except SystemExit as ex:\n ptvsd.wrapper.ptvsd_sys_exit_code = int(ex.code)\n raise\n", "path": "ptvsd/debugger.py"}]} | 1,022 | 104 |
gh_patches_debug_8163 | rasdani/github-patches | git_diff | microsoft__ptvsd-84 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Debugging modules (using -m switch)
This is currently not supported
</issue>
<code>
[start of ptvsd/debugger.py]
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License. See LICENSE in the project root
3 # for license information.
4
5 import sys
6
7
8 __author__ = "Microsoft Corporation <[email protected]>"
9 __version__ = "4.0.0a1"
10
11 DONT_DEBUG = []
12
13
14 def debug(filename, port_num, debug_id, debug_options, run_as):
15 # TODO: docstring
16
17 # import the wrapper first, so that it gets a chance
18 # to detour pydevd socket functionality.
19 import ptvsd.wrapper
20 import pydevd
21
22 sys.argv[1:0] = [
23 '--port', str(port_num),
24 '--client', '127.0.0.1',
25 '--file', filename,
26 ]
27 try:
28 pydevd.main()
29 except SystemExit as ex:
30 ptvsd.wrapper.ptvsd_sys_exit_code = ex.code
31 raise
32
[end of ptvsd/debugger.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ptvsd/debugger.py b/ptvsd/debugger.py
--- a/ptvsd/debugger.py
+++ b/ptvsd/debugger.py
@@ -19,11 +19,16 @@
import ptvsd.wrapper
import pydevd
- sys.argv[1:0] = [
+ args = [
'--port', str(port_num),
'--client', '127.0.0.1',
- '--file', filename,
]
+ if run_as == 'module':
+ args.append('--module')
+ args.extend(('--file', filename + ":"))
+ else:
+ args.extend(('--file', filename))
+ sys.argv[1:0] = args
try:
pydevd.main()
except SystemExit as ex:
| {"golden_diff": "diff --git a/ptvsd/debugger.py b/ptvsd/debugger.py\n--- a/ptvsd/debugger.py\n+++ b/ptvsd/debugger.py\n@@ -19,11 +19,16 @@\n import ptvsd.wrapper\n import pydevd\n \n- sys.argv[1:0] = [\n+ args = [\n '--port', str(port_num),\n '--client', '127.0.0.1',\n- '--file', filename,\n ]\n+ if run_as == 'module':\n+ args.append('--module')\n+ args.extend(('--file', filename + \":\"))\n+ else:\n+ args.extend(('--file', filename))\n+ sys.argv[1:0] = args\n try:\n pydevd.main()\n except SystemExit as ex:\n", "issue": "Debugging modules (using -m switch)\nThis is currently not supported\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See LICENSE in the project root\n# for license information.\n\nimport sys\n\n\n__author__ = \"Microsoft Corporation <[email protected]>\"\n__version__ = \"4.0.0a1\"\n\nDONT_DEBUG = []\n\n\ndef debug(filename, port_num, debug_id, debug_options, run_as):\n # TODO: docstring\n\n # import the wrapper first, so that it gets a chance\n # to detour pydevd socket functionality.\n import ptvsd.wrapper\n import pydevd\n\n sys.argv[1:0] = [\n '--port', str(port_num),\n '--client', '127.0.0.1',\n '--file', filename,\n ]\n try:\n pydevd.main()\n except SystemExit as ex:\n ptvsd.wrapper.ptvsd_sys_exit_code = ex.code\n raise\n", "path": "ptvsd/debugger.py"}]} | 820 | 186 |
gh_patches_debug_16822 | rasdani/github-patches | git_diff | tough-dev-school__education-backend-560 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
User.DoesNotExist: User matching query does not exist.
Sentry Issue: [EDUCATION-BACKEND-23](https://sentry.io/organizations/f213/issues/2200858697/?referrer=github_integration)
```
User.DoesNotExist: User matching query does not exist.
File "celery/app/autoretry.py", line 34, in run
return task._orig_run(*args, **kwargs)
File "app/tasks.py", line 77, in subscribe_to_mailchimp
user=apps.get_model('users.User').objects.get(pk=user_id),
File "django/db/models/manager.py", line 85, in manager_method
return getattr(self.get_queryset(), name)(*args, **kwargs)
File "django/db/models/query.py", line 435, in get
raise self.model.DoesNotExist(
```
</issue>
<code>
[start of src/app/tasks.py]
1 from typing import List, Union
2
3 from anymail.exceptions import AnymailRequestsAPIError
4 from django.apps import apps
5 from django.conf import settings
6 from requests.exceptions import RequestException
7
8 from app.celery import celery
9 from app.integrations import tg
10 from app.integrations.clickmeeting import ClickMeetingClient, ClickMeetingHTTPException
11 from app.integrations.mailchimp import AppMailchimp, MailchimpException
12 from app.integrations.zoomus import ZoomusClient, ZoomusHTTPException
13 from app.mail.owl import TemplOwl
14
15
16 @celery.task(
17 autoretry_for=[AnymailRequestsAPIError],
18 retry_kwargs={
19 'max_retries': 10,
20 'countdown': 5,
21 },
22 )
23 def send_mail(to: Union[List, str], template_id, subject: str = '', ctx: dict = None, disable_antispam=False):
24 TemplOwl(
25 to=to,
26 template_id=template_id,
27 subject=subject,
28 ctx=ctx,
29 disable_antispam=disable_antispam,
30 ).send()
31
32
33 @celery.task(
34 autoretry_for=[RequestException, ClickMeetingHTTPException],
35 retry_kwargs={
36 'max_retries': 10,
37 'countdown': 5,
38 },
39 )
40 def invite_to_clickmeeting(room_url: str, email: str):
41 client = ClickMeetingClient()
42 client.invite(room_url, email)
43
44
45 @celery.task(
46 autoretry_for=[RequestException, ZoomusHTTPException],
47 retry_kwargs={
48 'max_retries': 10,
49 'countdown': 5,
50 },
51 )
52 def invite_to_zoomus(webinar_id: str, user_id: int):
53 user = apps.get_model('users.User').objects.get(pk=user_id)
54
55 client = ZoomusClient()
56 client.invite(webinar_id, user)
57
58
59 @celery.task(
60 autoretry_for=[RequestException, MailchimpException],
61 retry_kwargs={
62 'max_retries': 10,
63 'countdown': 5,
64 },
65 )
66 def subscribe_to_mailchimp(user_id: int, list_id=None, tags=None):
67 if list_id is None:
68 list_id = settings.MAILCHIMP_CONTACT_LIST_ID
69
70 if not list_id:
71 return
72
73 mailchimp = AppMailchimp()
74
75 mailchimp.subscribe_django_user(
76 list_id=list_id,
77 user=apps.get_model('users.User').objects.get(pk=user_id),
78 tags=tags,
79 )
80
81
82 @celery.task
83 def send_happiness_message(text):
84 tg.send_happiness_message(text)
85
[end of src/app/tasks.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/app/tasks.py b/src/app/tasks.py
--- a/src/app/tasks.py
+++ b/src/app/tasks.py
@@ -3,6 +3,7 @@
from anymail.exceptions import AnymailRequestsAPIError
from django.apps import apps
from django.conf import settings
+from django.core.exceptions import ObjectDoesNotExist
from requests.exceptions import RequestException
from app.celery import celery
@@ -57,11 +58,12 @@
@celery.task(
- autoretry_for=[RequestException, MailchimpException],
+ autoretry_for=[RequestException, MailchimpException, ObjectDoesNotExist],
retry_kwargs={
'max_retries': 10,
'countdown': 5,
},
+ rate_limit='1/s',
)
def subscribe_to_mailchimp(user_id: int, list_id=None, tags=None):
if list_id is None:
| {"golden_diff": "diff --git a/src/app/tasks.py b/src/app/tasks.py\n--- a/src/app/tasks.py\n+++ b/src/app/tasks.py\n@@ -3,6 +3,7 @@\n from anymail.exceptions import AnymailRequestsAPIError\n from django.apps import apps\n from django.conf import settings\n+from django.core.exceptions import ObjectDoesNotExist\n from requests.exceptions import RequestException\n \n from app.celery import celery\n@@ -57,11 +58,12 @@\n \n \n @celery.task(\n- autoretry_for=[RequestException, MailchimpException],\n+ autoretry_for=[RequestException, MailchimpException, ObjectDoesNotExist],\n retry_kwargs={\n 'max_retries': 10,\n 'countdown': 5,\n },\n+ rate_limit='1/s',\n )\n def subscribe_to_mailchimp(user_id: int, list_id=None, tags=None):\n if list_id is None:\n", "issue": "User.DoesNotExist: User matching query does not exist.\nSentry Issue: [EDUCATION-BACKEND-23](https://sentry.io/organizations/f213/issues/2200858697/?referrer=github_integration)\n\n```\nUser.DoesNotExist: User matching query does not exist.\n File \"celery/app/autoretry.py\", line 34, in run\n return task._orig_run(*args, **kwargs)\n File \"app/tasks.py\", line 77, in subscribe_to_mailchimp\n user=apps.get_model('users.User').objects.get(pk=user_id),\n File \"django/db/models/manager.py\", line 85, in manager_method\n return getattr(self.get_queryset(), name)(*args, **kwargs)\n File \"django/db/models/query.py\", line 435, in get\n raise self.model.DoesNotExist(\n```\n", "before_files": [{"content": "from typing import List, Union\n\nfrom anymail.exceptions import AnymailRequestsAPIError\nfrom django.apps import apps\nfrom django.conf import settings\nfrom requests.exceptions import RequestException\n\nfrom app.celery import celery\nfrom app.integrations import tg\nfrom app.integrations.clickmeeting import ClickMeetingClient, ClickMeetingHTTPException\nfrom app.integrations.mailchimp import AppMailchimp, MailchimpException\nfrom app.integrations.zoomus import ZoomusClient, ZoomusHTTPException\nfrom app.mail.owl import TemplOwl\n\n\[email protected](\n autoretry_for=[AnymailRequestsAPIError],\n retry_kwargs={\n 'max_retries': 10,\n 'countdown': 5,\n },\n)\ndef send_mail(to: Union[List, str], template_id, subject: str = '', ctx: dict = None, disable_antispam=False):\n TemplOwl(\n to=to,\n template_id=template_id,\n subject=subject,\n ctx=ctx,\n disable_antispam=disable_antispam,\n ).send()\n\n\[email protected](\n autoretry_for=[RequestException, ClickMeetingHTTPException],\n retry_kwargs={\n 'max_retries': 10,\n 'countdown': 5,\n },\n)\ndef invite_to_clickmeeting(room_url: str, email: str):\n client = ClickMeetingClient()\n client.invite(room_url, email)\n\n\[email protected](\n autoretry_for=[RequestException, ZoomusHTTPException],\n retry_kwargs={\n 'max_retries': 10,\n 'countdown': 5,\n },\n)\ndef invite_to_zoomus(webinar_id: str, user_id: int):\n user = apps.get_model('users.User').objects.get(pk=user_id)\n\n client = ZoomusClient()\n client.invite(webinar_id, user)\n\n\[email protected](\n autoretry_for=[RequestException, MailchimpException],\n retry_kwargs={\n 'max_retries': 10,\n 'countdown': 5,\n },\n)\ndef subscribe_to_mailchimp(user_id: int, list_id=None, tags=None):\n if list_id is None:\n list_id = settings.MAILCHIMP_CONTACT_LIST_ID\n\n if not list_id:\n return\n\n mailchimp = AppMailchimp()\n\n mailchimp.subscribe_django_user(\n list_id=list_id,\n user=apps.get_model('users.User').objects.get(pk=user_id),\n tags=tags,\n )\n\n\[email protected]\ndef send_happiness_message(text):\n tg.send_happiness_message(text)\n", "path": 
"src/app/tasks.py"}]} | 1,451 | 197 |
gh_patches_debug_26342 | rasdani/github-patches | git_diff | zalando__patroni-1535 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
features/steps/standby_cluster.py produces an error under Windows
Trying to execute:
`>behave -i standby_cluster.feature`
will produce this error:
```
2020-05-07 19:27:19,407 ERROR: Failed to execute ['c:userspashaappdatalocalprogramspythonpython37-32python.exe', 'features/callback.py', '5362', 'on_role_change', 'master', 'batman']
Traceback (most recent call last):
File "C:\Users\pasha\Code\zalando-patroni\patroni\postgresql\cancellable.py", line 28, in _start_process
self._process = psutil.Popen(cmd, *args, **kwargs)
File "c:\users\pasha\appdata\local\programs\python\python37-32\lib\site-packages\psutil\__init__.py", line 1431, in __init__
self.__subproc = subprocess.Popen(*args, **kwargs)
File "c:\users\pasha\appdata\local\programs\python\python37-32\lib\subprocess.py", line 775, in __init__
restore_signals, start_new_session)
File "c:\users\pasha\appdata\local\programs\python\python37-32\lib\subprocess.py", line 1178, in _execute_child
startupinfo)
FileNotFoundError: [WinError 2] The system cannot find the file specified
```
This error is produced by `postgresql/cancellable.py`:
```
def _start_process(self, cmd, *args, **kwargs):
"""This method must be executed only when the `_lock` is acquired"""
try:
self._process_children = []
self._process_cmd = cmd
self._process = psutil.Popen(cmd, *args, **kwargs)
^^^^^^^^^^^^^^^^^
except Exception:
return logger.exception('Failed to execute %s', cmd)
return True
```
</issue>
<code>
[start of features/steps/standby_cluster.py]
1 import os
2 import sys
3 import time
4
5 from behave import step
6
7
8 select_replication_query = """
9 SELECT * FROM pg_catalog.pg_stat_replication
10 WHERE application_name = '{0}'
11 """
12
13 callback = sys.executable + " features/callback2.py "
14
15
16 @step('I start {name:w} with callback configured')
17 def start_patroni_with_callbacks(context, name):
18 return context.pctl.start(name, custom_config={
19 "postgresql": {
20 "callbacks": {
21 "on_role_change": sys.executable + " features/callback.py"
22 }
23 }
24 })
25
26
27 @step('I start {name:w} in a cluster {cluster_name:w}')
28 def start_patroni(context, name, cluster_name):
29 return context.pctl.start(name, custom_config={
30 "scope": cluster_name,
31 "postgresql": {
32 "callbacks": {c: callback + name for c in ('on_start', 'on_stop', 'on_restart', 'on_role_change')},
33 "backup_restore": {
34 "command": (sys.executable + " features/backup_restore.py --sourcedir=" +
35 os.path.join(context.pctl.patroni_path, 'data', 'basebackup'))}
36 }
37 })
38
39
40 @step('I start {name:w} in a standby cluster {cluster_name:w} as a clone of {name2:w}')
41 def start_patroni_standby_cluster(context, name, cluster_name, name2):
42 # we need to remove patroni.dynamic.json in order to "bootstrap" standby cluster with existing PGDATA
43 os.unlink(os.path.join(context.pctl._processes[name]._data_dir, 'patroni.dynamic.json'))
44 port = context.pctl._processes[name2]._connkwargs.get('port')
45 context.pctl._processes[name].update_config({
46 "scope": cluster_name,
47 "bootstrap": {
48 "dcs": {
49 "ttl": 20,
50 "loop_wait": 2,
51 "retry_timeout": 5,
52 "standby_cluster": {
53 "host": "localhost",
54 "port": port,
55 "primary_slot_name": "pm_1",
56 "create_replica_methods": ["backup_restore", "basebackup"]
57 }
58 }
59 },
60 "postgresql": {
61 "callbacks": {c: callback + name for c in ('on_start', 'on_stop', 'on_restart', 'on_role_change')}
62 }
63 })
64 return context.pctl.start(name)
65
66
67 @step('{pg_name1:w} is replicating from {pg_name2:w} after {timeout:d} seconds')
68 def check_replication_status(context, pg_name1, pg_name2, timeout):
69 bound_time = time.time() + timeout
70
71 while time.time() < bound_time:
72 cur = context.pctl.query(
73 pg_name2,
74 select_replication_query.format(pg_name1),
75 fail_ok=True
76 )
77
78 if cur and len(cur.fetchall()) != 0:
79 break
80
81 time.sleep(1)
82 else:
83 assert False, "{0} is not replicating from {1} after {2} seconds".format(pg_name1, pg_name2, timeout)
84
[end of features/steps/standby_cluster.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/features/steps/standby_cluster.py b/features/steps/standby_cluster.py
--- a/features/steps/standby_cluster.py
+++ b/features/steps/standby_cluster.py
@@ -10,7 +10,8 @@
WHERE application_name = '{0}'
"""
-callback = sys.executable + " features/callback2.py "
+executable = sys.executable if os.name != 'nt' else sys.executable.replace('\\', '/')
+callback = executable + " features/callback2.py "
@step('I start {name:w} with callback configured')
@@ -18,7 +19,7 @@
return context.pctl.start(name, custom_config={
"postgresql": {
"callbacks": {
- "on_role_change": sys.executable + " features/callback.py"
+ "on_role_change": executable + " features/callback.py"
}
}
})
@@ -31,7 +32,7 @@
"postgresql": {
"callbacks": {c: callback + name for c in ('on_start', 'on_stop', 'on_restart', 'on_role_change')},
"backup_restore": {
- "command": (sys.executable + " features/backup_restore.py --sourcedir=" +
+ "command": (executable + " features/backup_restore.py --sourcedir=" +
os.path.join(context.pctl.patroni_path, 'data', 'basebackup'))}
}
})
| {"golden_diff": "diff --git a/features/steps/standby_cluster.py b/features/steps/standby_cluster.py\n--- a/features/steps/standby_cluster.py\n+++ b/features/steps/standby_cluster.py\n@@ -10,7 +10,8 @@\n WHERE application_name = '{0}'\n \"\"\"\n \n-callback = sys.executable + \" features/callback2.py \"\n+executable = sys.executable if os.name != 'nt' else sys.executable.replace('\\\\', '/')\n+callback = executable + \" features/callback2.py \"\n \n \n @step('I start {name:w} with callback configured')\n@@ -18,7 +19,7 @@\n return context.pctl.start(name, custom_config={\n \"postgresql\": {\n \"callbacks\": {\n- \"on_role_change\": sys.executable + \" features/callback.py\"\n+ \"on_role_change\": executable + \" features/callback.py\"\n }\n }\n })\n@@ -31,7 +32,7 @@\n \"postgresql\": {\n \"callbacks\": {c: callback + name for c in ('on_start', 'on_stop', 'on_restart', 'on_role_change')},\n \"backup_restore\": {\n- \"command\": (sys.executable + \" features/backup_restore.py --sourcedir=\" +\n+ \"command\": (executable + \" features/backup_restore.py --sourcedir=\" +\n os.path.join(context.pctl.patroni_path, 'data', 'basebackup'))}\n }\n })\n", "issue": "features/steps/standby_cluster.py produces error under Windows\nTrying to execute:\r\n`>behave -i standby_cluster.feature`\r\nwill produce error:\r\n```\r\n2020-05-07 19:27:19,407 ERROR: Failed to execute ['c:userspashaappdatalocalprogramspythonpython37-32python.exe', 'features/callback.py', '5362', 'on_role_change', 'master', 'batman']\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\pasha\\Code\\zalando-patroni\\patroni\\postgresql\\cancellable.py\", line 28, in _start_process\r\n self._process = psutil.Popen(cmd, *args, **kwargs)\r\n File \"c:\\users\\pasha\\appdata\\local\\programs\\python\\python37-32\\lib\\site-packages\\psutil\\__init__.py\", line 1431, in __init__\r\n self.__subproc = subprocess.Popen(*args, **kwargs)\r\n File \"c:\\users\\pasha\\appdata\\local\\programs\\python\\python37-32\\lib\\subprocess.py\", line 775, in __init__\r\n restore_signals, start_new_session)\r\n File \"c:\\users\\pasha\\appdata\\local\\programs\\python\\python37-32\\lib\\subprocess.py\", line 1178, in _execute_child\r\n startupinfo)\r\nFileNotFoundError: [WinError 2] The system cannot find the file specified\r\n```\r\n\r\nThis error produced by `postgresql/cancellable.py`: \r\n```\r\n def _start_process(self, cmd, *args, **kwargs):\r\n \"\"\"This method must be executed only when the `_lock` is acquired\"\"\"\r\n\r\n try:\r\n self._process_children = []\r\n self._process_cmd = cmd\r\n self._process = psutil.Popen(cmd, *args, **kwargs)\r\n ^^^^^^^^^^^^^^^^^\r\n except Exception:\r\n return logger.exception('Failed to execute %s', cmd)\r\n return True\r\n```\r\n\n", "before_files": [{"content": "import os\nimport sys\nimport time\n\nfrom behave import step\n\n\nselect_replication_query = \"\"\"\nSELECT * FROM pg_catalog.pg_stat_replication\nWHERE application_name = '{0}'\n\"\"\"\n\ncallback = sys.executable + \" features/callback2.py \"\n\n\n@step('I start {name:w} with callback configured')\ndef start_patroni_with_callbacks(context, name):\n return context.pctl.start(name, custom_config={\n \"postgresql\": {\n \"callbacks\": {\n \"on_role_change\": sys.executable + \" features/callback.py\"\n }\n }\n })\n\n\n@step('I start {name:w} in a cluster {cluster_name:w}')\ndef start_patroni(context, name, cluster_name):\n return context.pctl.start(name, custom_config={\n \"scope\": cluster_name,\n \"postgresql\": {\n \"callbacks\": {c: callback + 
name for c in ('on_start', 'on_stop', 'on_restart', 'on_role_change')},\n \"backup_restore\": {\n \"command\": (sys.executable + \" features/backup_restore.py --sourcedir=\" +\n os.path.join(context.pctl.patroni_path, 'data', 'basebackup'))}\n }\n })\n\n\n@step('I start {name:w} in a standby cluster {cluster_name:w} as a clone of {name2:w}')\ndef start_patroni_standby_cluster(context, name, cluster_name, name2):\n # we need to remove patroni.dynamic.json in order to \"bootstrap\" standby cluster with existing PGDATA\n os.unlink(os.path.join(context.pctl._processes[name]._data_dir, 'patroni.dynamic.json'))\n port = context.pctl._processes[name2]._connkwargs.get('port')\n context.pctl._processes[name].update_config({\n \"scope\": cluster_name,\n \"bootstrap\": {\n \"dcs\": {\n \"ttl\": 20,\n \"loop_wait\": 2,\n \"retry_timeout\": 5,\n \"standby_cluster\": {\n \"host\": \"localhost\",\n \"port\": port,\n \"primary_slot_name\": \"pm_1\",\n \"create_replica_methods\": [\"backup_restore\", \"basebackup\"]\n }\n }\n },\n \"postgresql\": {\n \"callbacks\": {c: callback + name for c in ('on_start', 'on_stop', 'on_restart', 'on_role_change')}\n }\n })\n return context.pctl.start(name)\n\n\n@step('{pg_name1:w} is replicating from {pg_name2:w} after {timeout:d} seconds')\ndef check_replication_status(context, pg_name1, pg_name2, timeout):\n bound_time = time.time() + timeout\n\n while time.time() < bound_time:\n cur = context.pctl.query(\n pg_name2,\n select_replication_query.format(pg_name1),\n fail_ok=True\n )\n\n if cur and len(cur.fetchall()) != 0:\n break\n\n time.sleep(1)\n else:\n assert False, \"{0} is not replicating from {1} after {2} seconds\".format(pg_name1, pg_name2, timeout)\n", "path": "features/steps/standby_cluster.py"}]} | 1,835 | 314 |
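A runnable sketch of the core of the patroni fix: the traceback above shows the backslashes of `sys.executable` being eaten on Windows, so the patch normalizes them to forward slashes before composing callback commands.

```python
import os
import sys

executable = (sys.executable if os.name != 'nt'
              else sys.executable.replace('\\', '/'))
callback = executable + " features/callback2.py "
print(callback)  # on Windows: c:/users/.../python.exe features/callback2.py
```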
gh_patches_debug_654 | rasdani/github-patches | git_diff | pex-tool__pex-2123 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release 2.1.133
On the docket:
+ [x] python<=3.8 symlink with a suffix (eg 3.7m) can create a venv without a pythonX.Y symlink which breaks pex assumptions that pythonX.Y is always available #2119
</issue>
<code>
[start of pex/version.py]
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.132"
5
[end of pex/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.132"
+__version__ = "2.1.133"
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.132\"\n+__version__ = \"2.1.133\"\n", "issue": "Release 2.1.133\nOn the docket:\r\n+ [x] python<=3.8 symlink with a suffix (eg 3.7m) can create a venv without a pythonX.Y symlink which breaks pex assumptions that pythonX.Y is always available #2119\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.132\"\n", "path": "pex/version.py"}]} | 649 | 99 |
gh_patches_debug_5511 | rasdani/github-patches | git_diff | napalm-automation__napalm-692 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
pip >= 10.0.0 incompatibility
I am not going to create these for every single napalm module... but did for https://github.com/napalm-automation/napalm-ansible/issues/123 where I also saw this issue
pip 10.x no longer provides pip.req as needed in setup.py
https://github.com/pypa/pip/issues/5156
</issue>
<code>
[start of setup.py]
1 """setup.py file."""
2 import uuid
3
4 from setuptools import setup, find_packages
5
6 from pip.req import parse_requirements
7
8
9 install_reqs = parse_requirements('requirements.txt', session=uuid.uuid1())
10 reqs = [str(ir.req) for ir in install_reqs]
11
12 __author__ = 'David Barroso <[email protected]>'
13
14 setup(
15 name="napalm",
16 version='2.3.0',
17 packages=find_packages(exclude=("test*", )),
18 test_suite='test_base',
19 author="David Barroso, Kirk Byers, Mircea Ulinic",
20 author_email="[email protected], [email protected], [email protected]",
21 description="Network Automation and Programmability Abstraction Layer with Multivendor support",
22 classifiers=[
23 'Topic :: Utilities',
24 'Programming Language :: Python',
25 'Programming Language :: Python :: 2',
26 'Programming Language :: Python :: 2.7',
27 'Programming Language :: Python :: 3',
28 'Programming Language :: Python :: 3.4',
29 'Programming Language :: Python :: 3.5',
30 'Programming Language :: Python :: 3.6',
31 'Operating System :: POSIX :: Linux',
32 'Operating System :: MacOS',
33 ],
34 url="https://github.com/napalm-automation/napalm",
35 include_package_data=True,
36 install_requires=reqs,
37 entry_points={
38 'console_scripts': [
39 'cl_napalm_configure=napalm.base.clitools.cl_napalm_configure:main',
40 'cl_napalm_test=napalm.base.clitools.cl_napalm_test:main',
41 'cl_napalm_validate=napalm.base.clitools.cl_napalm_validate:main',
42 'napalm=napalm.base.clitools.cl_napalm:main',
43 ],
44 }
45 )
46
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -1,13 +1,9 @@
"""setup.py file."""
-import uuid
-
from setuptools import setup, find_packages
-from pip.req import parse_requirements
-
+with open("requirements.txt", "r") as fs:
+ reqs = [r for r in fs.read().splitlines() if (len(r) > 0 and not r.startswith("#"))]
-install_reqs = parse_requirements('requirements.txt', session=uuid.uuid1())
-reqs = [str(ir.req) for ir in install_reqs]
__author__ = 'David Barroso <[email protected]>'
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,13 +1,9 @@\n \"\"\"setup.py file.\"\"\"\n-import uuid\n-\n from setuptools import setup, find_packages\n \n-from pip.req import parse_requirements\n-\n+with open(\"requirements.txt\", \"r\") as fs:\n+ reqs = [r for r in fs.read().splitlines() if (len(r) > 0 and not r.startswith(\"#\"))]\n \n-install_reqs = parse_requirements('requirements.txt', session=uuid.uuid1())\n-reqs = [str(ir.req) for ir in install_reqs]\n \n __author__ = 'David Barroso <[email protected]>'\n", "issue": "pip >= 10.0.0 incompatibility \nI am not going to create these for every single napalm module... but did for https://github.com/napalm-automation/napalm-ansible/issues/123 where I also saw this issue\r\n\r\npip 10.x no longer provides pip.req as needed in setup.py\r\nhttps://github.com/pypa/pip/issues/5156\r\n\n", "before_files": [{"content": "\"\"\"setup.py file.\"\"\"\nimport uuid\n\nfrom setuptools import setup, find_packages\n\nfrom pip.req import parse_requirements\n\n\ninstall_reqs = parse_requirements('requirements.txt', session=uuid.uuid1())\nreqs = [str(ir.req) for ir in install_reqs]\n\n__author__ = 'David Barroso <[email protected]>'\n\nsetup(\n name=\"napalm\",\n version='2.3.0',\n packages=find_packages(exclude=(\"test*\", )),\n test_suite='test_base',\n author=\"David Barroso, Kirk Byers, Mircea Ulinic\",\n author_email=\"[email protected], [email protected], [email protected]\",\n description=\"Network Automation and Programmability Abstraction Layer with Multivendor support\",\n classifiers=[\n 'Topic :: Utilities',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Operating System :: POSIX :: Linux',\n 'Operating System :: MacOS',\n ],\n url=\"https://github.com/napalm-automation/napalm\",\n include_package_data=True,\n install_requires=reqs,\n entry_points={\n 'console_scripts': [\n 'cl_napalm_configure=napalm.base.clitools.cl_napalm_configure:main',\n 'cl_napalm_test=napalm.base.clitools.cl_napalm_test:main',\n 'cl_napalm_validate=napalm.base.clitools.cl_napalm_validate:main',\n 'napalm=napalm.base.clitools.cl_napalm:main',\n ],\n }\n)\n", "path": "setup.py"}]} | 1,113 | 156 |
gh_patches_debug_33129 | rasdani/github-patches | git_diff | Kinto__kinto-1259 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
If id, schema or last_modified are marked as required, record can't be validated
Because we pop those fields before validating them with the collection schema.
We can either:
* prevent those fields from being mentioned as `required` when defining the collection schema
* or not pop them if they are present in the schema before validating
</issue>
<code>
[start of kinto/views/records.py]
1 import copy
2
3 import jsonschema
4 from kinto.core import resource, utils
5 from kinto.core.errors import raise_invalid
6 from jsonschema import exceptions as jsonschema_exceptions
7 from pyramid.security import Authenticated
8 from pyramid.settings import asbool
9
10 from kinto.views import object_exists_or_404
11
12
13 _parent_path = '/buckets/{{bucket_id}}/collections/{{collection_id}}'
14
15
16 @resource.register(name='record',
17 collection_path=_parent_path + '/records',
18 record_path=_parent_path + '/records/{{id}}')
19 class Record(resource.ShareableResource):
20
21 schema_field = 'schema'
22
23 def __init__(self, request, **kwargs):
24 # Before all, first check that the parent collection exists.
25 # Check if already fetched before (in batch).
26 collections = request.bound_data.setdefault('collections', {})
27 collection_uri = self.get_parent_id(request)
28 if collection_uri not in collections:
29 # Unknown yet, fetch from storage.
30 collection_parent_id = utils.instance_uri(request, 'bucket',
31 id=self.bucket_id)
32 collection = object_exists_or_404(request,
33 collection_id='collection',
34 parent_id=collection_parent_id,
35 object_id=self.collection_id)
36 collections[collection_uri] = collection
37
38 super().__init__(request, **kwargs)
39 self._collection = collections[collection_uri]
40
41 def get_parent_id(self, request):
42 self.bucket_id = request.matchdict['bucket_id']
43 self.collection_id = request.matchdict['collection_id']
44 return utils.instance_uri(request, 'collection',
45 bucket_id=self.bucket_id,
46 id=self.collection_id)
47
48 def process_record(self, new, old=None):
49 """Validate records against collection schema, if any."""
50 new = super().process_record(new, old)
51
52 schema = self._collection.get('schema')
53 settings = self.request.registry.settings
54 schema_validation = 'experimental_collection_schema_validation'
55 if not schema or not asbool(settings.get(schema_validation)):
56 return new
57
58 collection_timestamp = self._collection[self.model.modified_field]
59
60 try:
61 stripped = copy.deepcopy(new)
62 stripped.pop(self.model.id_field, None)
63 stripped.pop(self.model.modified_field, None)
64 stripped.pop(self.model.permissions_field, None)
65 stripped.pop(self.schema_field, None)
66 jsonschema.validate(stripped, schema)
67 except jsonschema_exceptions.ValidationError as e:
68 if e.validator_value:
69 field = e.validator_value[-1]
70 else:
71 field = e.schema_path[-1]
72 raise_invalid(self.request, name=field, description=e.message)
73
74 new[self.schema_field] = collection_timestamp
75 return new
76
77 def collection_get(self):
78 result = super().collection_get()
79 self._handle_cache_expires(self.request.response)
80 return result
81
82 def get(self):
83 result = super().get()
84 self._handle_cache_expires(self.request.response)
85 return result
86
87 def _handle_cache_expires(self, response):
88 """If the parent collection defines a ``cache_expires`` attribute,
89 then cache-control response headers are sent.
90
91 .. note::
92
93 Those headers are also sent if the
94 ``kinto.record_cache_expires_seconds`` setting is defined.
95 """
96 is_anonymous = Authenticated not in self.request.effective_principals
97 if not is_anonymous:
98 return
99
100 cache_expires = self._collection.get('cache_expires')
101 if cache_expires is None:
102 by_bucket = '{}_record_cache_expires_seconds'.format(self.bucket_id)
103 by_collection = '{}_{}_record_cache_expires_seconds'.format(
104 self.bucket_id, self.collection_id)
105 settings = self.request.registry.settings
106 cache_expires = settings.get(by_collection,
107 settings.get(by_bucket))
108
109 if cache_expires is not None:
110 response.cache_expires(seconds=int(cache_expires))
111
[end of kinto/views/records.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kinto/views/records.py b/kinto/views/records.py
--- a/kinto/views/records.py
+++ b/kinto/views/records.py
@@ -1,5 +1,3 @@
-import copy
-
import jsonschema
from kinto.core import resource, utils
from kinto.core.errors import raise_invalid
@@ -55,15 +53,21 @@
if not schema or not asbool(settings.get(schema_validation)):
return new
- collection_timestamp = self._collection[self.model.modified_field]
-
+ # Remove internal and auto-assigned fields from schema and record.
+ internal_fields = (self.model.id_field,
+ self.model.modified_field,
+ self.schema_field,
+ self.model.permissions_field)
+ required_fields = [f for f in schema.get('required', []) if f not in internal_fields]
+ if required_fields:
+ schema = {**schema, 'required': required_fields}
+ else:
+ schema = {f: v for f, v in new.items() if f != 'required'}
+ data = {f: v for f, v in new.items() if f not in internal_fields}
+
+ # Validate or fail with 400.
try:
- stripped = copy.deepcopy(new)
- stripped.pop(self.model.id_field, None)
- stripped.pop(self.model.modified_field, None)
- stripped.pop(self.model.permissions_field, None)
- stripped.pop(self.schema_field, None)
- jsonschema.validate(stripped, schema)
+ jsonschema.validate(data, schema)
except jsonschema_exceptions.ValidationError as e:
if e.validator_value:
field = e.validator_value[-1]
@@ -71,7 +75,10 @@
field = e.schema_path[-1]
raise_invalid(self.request, name=field, description=e.message)
+ # Assign the schema version (collection object timestamp) to the record.
+ collection_timestamp = self._collection[self.model.modified_field]
new[self.schema_field] = collection_timestamp
+
return new
def collection_get(self):
| {"golden_diff": "diff --git a/kinto/views/records.py b/kinto/views/records.py\n--- a/kinto/views/records.py\n+++ b/kinto/views/records.py\n@@ -1,5 +1,3 @@\n-import copy\n-\n import jsonschema\n from kinto.core import resource, utils\n from kinto.core.errors import raise_invalid\n@@ -55,15 +53,21 @@\n if not schema or not asbool(settings.get(schema_validation)):\n return new\n \n- collection_timestamp = self._collection[self.model.modified_field]\n-\n+ # Remove internal and auto-assigned fields from schema and record.\n+ internal_fields = (self.model.id_field,\n+ self.model.modified_field,\n+ self.schema_field,\n+ self.model.permissions_field)\n+ required_fields = [f for f in schema.get('required', []) if f not in internal_fields]\n+ if required_fields:\n+ schema = {**schema, 'required': required_fields}\n+ else:\n+ schema = {f: v for f, v in new.items() if f != 'required'}\n+ data = {f: v for f, v in new.items() if f not in internal_fields}\n+\n+ # Validate or fail with 400.\n try:\n- stripped = copy.deepcopy(new)\n- stripped.pop(self.model.id_field, None)\n- stripped.pop(self.model.modified_field, None)\n- stripped.pop(self.model.permissions_field, None)\n- stripped.pop(self.schema_field, None)\n- jsonschema.validate(stripped, schema)\n+ jsonschema.validate(data, schema)\n except jsonschema_exceptions.ValidationError as e:\n if e.validator_value:\n field = e.validator_value[-1]\n@@ -71,7 +75,10 @@\n field = e.schema_path[-1]\n raise_invalid(self.request, name=field, description=e.message)\n \n+ # Assign the schema version (collection object timestamp) to the record.\n+ collection_timestamp = self._collection[self.model.modified_field]\n new[self.schema_field] = collection_timestamp\n+\n return new\n \n def collection_get(self):\n", "issue": "If id, schema or last_modified are marked as required, record can't be validated\nBecause we pop those fields before validating them with the collection schema.\r\n\r\nWe can either:\r\n* prevent those fields to be mentioned as `required` when defining the collection schema\r\n* or not pop them if they are present in the schema before validating\nIf id, schema or last_modified are marked as required, record can't be validated\nBecause we pop those fields before validating them with the collection schema.\r\n\r\nWe can either:\r\n* prevent those fields to be mentioned as `required` when defining the collection schema\r\n* or not pop them if they are present in the schema before validating\n", "before_files": [{"content": "import copy\n\nimport jsonschema\nfrom kinto.core import resource, utils\nfrom kinto.core.errors import raise_invalid\nfrom jsonschema import exceptions as jsonschema_exceptions\nfrom pyramid.security import Authenticated\nfrom pyramid.settings import asbool\n\nfrom kinto.views import object_exists_or_404\n\n\n_parent_path = '/buckets/{{bucket_id}}/collections/{{collection_id}}'\n\n\[email protected](name='record',\n collection_path=_parent_path + '/records',\n record_path=_parent_path + '/records/{{id}}')\nclass Record(resource.ShareableResource):\n\n schema_field = 'schema'\n\n def __init__(self, request, **kwargs):\n # Before all, first check that the parent collection exists.\n # Check if already fetched before (in batch).\n collections = request.bound_data.setdefault('collections', {})\n collection_uri = self.get_parent_id(request)\n if collection_uri not in collections:\n # Unknown yet, fetch from storage.\n collection_parent_id = utils.instance_uri(request, 'bucket',\n id=self.bucket_id)\n collection = 
object_exists_or_404(request,\n collection_id='collection',\n parent_id=collection_parent_id,\n object_id=self.collection_id)\n collections[collection_uri] = collection\n\n super().__init__(request, **kwargs)\n self._collection = collections[collection_uri]\n\n def get_parent_id(self, request):\n self.bucket_id = request.matchdict['bucket_id']\n self.collection_id = request.matchdict['collection_id']\n return utils.instance_uri(request, 'collection',\n bucket_id=self.bucket_id,\n id=self.collection_id)\n\n def process_record(self, new, old=None):\n \"\"\"Validate records against collection schema, if any.\"\"\"\n new = super().process_record(new, old)\n\n schema = self._collection.get('schema')\n settings = self.request.registry.settings\n schema_validation = 'experimental_collection_schema_validation'\n if not schema or not asbool(settings.get(schema_validation)):\n return new\n\n collection_timestamp = self._collection[self.model.modified_field]\n\n try:\n stripped = copy.deepcopy(new)\n stripped.pop(self.model.id_field, None)\n stripped.pop(self.model.modified_field, None)\n stripped.pop(self.model.permissions_field, None)\n stripped.pop(self.schema_field, None)\n jsonschema.validate(stripped, schema)\n except jsonschema_exceptions.ValidationError as e:\n if e.validator_value:\n field = e.validator_value[-1]\n else:\n field = e.schema_path[-1]\n raise_invalid(self.request, name=field, description=e.message)\n\n new[self.schema_field] = collection_timestamp\n return new\n\n def collection_get(self):\n result = super().collection_get()\n self._handle_cache_expires(self.request.response)\n return result\n\n def get(self):\n result = super().get()\n self._handle_cache_expires(self.request.response)\n return result\n\n def _handle_cache_expires(self, response):\n \"\"\"If the parent collection defines a ``cache_expires`` attribute,\n then cache-control response headers are sent.\n\n .. note::\n\n Those headers are also sent if the\n ``kinto.record_cache_expires_seconds`` setting is defined.\n \"\"\"\n is_anonymous = Authenticated not in self.request.effective_principals\n if not is_anonymous:\n return\n\n cache_expires = self._collection.get('cache_expires')\n if cache_expires is None:\n by_bucket = '{}_record_cache_expires_seconds'.format(self.bucket_id)\n by_collection = '{}_{}_record_cache_expires_seconds'.format(\n self.bucket_id, self.collection_id)\n settings = self.request.registry.settings\n cache_expires = settings.get(by_collection,\n settings.get(by_bucket))\n\n if cache_expires is not None:\n response.cache_expires(seconds=int(cache_expires))\n", "path": "kinto/views/records.py"}]} | 1,722 | 454 |
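A minimal, self-contained sketch of the validation strategy the patch above adopts, using only the jsonschema package: drop the auto-assigned fields from both the record and the schema's `required` list before validating. `INTERNAL_FIELDS` and `validate_record` are illustrative names for this sketch, not part of Kinto's API.

import jsonschema

INTERNAL_FIELDS = ("id", "last_modified", "schema")

def validate_record(record, schema):
    # Keep user-defined requirements, but never require auto-assigned fields.
    required = [f for f in schema.get("required", []) if f not in INTERNAL_FIELDS]
    if required:
        schema = {**schema, "required": required}
    else:
        schema = {k: v for k, v in schema.items() if k != "required"}
    data = {k: v for k, v in record.items() if k not in INTERNAL_FIELDS}
    jsonschema.validate(data, schema)  # raises ValidationError on failure

# A collection schema that marks "id" as required no longer rejects a
# record whose id is assigned server-side:
validate_record(
    {"id": "abc-123", "title": "hello"},
    {"type": "object",
     "required": ["id", "title"],
     "properties": {"title": {"type": "string"}}},
)

Filtering the `required` list on a copy of the schema keeps the rest of it (`type`, `properties`) intact, so non-internal requirements are still enforced.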
gh_patches_debug_7892 | rasdani/github-patches | git_diff | pwr-Solaar__Solaar-23 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
no battery status on M510 mouse
</issue>
<code>
[start of lib/logitech/unifying_receiver/hidpp10.py]
1 #
2 #
3 #
4
5 from __future__ import absolute_import, division, print_function, unicode_literals
6
7 from logging import getLogger # , DEBUG as _DEBUG
8 _log = getLogger('LUR').getChild('hidpp10')
9 del getLogger
10
11 from .common import (strhex as _strhex,
12 NamedInts as _NamedInts,
13 FirmwareInfo as _FirmwareInfo)
14 from .hidpp20 import FIRMWARE_KIND
15
16 #
17 # constants
18 #
19
20 DEVICE_KIND = _NamedInts(
21 keyboard=0x01,
22 mouse=0x02,
23 numpad=0x03,
24 presenter=0x04,
25 trackball=0x08,
26 touchpad=0x09)
27
28 POWER_SWITCH_LOCATION = _NamedInts(
29 base=0x01,
30 top_case=0x02,
31 edge_of_top_right_corner=0x03,
32 top_left_corner=0x05,
33 bottom_left_corner=0x06,
34 top_right_corner=0x07,
35 bottom_right_corner=0x08,
36 top_edge=0x09,
37 right_edge=0x0A,
38 left_edge=0x0B,
39 bottom_edge=0x0C)
40
41 NOTIFICATION_FLAG = _NamedInts(
42 battery_status=0x100000,
43 wireless=0x000100,
44 software_present=0x0000800)
45
46 ERROR = _NamedInts(
47 invalid_SubID__command=0x01,
48 invalid_address=0x02,
49 invalid_value=0x03,
50 connection_request_failed=0x04,
51 too_many_devices=0x05,
52 already_exists=0x06,
53 busy=0x07,
54 unknown_device=0x08,
55 resource_error=0x09,
56 request_unavailable=0x0A,
57 unsupported_parameter_value=0x0B,
58 wrong_pin_code=0x0C)
59
60 PAIRING_ERRORS = _NamedInts(
61 device_timeout=0x01,
62 device_not_supported=0x02,
63 too_many_devices=0x03,
64 sequence_timeout=0x06)
65
66 #
67 # functions
68 #
69
70 def get_register(device, name, default_number=-1):
71 known_register = device.registers[name]
72 register = known_register or default_number
73 if register > 0:
74 reply = device.request(0x8100 + (register & 0xFF))
75 if reply:
76 return reply
77
78 if not known_register and device.ping():
79 _log.warn("%s: failed to read '%s' from default register 0x%02X, blacklisting", device, name, default_number)
80 device.registers[-default_number] = name
81
82
83 def get_battery(device):
84 """Reads a device's battery level, if provided by the HID++ 1.0 protocol."""
85 reply = get_register(device, 'battery', 0x0D)
86 if reply:
87 charge = ord(reply[:1])
88 status = ord(reply[2:3]) & 0xF0
89 status = ('discharging' if status == 0x30
90 else 'charging' if status == 0x50
91 else 'fully charged' if status == 0x90
92 else None)
93 return charge, status
94
95 reply = get_register(device, 'battery_status', 0x07)
96 if reply:
97 battery_status = ord(reply[:1])
98 _log.info("%s: battery status %02X", device, battery_status)
99
100
101 def get_serial(device):
102 if device.kind is None:
103 dev_id = 0x03
104 receiver = device
105 else:
106 dev_id = 0x30 + device.number - 1
107 receiver = device.receiver
108
109 serial = receiver.request(0x83B5, dev_id)
110 if serial:
111 return _strhex(serial[1:5])
112
113
114 def get_firmware(device):
115 firmware = []
116
117 reply = device.request(0x81F1, 0x01)
118 if reply:
119 fw_version = _strhex(reply[1:3])
120 fw_version = '%s.%s' % (fw_version[0:2], fw_version[2:4])
121 reply = device.request(0x81F1, 0x02)
122 if reply:
123 fw_version += '.B' + _strhex(reply[1:3])
124 fw = _FirmwareInfo(FIRMWARE_KIND.Firmware, '', fw_version, None)
125 firmware.append(fw)
126
127 reply = device.request(0x81F1, 0x04)
128 if reply:
129 bl_version = _strhex(reply[1:3])
130 bl_version = '%s.%s' % (bl_version[0:2], bl_version[2:4])
131 bl = _FirmwareInfo(FIRMWARE_KIND.Bootloader, '', bl_version, None)
132 firmware.append(bl)
133
134 return tuple(firmware)
135
[end of lib/logitech/unifying_receiver/hidpp10.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lib/logitech/unifying_receiver/hidpp10.py b/lib/logitech/unifying_receiver/hidpp10.py
--- a/lib/logitech/unifying_receiver/hidpp10.py
+++ b/lib/logitech/unifying_receiver/hidpp10.py
@@ -94,8 +94,16 @@
reply = get_register(device, 'battery_status', 0x07)
if reply:
- battery_status = ord(reply[:1])
- _log.info("%s: battery status %02X", device, battery_status)
+ level = ord(reply[:1])
+ battery_status = ord(reply[2:3])
+ charge = (90 if level == 7 # full
+ else 50 if level == 5 # good
+ else 20 if level == 3 # low
+ else 5 if level == 1 # critical
+ else 0 ) # wtf?
+ status = ('charging' if battery_status == 0x25
+ else 'discharging')
+ return charge, status
def get_serial(device):
| {"golden_diff": "diff --git a/lib/logitech/unifying_receiver/hidpp10.py b/lib/logitech/unifying_receiver/hidpp10.py\n--- a/lib/logitech/unifying_receiver/hidpp10.py\n+++ b/lib/logitech/unifying_receiver/hidpp10.py\n@@ -94,8 +94,16 @@\n \n \treply = get_register(device, 'battery_status', 0x07)\n \tif reply:\n-\t\tbattery_status = ord(reply[:1])\n-\t\t_log.info(\"%s: battery status %02X\", device, battery_status)\n+\t\tlevel = ord(reply[:1])\n+\t\tbattery_status = ord(reply[2:3])\n+\t\tcharge = (90 if level == 7 # full\n+\t\t\telse 50 if level == 5 # good\n+\t\t\telse 20 if level == 3 # low\n+\t\t\telse 5 if level == 1 # critical\n+\t\t\telse 0 ) # wtf?\n+\t\tstatus = ('charging' if battery_status == 0x25\n+\t\t\telse 'discharging')\n+\t\treturn charge, status\n \n \n def get_serial(device):\n", "issue": "no battery status on M510 mouse\n\n", "before_files": [{"content": "#\n#\n#\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom logging import getLogger # , DEBUG as _DEBUG\n_log = getLogger('LUR').getChild('hidpp10')\ndel getLogger\n\nfrom .common import (strhex as _strhex,\n\t\t\t\t\tNamedInts as _NamedInts,\n\t\t\t\t\tFirmwareInfo as _FirmwareInfo)\nfrom .hidpp20 import FIRMWARE_KIND\n\n#\n# constants\n#\n\nDEVICE_KIND = _NamedInts(\n\t\t\t\tkeyboard=0x01,\n\t\t\t\tmouse=0x02,\n\t\t\t\tnumpad=0x03,\n\t\t\t\tpresenter=0x04,\n\t\t\t\ttrackball=0x08,\n\t\t\t\ttouchpad=0x09)\n\nPOWER_SWITCH_LOCATION = _NamedInts(\n\t\t\t\tbase=0x01,\n\t\t\t\ttop_case=0x02,\n\t\t\t\tedge_of_top_right_corner=0x03,\n\t\t\t\ttop_left_corner=0x05,\n\t\t\t\tbottom_left_corner=0x06,\n\t\t\t\ttop_right_corner=0x07,\n\t\t\t\tbottom_right_corner=0x08,\n\t\t\t\ttop_edge=0x09,\n\t\t\t\tright_edge=0x0A,\n\t\t\t\tleft_edge=0x0B,\n\t\t\t\tbottom_edge=0x0C)\n\nNOTIFICATION_FLAG = _NamedInts(\n\t\t\t\tbattery_status=0x100000,\n\t\t\t\twireless=0x000100,\n\t\t\t\tsoftware_present=0x0000800)\n\nERROR = _NamedInts(\n\t\t\t\tinvalid_SubID__command=0x01,\n\t\t\t\tinvalid_address=0x02,\n\t\t\t\tinvalid_value=0x03,\n\t\t\t\tconnection_request_failed=0x04,\n\t\t\t\ttoo_many_devices=0x05,\n\t\t\t\talready_exists=0x06,\n\t\t\t\tbusy=0x07,\n\t\t\t\tunknown_device=0x08,\n\t\t\t\tresource_error=0x09,\n\t\t\t\trequest_unavailable=0x0A,\n\t\t\t\tunsupported_parameter_value=0x0B,\n\t\t\t\twrong_pin_code=0x0C)\n\nPAIRING_ERRORS = _NamedInts(\n\t\t\t\tdevice_timeout=0x01,\n\t\t\t\tdevice_not_supported=0x02,\n\t\t\t\ttoo_many_devices=0x03,\n\t\t\t\tsequence_timeout=0x06)\n\n#\n# functions\n#\n\ndef get_register(device, name, default_number=-1):\n\tknown_register = device.registers[name]\n\tregister = known_register or default_number\n\tif register > 0:\n\t\treply = device.request(0x8100 + (register & 0xFF))\n\t\tif reply:\n\t\t\treturn reply\n\n\t\tif not known_register and device.ping():\n\t\t\t_log.warn(\"%s: failed to read '%s' from default register 0x%02X, blacklisting\", device, name, default_number)\n\t\t\tdevice.registers[-default_number] = name\n\n\ndef get_battery(device):\n\t\"\"\"Reads a device's battery level, if provided by the HID++ 1.0 protocol.\"\"\"\n\treply = get_register(device, 'battery', 0x0D)\n\tif reply:\n\t\tcharge = ord(reply[:1])\n\t\tstatus = ord(reply[2:3]) & 0xF0\n\t\tstatus = ('discharging' if status == 0x30\n\t\t\t\telse 'charging' if status == 0x50\n\t\t\t\telse 'fully charged' if status == 0x90\n\t\t\t\telse None)\n\t\treturn charge, status\n\n\treply = get_register(device, 'battery_status', 0x07)\n\tif reply:\n\t\tbattery_status = ord(reply[:1])\n\t\t_log.info(\"%s: 
battery status %02X\", device, battery_status)\n\n\ndef get_serial(device):\n\tif device.kind is None:\n\t\tdev_id = 0x03\n\t\treceiver = device\n\telse:\n\t\tdev_id = 0x30 + device.number - 1\n\t\treceiver = device.receiver\n\n\tserial = receiver.request(0x83B5, dev_id)\n\tif serial:\n\t\treturn _strhex(serial[1:5])\n\n\ndef get_firmware(device):\n\tfirmware = []\n\n\treply = device.request(0x81F1, 0x01)\n\tif reply:\n\t\tfw_version = _strhex(reply[1:3])\n\t\tfw_version = '%s.%s' % (fw_version[0:2], fw_version[2:4])\n\t\treply = device.request(0x81F1, 0x02)\n\t\tif reply:\n\t\t\tfw_version += '.B' + _strhex(reply[1:3])\n\t\tfw = _FirmwareInfo(FIRMWARE_KIND.Firmware, '', fw_version, None)\n\t\tfirmware.append(fw)\n\n\treply = device.request(0x81F1, 0x04)\n\tif reply:\n\t\tbl_version = _strhex(reply[1:3])\n\t\tbl_version = '%s.%s' % (bl_version[0:2], bl_version[2:4])\n\t\tbl = _FirmwareInfo(FIRMWARE_KIND.Bootloader, '', bl_version, None)\n\t\tfirmware.append(bl)\n\n\treturn tuple(firmware)\n", "path": "lib/logitech/unifying_receiver/hidpp10.py"}]} | 2,017 | 253 |
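For readers unfamiliar with the HID++ register layout, a toy decoder mirroring the mapping the patch introduces for the 0x07 battery-status register. The layout (coarse level in byte 0, charging flag 0x25 in byte 2) is taken from the patch itself; the function name is illustrative.

def decode_battery_status(reply: bytes):
    # Coarse level -> approximate charge percentage (7=full ... 1=critical).
    charge = {7: 90, 5: 50, 3: 20, 1: 5}.get(reply[0], 0)
    status = "charging" if reply[2] == 0x25 else "discharging"
    return charge, status

print(decode_battery_status(bytes([7, 0x00, 0x25])))  # (90, 'charging')
print(decode_battery_status(bytes([3, 0x00, 0x00])))  # (20, 'discharging')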
gh_patches_debug_12366 | rasdani/github-patches | git_diff | airctic__icevision-883 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add kwargs to EfficientDet model() method
Add kwargs to the EfficientDet model() method. This will allow passing the `pretrained_backbone` argument through to EfficientDet's `create_model_from_config()` method, which lets the user skip loading pretrained weights if they wish to do so.
</issue>
<code>
[start of icevision/models/ross/efficientdet/model.py]
1 __all__ = ["model"]
2
3 from icevision.imports import *
4 from icevision.utils import *
5 from icevision.models.ross.efficientdet.utils import *
6 from icevision.models.ross.efficientdet.backbones import *
7 from effdet import get_efficientdet_config, EfficientDet, DetBenchTrain, unwrap_bench
8 from effdet import create_model_from_config
9 from effdet.efficientdet import HeadNet
10
11
12 def model(
13 backbone: EfficientDetBackboneConfig,
14 num_classes: int,
15 img_size: int,
16 ) -> nn.Module:
17 """Creates the efficientdet model specified by `model_name`.
18
19 The model implementation is by Ross Wightman, original repo
20 [here](https://github.com/rwightman/efficientdet-pytorch).
21
22 # Arguments
23 backbone: Specifies the backbone to use create the model. For pretrained models, check
24 [this](https://github.com/rwightman/efficientdet-pytorch#models) table.
25 num_classes: Number of classes of your dataset (including background).
26 img_size: Image size that will be fed to the model. Must be squared and
27 divisible by 128.
28
29 # Returns
30 A PyTorch model.
31 """
32 model_name = backbone.model_name
33 config = get_efficientdet_config(model_name=model_name)
34 config.image_size = (img_size, img_size) if isinstance(img_size, int) else img_size
35
36 model_bench = create_model_from_config(
37 config,
38 bench_task="train",
39 bench_labeler=True,
40 num_classes=num_classes - 1,
41 pretrained=backbone.pretrained,
42 )
43
44 # TODO: Break down param groups for backbone
45 def param_groups_fn(model: nn.Module) -> List[List[nn.Parameter]]:
46 unwrapped = unwrap_bench(model)
47
48 layers = [
49 unwrapped.backbone,
50 unwrapped.fpn,
51 nn.Sequential(unwrapped.class_net, unwrapped.box_net),
52 ]
53 param_groups = [list(layer.parameters()) for layer in layers]
54 check_all_model_params_in_groups2(model, param_groups)
55
56 return param_groups
57
58 model_bench.param_groups = MethodType(param_groups_fn, model_bench)
59
60 return model_bench
61
[end of icevision/models/ross/efficientdet/model.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/icevision/models/ross/efficientdet/model.py b/icevision/models/ross/efficientdet/model.py
--- a/icevision/models/ross/efficientdet/model.py
+++ b/icevision/models/ross/efficientdet/model.py
@@ -13,6 +13,7 @@
backbone: EfficientDetBackboneConfig,
num_classes: int,
img_size: int,
+ **kwargs,
) -> nn.Module:
"""Creates the efficientdet model specified by `model_name`.
@@ -39,6 +40,7 @@
bench_labeler=True,
num_classes=num_classes - 1,
pretrained=backbone.pretrained,
+ **kwargs,
)
# TODO: Break down param groups for backbone
| {"golden_diff": "diff --git a/icevision/models/ross/efficientdet/model.py b/icevision/models/ross/efficientdet/model.py\n--- a/icevision/models/ross/efficientdet/model.py\n+++ b/icevision/models/ross/efficientdet/model.py\n@@ -13,6 +13,7 @@\n backbone: EfficientDetBackboneConfig,\n num_classes: int,\n img_size: int,\n+ **kwargs,\n ) -> nn.Module:\n \"\"\"Creates the efficientdet model specified by `model_name`.\n \n@@ -39,6 +40,7 @@\n bench_labeler=True,\n num_classes=num_classes - 1,\n pretrained=backbone.pretrained,\n+ **kwargs,\n )\n \n # TODO: Break down param groups for backbone\n", "issue": "Add kwargs to EfficientDet model() method\nAdd kwargs to EfficientDet model() method. This will allow to pass `pretrained_backbone` argument to EfficientDet `create_model_from_config()` method. That will prevent loading pretrained weights if the user wish to do so\n", "before_files": [{"content": "__all__ = [\"model\"]\n\nfrom icevision.imports import *\nfrom icevision.utils import *\nfrom icevision.models.ross.efficientdet.utils import *\nfrom icevision.models.ross.efficientdet.backbones import *\nfrom effdet import get_efficientdet_config, EfficientDet, DetBenchTrain, unwrap_bench\nfrom effdet import create_model_from_config\nfrom effdet.efficientdet import HeadNet\n\n\ndef model(\n backbone: EfficientDetBackboneConfig,\n num_classes: int,\n img_size: int,\n) -> nn.Module:\n \"\"\"Creates the efficientdet model specified by `model_name`.\n\n The model implementation is by Ross Wightman, original repo\n [here](https://github.com/rwightman/efficientdet-pytorch).\n\n # Arguments\n backbone: Specifies the backbone to use create the model. For pretrained models, check\n [this](https://github.com/rwightman/efficientdet-pytorch#models) table.\n num_classes: Number of classes of your dataset (including background).\n img_size: Image size that will be fed to the model. Must be squared and\n divisible by 128.\n\n # Returns\n A PyTorch model.\n \"\"\"\n model_name = backbone.model_name\n config = get_efficientdet_config(model_name=model_name)\n config.image_size = (img_size, img_size) if isinstance(img_size, int) else img_size\n\n model_bench = create_model_from_config(\n config,\n bench_task=\"train\",\n bench_labeler=True,\n num_classes=num_classes - 1,\n pretrained=backbone.pretrained,\n )\n\n # TODO: Break down param groups for backbone\n def param_groups_fn(model: nn.Module) -> List[List[nn.Parameter]]:\n unwrapped = unwrap_bench(model)\n\n layers = [\n unwrapped.backbone,\n unwrapped.fpn,\n nn.Sequential(unwrapped.class_net, unwrapped.box_net),\n ]\n param_groups = [list(layer.parameters()) for layer in layers]\n check_all_model_params_in_groups2(model, param_groups)\n\n return param_groups\n\n model_bench.param_groups = MethodType(param_groups_fn, model_bench)\n\n return model_bench\n", "path": "icevision/models/ross/efficientdet/model.py"}]} | 1,199 | 167 |
gh_patches_debug_1 | rasdani/github-patches | git_diff | kartoza__prj.app-866 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Only one organisation can be created per account
It seems that only one organisation can be created from a login account. The folks at Camptocamp have two separate organisations (companies) and are unable to create the second organisation from their login.
</issue>
<code>
[start of django_project/core/settings/__init__.py]
[end of django_project/core/settings/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/django_project/core/settings/__init__.py b/django_project/core/settings/__init__.py
--- a/django_project/core/settings/__init__.py
+++ b/django_project/core/settings/__init__.py
@@ -0,0 +1 @@
+# coding=utf-8
| {"golden_diff": "diff --git a/django_project/core/settings/__init__.py b/django_project/core/settings/__init__.py\n--- a/django_project/core/settings/__init__.py\n+++ b/django_project/core/settings/__init__.py\n@@ -0,0 +1 @@\n+# coding=utf-8\n", "issue": "Only one organisation can be created per account \nIt seems that only one organisation can be created from a login account. The folks at Camptocamp have two separate organisations (companies) and are unable to create the second organisation from their login.\r\n\n", "before_files": [{"content": "", "path": "django_project/core/settings/__init__.py"}]} | 593 | 63 |
gh_patches_debug_2249 | rasdani/github-patches | git_diff | rasterio__rasterio-598 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
rio warp null transformer error with bad proj4
Currently, if you pass a bad projection, you get the following behavior:
```
$ rio warp --dst-crs "+proj=foobar" tests/data/warp_test.tif /tmp/foo.tif
ERROR:GDAL:CPLE_NotSupported in Failed to initialize PROJ.4 with `+proj=foobar +wktext'.
Traceback (most recent call last):
...
File "/Users/mperry/work/rasterio/rasterio/rio/warp.py", line 198, in warp
resolution=res)
File "/Users/mperry/work/rasterio/rasterio/warp.py", line 296, in calculate_default_transform
left, bottom, right, top)
File "rasterio/_warp.pyx", line 535, in rasterio._warp._calculate_default_transform (rasterio/_warp.cpp:9551)
with InMemoryRaster(
File "rasterio/_warp.pyx", line 542, in rasterio._warp._calculate_default_transform (rasterio/_warp.cpp:9261)
raise ValueError("NULL transformer")
ValueError: NULL transformer
```
The transformer fails to initialize, which is reasonable considering the invalid proj string. Is there any way to catch that error and report back something more meaningful than "NULL transformer"?
</issue>
<code>
[start of rasterio/errors.py]
1 """A module of errors."""
2
3 from click import FileError
4
5
6 class RasterioIOError(IOError):
7 """A failure to open a dataset using the presently registered drivers."""
8
9
10 class RasterioDriverRegistrationError(ValueError):
11 """To be raised when, eg, _gdal.GDALGetDriverByName("MEM") returns NULL."""
12
13
14 class FileOverwriteError(FileError):
15 """Rasterio's CLI refuses to implicitly clobber output files."""
16
17 def __init__(self, message):
18 super(FileOverwriteError, self).__init__('', hint=message)
19
[end of rasterio/errors.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/rasterio/errors.py b/rasterio/errors.py
--- a/rasterio/errors.py
+++ b/rasterio/errors.py
@@ -7,7 +7,7 @@
"""A failure to open a dataset using the presently registered drivers."""
-class RasterioDriverRegistrationError(ValueError):
+class DriverRegistrationError(ValueError):
"""To be raised when, eg, _gdal.GDALGetDriverByName("MEM") returns NULL."""
| {"golden_diff": "diff --git a/rasterio/errors.py b/rasterio/errors.py\n--- a/rasterio/errors.py\n+++ b/rasterio/errors.py\n@@ -7,7 +7,7 @@\n \"\"\"A failure to open a dataset using the presently registered drivers.\"\"\"\n \n \n-class RasterioDriverRegistrationError(ValueError):\n+class DriverRegistrationError(ValueError):\n \"\"\"To be raised when, eg, _gdal.GDALGetDriverByName(\"MEM\") returns NULL.\"\"\"\n", "issue": "rio warp null transformer error with bad proj4\nCurrently, if you pass a bad projection, you get the following behavior:\n\n```\n$ rio warp --dst-crs \"+proj=foobar\" tests/data/warp_test.tif /tmp/foo.tif\nERROR:GDAL:CPLE_NotSupported in Failed to initialize PROJ.4 with `+proj=foobar +wktext'.\nTraceback (most recent call last):\n...\n File \"/Users/mperry/work/rasterio/rasterio/rio/warp.py\", line 198, in warp\n resolution=res)\n File \"/Users/mperry/work/rasterio/rasterio/warp.py\", line 296, in calculate_default_transform\n left, bottom, right, top)\n File \"rasterio/_warp.pyx\", line 535, in rasterio._warp._calculate_default_transform (rasterio/_warp.cpp:9551)\n with InMemoryRaster(\n File \"rasterio/_warp.pyx\", line 542, in rasterio._warp._calculate_default_transform (rasterio/_warp.cpp:9261)\n raise ValueError(\"NULL transformer\")\nValueError: NULL transformer\n```\n\nThe transformer fails to initialize, which is reasonable considering the invalid proj string. Is there any way to catch that error and report back something more meaningful than \"NULL transformer\"?\n\n", "before_files": [{"content": "\"\"\"A module of errors.\"\"\"\n\nfrom click import FileError\n\n\nclass RasterioIOError(IOError):\n \"\"\"A failure to open a dataset using the presently registered drivers.\"\"\"\n\n\nclass RasterioDriverRegistrationError(ValueError):\n \"\"\"To be raised when, eg, _gdal.GDALGetDriverByName(\"MEM\") returns NULL.\"\"\"\n\n\nclass FileOverwriteError(FileError):\n \"\"\"Rasterio's CLI refuses to implicitly clobber output files.\"\"\"\n\n def __init__(self, message):\n super(FileOverwriteError, self).__init__('', hint=message)\n", "path": "rasterio/errors.py"}]} | 979 | 97 |
gh_patches_debug_1376 | rasdani/github-patches | git_diff | flairNLP__flair-419 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Logging overwrite less sweeping
To be removed, once it is done: Please add the appropriate label to this ticket, e.g. feature or enhancement.
**Is your feature/enhancement request related to a problem? Please describe.**
When using flair in other applications, the fact that it disables existing logs in `__init__.py` can be detrimental. For instance when wrapping it up as a component in a tool like rasa_nlu, importing flair overrides all logging except its own, breaking functionality in rasa_nlu.
This is the [line that does so ](https://github.com/zalandoresearch/flair/blob/c2bb0d8776f25493a5b994dcd89a96f71ac175b8/flair/__init__.py#L13) and it was done on purpose to disable BERT logging in #282 .
**Describe the solution you'd like**
Ideally, the problem of disabling logging from certain known dependencies should be much more limited in scope. Importing flair as a package shouldn't disable all the other loggers. At a minimum, perhaps the logging could only *optionally* disable all existing logs
</issue>
<code>
[start of flair/__init__.py]
1 import torch
2
3 from . import data
4 from . import models
5 from . import visual
6 from . import trainers
7
8 import logging.config
9
10
11 logging.config.dictConfig({
12 'version': 1,
13 'disable_existing_loggers': True,
14 'formatters': {
15 'standard': {
16 'format': '%(asctime)-15s %(message)s'
17 },
18 },
19 'handlers': {
20 'console': {
21 'level': 'INFO',
22 'class': 'logging.StreamHandler',
23 'formatter': 'standard',
24 'stream': 'ext://sys.stdout'
25 },
26 },
27 'loggers': {
28 'flair': {
29 'handlers': ['console'],
30 'level': 'INFO',
31 'propagate': False
32 }
33 },
34 'root': {
35 'handlers': ['console'],
36 'level': 'WARNING'
37 }
38 })
39
40 logger = logging.getLogger('flair')
41
42
43 device = None
44 if torch.cuda.is_available():
45 device = torch.device('cuda:0')
46 else:
47 device = torch.device('cpu')
48
[end of flair/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/flair/__init__.py b/flair/__init__.py
--- a/flair/__init__.py
+++ b/flair/__init__.py
@@ -10,7 +10,7 @@
logging.config.dictConfig({
'version': 1,
- 'disable_existing_loggers': True,
+ 'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': '%(asctime)-15s %(message)s'
| {"golden_diff": "diff --git a/flair/__init__.py b/flair/__init__.py\n--- a/flair/__init__.py\n+++ b/flair/__init__.py\n@@ -10,7 +10,7 @@\n \n logging.config.dictConfig({\n 'version': 1,\n- 'disable_existing_loggers': True,\n+ 'disable_existing_loggers': False,\n 'formatters': {\n 'standard': {\n 'format': '%(asctime)-15s %(message)s'\n", "issue": "Logging overwrite less sweeping\nTo be removed, once it is done: Please add the appropriate label to this ticket, e.g. feature or enhancement.\r\n\r\n**Is your feature/enhancement request related to a problem? Please describe.**\r\nWhen using flair in other applications, the fact that it disables existing logs in `__init__.py` can be detrimental. For instance when wrapping it up as a component in a tool like rasa_nlu, importing flair overrides all logging except its own, breaking functionality in rasa_nlu.\r\n\r\nThis is the [line that does so ](https://github.com/zalandoresearch/flair/blob/c2bb0d8776f25493a5b994dcd89a96f71ac175b8/flair/__init__.py#L13) and it was done on purpose to disable BERT logging in #282 .\r\n\r\n**Describe the solution you'd like**\r\nIdeally, the problem of disabling logging from certain known dependencies should be much more limited in scope. Importing flair as a package shouldn't disable all the other loggers. At a minimum, perhaps the logging could only *optionally* disable all existing logs\r\n\n", "before_files": [{"content": "import torch\n\nfrom . import data\nfrom . import models\nfrom . import visual\nfrom . import trainers\n\nimport logging.config\n\n\nlogging.config.dictConfig({\n 'version': 1,\n 'disable_existing_loggers': True,\n 'formatters': {\n 'standard': {\n 'format': '%(asctime)-15s %(message)s'\n },\n },\n 'handlers': {\n 'console': {\n 'level': 'INFO',\n 'class': 'logging.StreamHandler',\n 'formatter': 'standard',\n 'stream': 'ext://sys.stdout'\n },\n },\n 'loggers': {\n 'flair': {\n 'handlers': ['console'],\n 'level': 'INFO',\n 'propagate': False\n }\n },\n 'root': {\n 'handlers': ['console'],\n 'level': 'WARNING'\n }\n})\n\nlogger = logging.getLogger('flair')\n\n\ndevice = None\nif torch.cuda.is_available():\n device = torch.device('cuda:0')\nelse:\n device = torch.device('cpu')\n", "path": "flair/__init__.py"}]} | 1,108 | 108 |
gh_patches_debug_21996 | rasdani/github-patches | git_diff | qtile__qtile-2490 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
widget.CheckUpdates on Fedora: show the correct number of updates
widget.CheckUpdates on Fedora shows -2 updates when no updates are found.
Excerpt from my config.py:
```
widget.CheckUpdates(
distro='Fedora',
display_format=' {updates} updates',
colour_have_updates=colors[3],
no_update_string=' no update',
update_interval=1800,
colour_no_updates=colors[5],
background=colors[8],
),
```
# Qtile version
0.17.1dev
# distro
Fedora 34
</issue>
<code>
[start of libqtile/widget/check_updates.py]
1 # Copyright (c) 2015 Ali Mousavi
2 #
3 # Permission is hereby granted, free of charge, to any person obtaining a copy
4 # of this software and associated documentation files (the "Software"), to deal
5 # in the Software without restriction, including without limitation the rights
6 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7 # copies of the Software, and to permit persons to whom the Software is
8 # furnished to do so, subject to the following conditions:
9 #
10 # The above copyright notice and this permission notice shall be included in
11 # all copies or substantial portions of the Software.
12 #
13 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
19 # SOFTWARE.
20
21 import os
22 from subprocess import CalledProcessError, Popen
23
24 from libqtile.log_utils import logger
25 from libqtile.widget import base
26
27
28 class CheckUpdates(base.ThreadPoolText):
29 """Shows number of pending updates in different unix systems"""
30 orientations = base.ORIENTATION_HORIZONTAL
31 defaults = [
32 ("distro", "Arch", "Name of your distribution"),
33 ("custom_command", None, "Custom shell command for checking updates (counts the lines of the output)"),
34 ("custom_command_modify", (lambda x: x), "Lambda function to modify line count from custom_command"),
35 ("update_interval", 60, "Update interval in seconds."),
36 ('execute', None, 'Command to execute on click'),
37 ("display_format", "Updates: {updates}", "Display format if updates available"),
38 ("colour_no_updates", "ffffff", "Colour when there's no updates."),
39 ("colour_have_updates", "ffffff", "Colour when there are updates."),
40 ("restart_indicator", "", "Indicator to represent reboot is required. (Ubuntu only)"),
41 ("no_update_string", "", "String to display if no updates available")
42 ]
43
44 def __init__(self, **config):
45 base.ThreadPoolText.__init__(self, "", **config)
46 self.add_defaults(CheckUpdates.defaults)
47
48 # Helpful to have this as a variable as we can shorten it for testing
49 self.execute_polling_interval = 1
50
51 # format: "Distro": ("cmd", "number of lines to subtract from output")
52 self.cmd_dict = {"Arch": ("pacman -Qu", 0),
53 "Arch_checkupdates": ("checkupdates", 0),
54 "Arch_Sup": ("pacman -Sup", 1),
55 "Arch_yay": ("yay -Qu", 0),
56 "Debian": ("apt-show-versions -u -b", 0),
57 "Ubuntu": ("aptitude search ~U", 0),
58 "Fedora": ("dnf list updates", 3),
59 "FreeBSD": ("pkg_version -I -l '<'", 0),
60 "Mandriva": ("urpmq --auto-select", 0)
61 }
62
63 if self.custom_command:
64 # Use custom_command
65 self.cmd = self.custom_command
66
67 else:
68 # Check if distro name is valid.
69 try:
70 self.cmd = self.cmd_dict[self.distro][0]
71 self.custom_command_modify = (lambda x: x - self.cmd_dict[self.distro][1])
72 except KeyError:
73 distros = sorted(self.cmd_dict.keys())
74 logger.error(self.distro + ' is not a valid distro name. ' +
75 'Use one of the list: ' + str(distros) + '.')
76 self.cmd = None
77
78 if self.execute:
79 self.add_callbacks({'Button1': self.do_execute})
80
81 def _check_updates(self):
82 # type: () -> str
83 try:
84 updates = self.call_process(self.cmd, shell=True)
85 except CalledProcessError:
86 updates = ""
87 num_updates = self.custom_command_modify(len(updates.splitlines()))
88
89 if num_updates == 0:
90 self.layout.colour = self.colour_no_updates
91 return self.no_update_string
92 num_updates = str(num_updates)
93
94 if self.restart_indicator and os.path.exists('/var/run/reboot-required'):
95 num_updates += self.restart_indicator
96
97 self.layout.colour = self.colour_have_updates
98 return self.display_format.format(**{"updates": num_updates})
99
100 def poll(self):
101 # type: () -> str
102 if not self.cmd:
103 return "N/A"
104 return self._check_updates()
105
106 def do_execute(self):
107 self._process = Popen(self.execute, shell=True)
108 self.timeout_add(self.execute_polling_interval, self._refresh_count)
109
110 def _refresh_count(self):
111 if self._process.poll() is None:
112 self.timeout_add(self.execute_polling_interval, self._refresh_count)
113
114 else:
115 self.timer_setup()
116
[end of libqtile/widget/check_updates.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/libqtile/widget/check_updates.py b/libqtile/widget/check_updates.py
--- a/libqtile/widget/check_updates.py
+++ b/libqtile/widget/check_updates.py
@@ -55,7 +55,7 @@
"Arch_yay": ("yay -Qu", 0),
"Debian": ("apt-show-versions -u -b", 0),
"Ubuntu": ("aptitude search ~U", 0),
- "Fedora": ("dnf list updates", 3),
+ "Fedora": ("dnf list updates -q", 1),
"FreeBSD": ("pkg_version -I -l '<'", 0),
"Mandriva": ("urpmq --auto-select", 0)
}
@@ -86,6 +86,8 @@
updates = ""
num_updates = self.custom_command_modify(len(updates.splitlines()))
+ if num_updates < 0:
+ num_updates = 0
if num_updates == 0:
self.layout.colour = self.colour_no_updates
return self.no_update_string
| {"golden_diff": "diff --git a/libqtile/widget/check_updates.py b/libqtile/widget/check_updates.py\n--- a/libqtile/widget/check_updates.py\n+++ b/libqtile/widget/check_updates.py\n@@ -55,7 +55,7 @@\n \"Arch_yay\": (\"yay -Qu\", 0),\n \"Debian\": (\"apt-show-versions -u -b\", 0),\n \"Ubuntu\": (\"aptitude search ~U\", 0),\n- \"Fedora\": (\"dnf list updates\", 3),\n+ \"Fedora\": (\"dnf list updates -q\", 1),\n \"FreeBSD\": (\"pkg_version -I -l '<'\", 0),\n \"Mandriva\": (\"urpmq --auto-select\", 0)\n }\n@@ -86,6 +86,8 @@\n updates = \"\"\n num_updates = self.custom_command_modify(len(updates.splitlines()))\n \n+ if num_updates < 0:\n+ num_updates = 0\n if num_updates == 0:\n self.layout.colour = self.colour_no_updates\n return self.no_update_string\n", "issue": "widget.CheckUpdates on Fedora: show the correct number of updates\nwidget.CheckUpdates on Fedora shows -2 updates when no updates found.\r\n\r\nExcerpt from my config.py:\r\n```\r\nwidget.CheckUpdates(\r\n distro='Fedora',\r\n display_format=' {updates} updates',\r\n colour_have_updates=colors[3],\r\n no_update_string=' no update',\r\n update_interval=1800,\r\n colour_no_updates=colors[5],\r\n background=colors[8],\r\n ),\r\n```\r\n# Qtile version\r\n0.17.1dev\r\n# distro\r\nFedora 34\n", "before_files": [{"content": "# Copyright (c) 2015 Ali Mousavi\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport os\nfrom subprocess import CalledProcessError, Popen\n\nfrom libqtile.log_utils import logger\nfrom libqtile.widget import base\n\n\nclass CheckUpdates(base.ThreadPoolText):\n \"\"\"Shows number of pending updates in different unix systems\"\"\"\n orientations = base.ORIENTATION_HORIZONTAL\n defaults = [\n (\"distro\", \"Arch\", \"Name of your distribution\"),\n (\"custom_command\", None, \"Custom shell command for checking updates (counts the lines of the output)\"),\n (\"custom_command_modify\", (lambda x: x), \"Lambda function to modify line count from custom_command\"),\n (\"update_interval\", 60, \"Update interval in seconds.\"),\n ('execute', None, 'Command to execute on click'),\n (\"display_format\", \"Updates: {updates}\", \"Display format if updates available\"),\n (\"colour_no_updates\", \"ffffff\", \"Colour when there's no updates.\"),\n (\"colour_have_updates\", \"ffffff\", \"Colour when there are updates.\"),\n (\"restart_indicator\", \"\", \"Indicator to represent reboot is required. 
(Ubuntu only)\"),\n (\"no_update_string\", \"\", \"String to display if no updates available\")\n ]\n\n def __init__(self, **config):\n base.ThreadPoolText.__init__(self, \"\", **config)\n self.add_defaults(CheckUpdates.defaults)\n\n # Helpful to have this as a variable as we can shorten it for testing\n self.execute_polling_interval = 1\n\n # format: \"Distro\": (\"cmd\", \"number of lines to subtract from output\")\n self.cmd_dict = {\"Arch\": (\"pacman -Qu\", 0),\n \"Arch_checkupdates\": (\"checkupdates\", 0),\n \"Arch_Sup\": (\"pacman -Sup\", 1),\n \"Arch_yay\": (\"yay -Qu\", 0),\n \"Debian\": (\"apt-show-versions -u -b\", 0),\n \"Ubuntu\": (\"aptitude search ~U\", 0),\n \"Fedora\": (\"dnf list updates\", 3),\n \"FreeBSD\": (\"pkg_version -I -l '<'\", 0),\n \"Mandriva\": (\"urpmq --auto-select\", 0)\n }\n\n if self.custom_command:\n # Use custom_command\n self.cmd = self.custom_command\n\n else:\n # Check if distro name is valid.\n try:\n self.cmd = self.cmd_dict[self.distro][0]\n self.custom_command_modify = (lambda x: x - self.cmd_dict[self.distro][1])\n except KeyError:\n distros = sorted(self.cmd_dict.keys())\n logger.error(self.distro + ' is not a valid distro name. ' +\n 'Use one of the list: ' + str(distros) + '.')\n self.cmd = None\n\n if self.execute:\n self.add_callbacks({'Button1': self.do_execute})\n\n def _check_updates(self):\n # type: () -> str\n try:\n updates = self.call_process(self.cmd, shell=True)\n except CalledProcessError:\n updates = \"\"\n num_updates = self.custom_command_modify(len(updates.splitlines()))\n\n if num_updates == 0:\n self.layout.colour = self.colour_no_updates\n return self.no_update_string\n num_updates = str(num_updates)\n\n if self.restart_indicator and os.path.exists('/var/run/reboot-required'):\n num_updates += self.restart_indicator\n\n self.layout.colour = self.colour_have_updates\n return self.display_format.format(**{\"updates\": num_updates})\n\n def poll(self):\n # type: () -> str\n if not self.cmd:\n return \"N/A\"\n return self._check_updates()\n\n def do_execute(self):\n self._process = Popen(self.execute, shell=True)\n self.timeout_add(self.execute_polling_interval, self._refresh_count)\n\n def _refresh_count(self):\n if self._process.poll() is None:\n self.timeout_add(self.execute_polling_interval, self._refresh_count)\n\n else:\n self.timer_setup()\n", "path": "libqtile/widget/check_updates.py"}]} | 2,006 | 243 |
gh_patches_debug_12045 | rasdani/github-patches | git_diff | carpentries__amy-2639 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Require email reschedule date/time to be in the future
Currently past date/time values are allowed.
</issue>
<code>
[start of amy/emails/forms.py]
1 from django import forms
2 from markdownx.fields import MarkdownxFormField
3
4 from emails.models import EmailTemplate, ScheduledEmail
5 from emails.signals import SignalNameEnum
6 from workshops.forms import BootstrapHelper
7
8
9 class EmailTemplateCreateForm(forms.ModelForm):
10 body = MarkdownxFormField(
11 label=EmailTemplate._meta.get_field("body").verbose_name,
12 help_text=EmailTemplate._meta.get_field("body").help_text,
13 widget=forms.Textarea,
14 )
15 signal = forms.CharField(
16 help_text=EmailTemplate._meta.get_field("signal").help_text,
17 widget=forms.Select(choices=SignalNameEnum.choices()),
18 )
19
20 class Meta:
21 model = EmailTemplate
22 fields = [
23 "name",
24 "active",
25 "signal",
26 "from_header",
27 "reply_to_header",
28 "cc_header",
29 "bcc_header",
30 "subject",
31 "body",
32 ]
33
34 def __init__(self, *args, **kwargs):
35 super().__init__(*args, **kwargs)
36
37 array_email_field_help_text = "Separate email addresses with a comma"
38 self.fields["cc_header"].help_text = array_email_field_help_text
39 self.fields["bcc_header"].help_text = array_email_field_help_text
40
41
42 class EmailTemplateUpdateForm(EmailTemplateCreateForm):
43 signal = forms.CharField(
44 required=False,
45 disabled=True,
46 help_text=EmailTemplate._meta.get_field("signal").help_text,
47 widget=forms.Select(choices=SignalNameEnum.choices()),
48 )
49
50 class Meta(EmailTemplateCreateForm.Meta):
51 pass
52
53
54 class ScheduledEmailUpdateForm(forms.ModelForm):
55 body = MarkdownxFormField(
56 label=ScheduledEmail._meta.get_field("body").verbose_name,
57 help_text=ScheduledEmail._meta.get_field("body").help_text,
58 widget=forms.Textarea,
59 )
60
61 class Meta:
62 model = ScheduledEmail
63 fields = [
64 "to_header",
65 "from_header",
66 "reply_to_header",
67 "cc_header",
68 "bcc_header",
69 "subject",
70 "body",
71 ]
72
73 def __init__(self, *args, **kwargs):
74 super().__init__(*args, **kwargs)
75
76 array_email_field_help_text = "Separate email addresses with a comma"
77 self.fields["to_header"].help_text = array_email_field_help_text
78 self.fields["cc_header"].help_text = array_email_field_help_text
79 self.fields["bcc_header"].help_text = array_email_field_help_text
80
81
82 class ScheduledEmailRescheduleForm(forms.Form):
83 scheduled_at = forms.SplitDateTimeField(
84 label=ScheduledEmail._meta.get_field("scheduled_at").verbose_name,
85 help_text="Time in UTC",
86 )
87
88 helper = BootstrapHelper(submit_label="Update")
89
90
91 class ScheduledEmailCancelForm(forms.Form):
92 confirm = forms.CharField(required=False)
93 decline = forms.CharField(required=False)
94
[end of amy/emails/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/amy/emails/forms.py b/amy/emails/forms.py
--- a/amy/emails/forms.py
+++ b/amy/emails/forms.py
@@ -1,3 +1,5 @@
+from datetime import UTC, datetime
+
from django import forms
from markdownx.fields import MarkdownxFormField
@@ -87,6 +89,14 @@
helper = BootstrapHelper(submit_label="Update")
+ def clean_scheduled_at(self):
+ scheduled_at = self.cleaned_data["scheduled_at"]
+
+ if scheduled_at < datetime.now(tz=UTC):
+ raise forms.ValidationError("Scheduled time cannot be in the past.")
+
+ return scheduled_at
+
class ScheduledEmailCancelForm(forms.Form):
confirm = forms.CharField(required=False)
| {"golden_diff": "diff --git a/amy/emails/forms.py b/amy/emails/forms.py\n--- a/amy/emails/forms.py\n+++ b/amy/emails/forms.py\n@@ -1,3 +1,5 @@\n+from datetime import UTC, datetime\n+\n from django import forms\n from markdownx.fields import MarkdownxFormField\n \n@@ -87,6 +89,14 @@\n \n helper = BootstrapHelper(submit_label=\"Update\")\n \n+ def clean_scheduled_at(self):\n+ scheduled_at = self.cleaned_data[\"scheduled_at\"]\n+\n+ if scheduled_at < datetime.now(tz=UTC):\n+ raise forms.ValidationError(\"Scheduled time cannot be in the past.\")\n+\n+ return scheduled_at\n+\n \n class ScheduledEmailCancelForm(forms.Form):\n confirm = forms.CharField(required=False)\n", "issue": "Require email reschedule date/time to be in the future\nCurrently past date/time values are allowed. \n", "before_files": [{"content": "from django import forms\nfrom markdownx.fields import MarkdownxFormField\n\nfrom emails.models import EmailTemplate, ScheduledEmail\nfrom emails.signals import SignalNameEnum\nfrom workshops.forms import BootstrapHelper\n\n\nclass EmailTemplateCreateForm(forms.ModelForm):\n body = MarkdownxFormField(\n label=EmailTemplate._meta.get_field(\"body\").verbose_name,\n help_text=EmailTemplate._meta.get_field(\"body\").help_text,\n widget=forms.Textarea,\n )\n signal = forms.CharField(\n help_text=EmailTemplate._meta.get_field(\"signal\").help_text,\n widget=forms.Select(choices=SignalNameEnum.choices()),\n )\n\n class Meta:\n model = EmailTemplate\n fields = [\n \"name\",\n \"active\",\n \"signal\",\n \"from_header\",\n \"reply_to_header\",\n \"cc_header\",\n \"bcc_header\",\n \"subject\",\n \"body\",\n ]\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n array_email_field_help_text = \"Separate email addresses with a comma\"\n self.fields[\"cc_header\"].help_text = array_email_field_help_text\n self.fields[\"bcc_header\"].help_text = array_email_field_help_text\n\n\nclass EmailTemplateUpdateForm(EmailTemplateCreateForm):\n signal = forms.CharField(\n required=False,\n disabled=True,\n help_text=EmailTemplate._meta.get_field(\"signal\").help_text,\n widget=forms.Select(choices=SignalNameEnum.choices()),\n )\n\n class Meta(EmailTemplateCreateForm.Meta):\n pass\n\n\nclass ScheduledEmailUpdateForm(forms.ModelForm):\n body = MarkdownxFormField(\n label=ScheduledEmail._meta.get_field(\"body\").verbose_name,\n help_text=ScheduledEmail._meta.get_field(\"body\").help_text,\n widget=forms.Textarea,\n )\n\n class Meta:\n model = ScheduledEmail\n fields = [\n \"to_header\",\n \"from_header\",\n \"reply_to_header\",\n \"cc_header\",\n \"bcc_header\",\n \"subject\",\n \"body\",\n ]\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n array_email_field_help_text = \"Separate email addresses with a comma\"\n self.fields[\"to_header\"].help_text = array_email_field_help_text\n self.fields[\"cc_header\"].help_text = array_email_field_help_text\n self.fields[\"bcc_header\"].help_text = array_email_field_help_text\n\n\nclass ScheduledEmailRescheduleForm(forms.Form):\n scheduled_at = forms.SplitDateTimeField(\n label=ScheduledEmail._meta.get_field(\"scheduled_at\").verbose_name,\n help_text=\"Time in UTC\",\n )\n\n helper = BootstrapHelper(submit_label=\"Update\")\n\n\nclass ScheduledEmailCancelForm(forms.Form):\n confirm = forms.CharField(required=False)\n decline = forms.CharField(required=False)\n", "path": "amy/emails/forms.py"}]} | 1,348 | 170 |
gh_patches_debug_60690 | rasdani/github-patches | git_diff | biolab__orange3-text-356 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bag of Words: crashes if < 11 tokens on the input
##### Text version
0.3.0
##### Orange version
3.14.dev
##### Expected behavior
Bag of Words does not crash when given only a few tokens.
##### Actual behavior
BoW crashes if there are fewer than 11 tokens on the input and the Binary option is selected.
##### Steps to reproduce the behavior
Corpus - Preprocess Text (have it output fewer than 11 types) - Bag of Words (Binary)
</issue>
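
The crash traces to the `BINARY` term-weighting callback receiving an empty term-frequency array. Below is a hedged sketch of the guard that fixes it; the project's own patch keeps the original `np.greater(..., dtype=np.int)` call behind a `tf.size` check, while this illustration uses the equivalent `(tf > 0).astype(int)` so it also runs on current NumPy, where `np.int` no longer exists.

```python
import numpy as np


def binary_weight(tf):
    # The original lambda crashed when handed an empty array; guard that case.
    if tf.size:
        return (tf > 0).astype(int)
    return np.array([], dtype=int)


binary_weight(np.array([3, 0, 1]))  # -> array([1, 0, 1])
binary_weight(np.array([]))         # -> empty int array, no crash
```
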
<code>
[start of orangecontrib/text/vectorization/bagofwords.py]
1 """ This module constructs a new corpus with tokens as features.
2
3 First create a corpus::
4
5 >>> from orangecontrib.text import Corpus
6 >>> corpus = Corpus.from_file('deerwester')
7 >>> corpus.domain
8 [ | Category] {Text}
9
10 Then create :class:`BowVectorizer` object and call transform:
11
12 >>> from orangecontrib.text.vectorization.bagofwords import BowVectorizer
13 >>> bow = BowVectorizer()
14 >>> new_corpus = bow.transform(corpus)
15 >>> new_corpus.domain
16 [a, abc, and, applications, binary, computer, engineering, eps, error, for,
17 generation, graph, human, in, interface, intersection, iv, lab, machine,
18 management, measurement, minors, of, opinion, ordering, paths, perceived,
19 quasi, random, relation, response, survey, system, testing, the, time, to,
20 trees, unordered, user, well, widths | Category] {Text}
21
22 """
23
24 from collections import OrderedDict
25 from functools import partial
26
27 import numpy as np
28 from gensim import corpora, models, matutils
29 from sklearn.preprocessing import normalize
30
31 from orangecontrib.text.vectorization.base import BaseVectorizer,\
32 SharedTransform, VectorizationComputeValue
33
34
35 class BowVectorizer(BaseVectorizer):
36 name = 'BoW Vectorizer'
37
38 COUNT = 'Count'
39 BINARY = 'Binary'
40 SUBLINEAR = 'Sublinear'
41 NONE = '(None)'
42 IDF = 'IDF'
43 SMOOTH = 'Smooth IDF'
44 L1 = 'L1 (Sum of elements)'
45 L2 = 'L2 (Euclidean)'
46
47 wlocals = OrderedDict((
48 (COUNT, lambda tf: tf),
49 (BINARY, lambda tf: np.greater(tf, 0, dtype=np.int)),
50 (SUBLINEAR, lambda tf: 1 + np.log(tf)),
51 ))
52
53 wglobals = OrderedDict((
54 (NONE, lambda df, N: 1),
55 (IDF, lambda df, N: np.log(N/df)),
56 (SMOOTH, lambda df, N: np.log(1 + N/df)),
57 ))
58
59 norms = OrderedDict((
60 (NONE, None),
61 (L1, partial(normalize, norm='l1')),
62 (L2, partial(normalize, norm='l2')),
63 ))
64
65 def __init__(self, norm=NONE, wlocal=COUNT, wglobal=NONE):
66 self.norm = norm
67 self.wlocal = wlocal
68 self.wglobal = wglobal
69
70 def _transform(self, corpus, source_dict=None):
71 temp_corpus = list(corpus.ngrams_iterator(' ', include_postags=True))
72 dic = corpora.Dictionary(temp_corpus, prune_at=None) if not source_dict else source_dict
73 temp_corpus = [dic.doc2bow(doc) for doc in temp_corpus]
74 model = models.TfidfModel(temp_corpus, normalize=False,
75 wlocal=self.wlocals[self.wlocal],
76 wglobal=self.wglobals[self.wglobal])
77
78 X = matutils.corpus2csc(model[temp_corpus], dtype=np.float, num_terms=len(dic)).T
79 norm = self.norms[self.norm]
80 if norm:
81 X = norm(X)
82
83 # set compute values
84 shared_cv = SharedTransform(self, corpus.used_preprocessor,
85 source_dict=dic)
86 cv = [VectorizationComputeValue(shared_cv, dic[i])
87 for i in range(len(dic))]
88
89 self.add_features(corpus, X, dic, cv, var_attrs={'bow-feature': True})
90 return corpus
91
92 def report(self):
93 return (('Term Frequency', self.wlocal),
94 ('Document Frequency', self.wglobal),
95 ('Regularization', self.norm),)
96
[end of orangecontrib/text/vectorization/bagofwords.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/orangecontrib/text/vectorization/bagofwords.py b/orangecontrib/text/vectorization/bagofwords.py
--- a/orangecontrib/text/vectorization/bagofwords.py
+++ b/orangecontrib/text/vectorization/bagofwords.py
@@ -46,7 +46,8 @@
wlocals = OrderedDict((
(COUNT, lambda tf: tf),
- (BINARY, lambda tf: np.greater(tf, 0, dtype=np.int)),
+ (BINARY, lambda tf: np.greater(tf, 0, dtype=np.int) if tf.size
+ else np.array([], dtype=np.int)),
(SUBLINEAR, lambda tf: 1 + np.log(tf)),
))
| {"golden_diff": "diff --git a/orangecontrib/text/vectorization/bagofwords.py b/orangecontrib/text/vectorization/bagofwords.py\n--- a/orangecontrib/text/vectorization/bagofwords.py\n+++ b/orangecontrib/text/vectorization/bagofwords.py\n@@ -46,7 +46,8 @@\n \n wlocals = OrderedDict((\n (COUNT, lambda tf: tf),\n- (BINARY, lambda tf: np.greater(tf, 0, dtype=np.int)),\n+ (BINARY, lambda tf: np.greater(tf, 0, dtype=np.int) if tf.size\n+ else np.array([], dtype=np.int)),\n (SUBLINEAR, lambda tf: 1 + np.log(tf)),\n ))\n", "issue": "Bag of Words: crashes if < 11 tokens on the input\n<!--\r\nThis is an issue template. Please fill in the relevant details in the\r\nsections below.\r\n-->\r\n\r\n##### Text version\r\n<!-- From menu _Options\u2192Add-ons\u2192Orange3-Text_ or code `orangecontrib.text.version.full_version` -->\r\n0.3.0\r\n\r\n##### Orange version\r\n<!-- From menu _Help\u2192About\u2192Version_ or code `Orange.version.full_version` -->\r\n3.14.dev\r\n\r\n##### Expected behavior\r\nBag of Words doesn't crash on few tokens\r\n\r\n\r\n##### Actual behavior\r\nBoW crashes if less then 11 tokens on the input and Binary option selected.\r\n\r\n\r\n##### Steps to reproduce the behavior\r\nCorpus - Preprocess Text (have it output less than 11 types) - Bag of Words (Binary)\r\n\r\n\r\n##### Additional info (worksheets, data, screenshots, ...)\r\n\r\n\r\n\n", "before_files": [{"content": "\"\"\" This module constructs a new corpus with tokens as features.\n\nFirst create a corpus::\n\n >>> from orangecontrib.text import Corpus\n >>> corpus = Corpus.from_file('deerwester')\n >>> corpus.domain\n [ | Category] {Text}\n\nThen create :class:`BowVectorizer` object and call transform:\n\n >>> from orangecontrib.text.vectorization.bagofwords import BowVectorizer\n >>> bow = BowVectorizer()\n >>> new_corpus = bow.transform(corpus)\n >>> new_corpus.domain\n [a, abc, and, applications, binary, computer, engineering, eps, error, for,\n generation, graph, human, in, interface, intersection, iv, lab, machine,\n management, measurement, minors, of, opinion, ordering, paths, perceived,\n quasi, random, relation, response, survey, system, testing, the, time, to,\n trees, unordered, user, well, widths | Category] {Text}\n\n\"\"\"\n\nfrom collections import OrderedDict\nfrom functools import partial\n\nimport numpy as np\nfrom gensim import corpora, models, matutils\nfrom sklearn.preprocessing import normalize\n\nfrom orangecontrib.text.vectorization.base import BaseVectorizer,\\\n SharedTransform, VectorizationComputeValue\n\n\nclass BowVectorizer(BaseVectorizer):\n name = 'BoW Vectorizer'\n\n COUNT = 'Count'\n BINARY = 'Binary'\n SUBLINEAR = 'Sublinear'\n NONE = '(None)'\n IDF = 'IDF'\n SMOOTH = 'Smooth IDF'\n L1 = 'L1 (Sum of elements)'\n L2 = 'L2 (Euclidean)'\n\n wlocals = OrderedDict((\n (COUNT, lambda tf: tf),\n (BINARY, lambda tf: np.greater(tf, 0, dtype=np.int)),\n (SUBLINEAR, lambda tf: 1 + np.log(tf)),\n ))\n\n wglobals = OrderedDict((\n (NONE, lambda df, N: 1),\n (IDF, lambda df, N: np.log(N/df)),\n (SMOOTH, lambda df, N: np.log(1 + N/df)),\n ))\n\n norms = OrderedDict((\n (NONE, None),\n (L1, partial(normalize, norm='l1')),\n (L2, partial(normalize, norm='l2')),\n ))\n\n def __init__(self, norm=NONE, wlocal=COUNT, wglobal=NONE):\n self.norm = norm\n self.wlocal = wlocal\n self.wglobal = wglobal\n\n def _transform(self, corpus, source_dict=None):\n temp_corpus = list(corpus.ngrams_iterator(' ', include_postags=True))\n dic = corpora.Dictionary(temp_corpus, prune_at=None) if not source_dict else 
source_dict\n temp_corpus = [dic.doc2bow(doc) for doc in temp_corpus]\n model = models.TfidfModel(temp_corpus, normalize=False,\n wlocal=self.wlocals[self.wlocal],\n wglobal=self.wglobals[self.wglobal])\n\n X = matutils.corpus2csc(model[temp_corpus], dtype=np.float, num_terms=len(dic)).T\n norm = self.norms[self.norm]\n if norm:\n X = norm(X)\n\n # set compute values\n shared_cv = SharedTransform(self, corpus.used_preprocessor,\n source_dict=dic)\n cv = [VectorizationComputeValue(shared_cv, dic[i])\n for i in range(len(dic))]\n\n self.add_features(corpus, X, dic, cv, var_attrs={'bow-feature': True})\n return corpus\n\n def report(self):\n return (('Term Frequency', self.wlocal),\n ('Document Frequency', self.wglobal),\n ('Regularization', self.norm),)\n", "path": "orangecontrib/text/vectorization/bagofwords.py"}]} | 1,709 | 159 |
gh_patches_debug_30772 | rasdani/github-patches | git_diff | 3cn-ecn__nantralPlatform-446 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Pages lentes
Certaines pages sont un peu lentes à charger:
- liste des clubs
It may be related to the large number of images; it would be worth looking into caching these images.
</issue>
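
A likely contributor to the slowness, beyond the images, is the view issuing one query per BDX. The accompanying diff collapses this into a single query whose results are grouped in Python; here is a hedged sketch of that pattern (`Club` and its fields are the repository's, the helper itself is illustrative):

```python
from collections import defaultdict


def group_clubs(clubs):
    """Group already-fetched clubs by BDX label in a single pass."""
    groups = defaultdict(list)
    for club in clubs:
        if club.bdx_type is None:
            groups["Associations"].append(club)
        else:
            groups[f"Clubs {club.bdx_type.name}"].append(club)
    return groups


# One DB round-trip, loading only the columns the template needs:
# group_clubs(Club.objects.select_related("bdx_type")
#                         .only("name", "slug", "logo", "bdx_type"))
```
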
<code>
[start of server/apps/club/views.py]
1 from django.views.generic import ListView, TemplateView
2 from django.contrib.auth.mixins import LoginRequiredMixin
3 from django.urls import resolve
4
5 from apps.club.models import Club, BDX
6 from apps.group.models import Group
7 from apps.group.views import BaseDetailGroupView
8
9 from apps.utils.slug import *
10
11 class ListClubView(TemplateView):
12 template_name = 'club/list.html'
13
14 def get_context_data(self, **kwargs):
15 context = {'club_list': [] }
16 try:
17 context['club_list'].append({
18 'grouper': "Mes Clubs et Assos",
19 'list': Club.objects.filter(members__user=self.request.user).only('name', 'slug', 'logo', 'bdx_type'),
20 })
21 except Exception:
22 pass
23 club_list = Club.objects.all().select_related('bdx_type').only('name', 'slug', 'logo', 'bdx_type')
24 context['club_list'].append({
25 'grouper': "Associations",
26 'list': club_list.filter(bdx_type__isnull=True)
27 })
28 for bdx in BDX.objects.all():
29 context['club_list'].append({
30 'grouper': f'Clubs {bdx.name}',
31 'list': club_list.filter(bdx_type=bdx),
32 })
33 return context
34
35
36
37 class DetailClubView(BaseDetailGroupView):
38 '''Vue de détails d'un club.'''
39
40 template_name='club/detail.html'
41
42
43 class DetailGroupMembersView(LoginRequiredMixin, ListView):
44 template_name = 'club/members.html'
45
46 def get_object(self, **kwargs):
47 app = resolve(self.request.path).app_name
48 slug = self.kwargs.get("slug")
49 return get_object_from_slug(app, slug)
50
51 def get_queryset(self, **kwargs):
52 object = self.get_object()
53 members = object.members.through.objects.filter(group=object)
54 return members.order_by('year', 'order')
55
56 def get_context_data(self, **kwargs):
57 context = super().get_context_data(**kwargs)
58 context['object'] = self.get_object()
59 return context
60
[end of server/apps/club/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/server/apps/club/views.py b/server/apps/club/views.py
--- a/server/apps/club/views.py
+++ b/server/apps/club/views.py
@@ -2,8 +2,7 @@
from django.contrib.auth.mixins import LoginRequiredMixin
from django.urls import resolve
-from apps.club.models import Club, BDX
-from apps.group.models import Group
+from apps.club.models import Club
from apps.group.views import BaseDetailGroupView
from apps.utils.slug import *
@@ -12,24 +11,18 @@
template_name = 'club/list.html'
def get_context_data(self, **kwargs):
- context = {'club_list': [] }
- try:
- context['club_list'].append({
- 'grouper': "Mes Clubs et Assos",
- 'list': Club.objects.filter(members__user=self.request.user).only('name', 'slug', 'logo', 'bdx_type'),
- })
- except Exception:
- pass
- club_list = Club.objects.all().select_related('bdx_type').only('name', 'slug', 'logo', 'bdx_type')
- context['club_list'].append({
- 'grouper': "Associations",
- 'list': club_list.filter(bdx_type__isnull=True)
- })
- for bdx in BDX.objects.all():
- context['club_list'].append({
- 'grouper': f'Clubs {bdx.name}',
- 'list': club_list.filter(bdx_type=bdx),
- })
+ context = {'club_list': {} }
+ clubList = {}
+ allMembersClub = Club.objects.filter(members__user=self.request.user).only('name', 'slug', 'logo', 'bdx_type')
+ for club in allMembersClub:
+ clubList.setdefault("Mes Clubs et Assos", []).append(club)
+ allClubs = Club.objects.all().select_related("bdx_type").only('name', 'slug', 'logo', 'bdx_type')
+ for club in allClubs:
+ if(club.bdx_type is None):
+ clubList.setdefault("Associations", []).append(club)
+ else:
+ clubList.setdefault(f'Clubs {club.bdx_type.name}', []).append(club)
+ context['club_list']=clubList
return context
| {"golden_diff": "diff --git a/server/apps/club/views.py b/server/apps/club/views.py\n--- a/server/apps/club/views.py\n+++ b/server/apps/club/views.py\n@@ -2,8 +2,7 @@\n from django.contrib.auth.mixins import LoginRequiredMixin\n from django.urls import resolve\n \n-from apps.club.models import Club, BDX\n-from apps.group.models import Group\n+from apps.club.models import Club\n from apps.group.views import BaseDetailGroupView\n \n from apps.utils.slug import *\n@@ -12,24 +11,18 @@\n template_name = 'club/list.html'\n \n def get_context_data(self, **kwargs):\n- context = {'club_list': [] }\n- try:\n- context['club_list'].append({\n- 'grouper': \"Mes Clubs et Assos\",\n- 'list': Club.objects.filter(members__user=self.request.user).only('name', 'slug', 'logo', 'bdx_type'),\n- })\n- except Exception:\n- pass\n- club_list = Club.objects.all().select_related('bdx_type').only('name', 'slug', 'logo', 'bdx_type')\n- context['club_list'].append({\n- 'grouper': \"Associations\",\n- 'list': club_list.filter(bdx_type__isnull=True)\n- })\n- for bdx in BDX.objects.all():\n- context['club_list'].append({\n- 'grouper': f'Clubs {bdx.name}',\n- 'list': club_list.filter(bdx_type=bdx),\n- })\n+ context = {'club_list': {} }\n+ clubList = {}\n+ allMembersClub = Club.objects.filter(members__user=self.request.user).only('name', 'slug', 'logo', 'bdx_type')\n+ for club in allMembersClub:\n+ clubList.setdefault(\"Mes Clubs et Assos\", []).append(club)\n+ allClubs = Club.objects.all().select_related(\"bdx_type\").only('name', 'slug', 'logo', 'bdx_type')\n+ for club in allClubs:\n+ if(club.bdx_type is None):\n+ clubList.setdefault(\"Associations\", []).append(club)\n+ else:\n+ clubList.setdefault(f'Clubs {club.bdx_type.name}', []).append(club)\n+ context['club_list']=clubList\n return context\n", "issue": "Pages lentes\nCertaines pages sont un peu lentes \u00e0 charger:\r\n- liste des clubs\r\n\r\nC'est peut-\u00eatre li\u00e9 au grand nombre d'images, il faudrait \u00e9tudier la possibilit\u00e9 de cacher ces images.\n", "before_files": [{"content": "from django.views.generic import ListView, TemplateView\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.urls import resolve\n\nfrom apps.club.models import Club, BDX\nfrom apps.group.models import Group\nfrom apps.group.views import BaseDetailGroupView\n\nfrom apps.utils.slug import *\n\nclass ListClubView(TemplateView):\n template_name = 'club/list.html'\n\n def get_context_data(self, **kwargs):\n context = {'club_list': [] }\n try:\n context['club_list'].append({\n 'grouper': \"Mes Clubs et Assos\",\n 'list': Club.objects.filter(members__user=self.request.user).only('name', 'slug', 'logo', 'bdx_type'),\n })\n except Exception:\n pass\n club_list = Club.objects.all().select_related('bdx_type').only('name', 'slug', 'logo', 'bdx_type')\n context['club_list'].append({\n 'grouper': \"Associations\",\n 'list': club_list.filter(bdx_type__isnull=True)\n })\n for bdx in BDX.objects.all():\n context['club_list'].append({\n 'grouper': f'Clubs {bdx.name}',\n 'list': club_list.filter(bdx_type=bdx),\n })\n return context\n\n\n\nclass DetailClubView(BaseDetailGroupView):\n '''Vue de d\u00e9tails d'un club.'''\n \n template_name='club/detail.html'\n\n\nclass DetailGroupMembersView(LoginRequiredMixin, ListView):\n template_name = 'club/members.html'\n \n def get_object(self, **kwargs):\n app = resolve(self.request.path).app_name\n slug = self.kwargs.get(\"slug\")\n return get_object_from_slug(app, slug)\n \n def get_queryset(self, **kwargs):\n object = 
self.get_object()\n members = object.members.through.objects.filter(group=object)\n return members.order_by('year', 'order')\n \n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['object'] = self.get_object()\n return context\n", "path": "server/apps/club/views.py"}]} | 1,146 | 521 |
gh_patches_debug_23889 | rasdani/github-patches | git_diff | conan-io__conan-center-index-2500 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
GLOG 0.4.0: unresolved external symbol google::InitGoogleLogging
Package Name/Version: **glog/0.4.0**
Operating System+version: **Win 10**
Compiler+version: **MSVS 2015 & MSVS 2019**
Conan version: **1.28.0**
cmake version:**3.18.0**
Ninja version:**1.10.0**
This may be similar to: https://github.com/conan-io/conan-center-index/issues/1691
Using conan-cmake:
```cmake
conan_cmake_run(
REQUIRES
glog/0.4.0
IMPORTS
${CONANIMPORTS}
BASIC_SETUP
CMAKE_TARGETS
)
add_executable( ${PROJECT_NAME} ${SOURCES} )
target_link_libraries(${PROJECT_NAME} CONAN_PKG::glog)
```
main.cpp is simple enough:
```cpp
#include <glog/logging.h>
int main(int argc, char* argv[]) {
// Initialize Google's logging library.
google::InitGoogleLogging(argv[0]);
LOG(INFO) << "This is an info message";
LOG(WARNING) << "This is a warning message";
LOG(ERROR) << "This is an error message";
LOG(FATAL) << "This is a fatal message";
return 0;
}
```
Log attached below.
[Conan_GLOG_Fail.log](https://github.com/conan-io/conan-center-index/files/5062714/Conan_GLOG_Fail.log)
</issue>
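
The unresolved-symbol error is the usual Windows sign that consumers compile against glog without the preprocessor defines the library's own build uses. The fix propagates them through `package_info`; a hedged sketch of the relevant part is below (it follows the accompanying diff and belongs inside the recipe's `ConanFile` subclass, where `tools` is already imported):

```python
def package_info(self):
    self.cpp_info.libs = tools.collect_libs(self)
    if self.settings.os == "Windows":
        self.cpp_info.defines = ["GLOG_NO_ABBREVIATED_SEVERITIES"]
        # Static glog: the decl macro must expand to nothing.
        # Shared glog: consumers need __declspec(dllimport).
        decl = "__declspec(dllimport)" if self.options.shared else ""
        self.cpp_info.defines.append("GOOGLE_GLOG_DLL_DECL={}".format(decl))
```
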
<code>
[start of recipes/glog/all/conanfile.py]
1 from conans import ConanFile, CMake, tools
2 import os
3
4
5 class GlogConan(ConanFile):
6 name = "glog"
7 url = "https://github.com/conan-io/conan-center-index"
8 homepage = "https://github.com/google/glog/"
9 description = "Google logging library"
10 topics = ("conan", "glog", "logging")
11 license = "BSD 3-Clause"
12 exports_sources = ["CMakeLists.txt", "patches/**"]
13 generators = "cmake", "cmake_find_package"
14 settings = "os", "arch", "compiler", "build_type"
15 options = {"shared": [True, False], "fPIC": [True, False], "with_gflags": [True, False], "with_threads": [True, False]}
16 default_options = {"shared": False, "fPIC": True, "with_gflags": True, "with_threads": True}
17
18 _cmake = None
19
20 @property
21 def _source_subfolder(self):
22 return "source_subfolder"
23
24 def config_options(self):
25 if self.settings.os == "Windows":
26 del self.options.fPIC
27
28 def configure(self):
29 if self.options.shared:
30 del self.options.fPIC
31 if self.options.with_gflags:
32 self.options["gflags"].shared = self.options.shared
33
34 def requirements(self):
35 if self.options.with_gflags:
36 self.requires("gflags/2.2.2")
37
38 def source(self):
39 tools.get(**self.conan_data["sources"][self.version])
40 extracted_dir = self.name + "-" + self.version
41 os.rename(extracted_dir, self._source_subfolder)
42
43 def _configure_cmake(self):
44 if self._cmake:
45 return self._cmake
46 self._cmake = CMake(self)
47 self._cmake.definitions["WITH_GFLAGS"] = self.options.with_gflags
48 self._cmake.definitions["WITH_THREADS"] = self.options.with_threads
49 self._cmake.definitions["BUILD_TESTING"] = False
50 self._cmake.configure()
51 return self._cmake
52
53 def build(self):
54 for patch in self.conan_data.get("patches", {}).get(self.version, []):
55 tools.patch(**patch)
56 cmake = self._configure_cmake()
57 cmake.build()
58
59 def package(self):
60 self.copy("COPYING", dst="licenses", src=self._source_subfolder)
61 cmake = self._configure_cmake()
62 cmake.install()
63 tools.rmdir(os.path.join(self.package_folder, "lib", "cmake"))
64
65 def package_info(self):
66 self.cpp_info.libs = tools.collect_libs(self)
67 if self.settings.os == "Linux":
68 self.cpp_info.system_libs.append("pthread")
69
[end of recipes/glog/all/conanfile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/recipes/glog/all/conanfile.py b/recipes/glog/all/conanfile.py
--- a/recipes/glog/all/conanfile.py
+++ b/recipes/glog/all/conanfile.py
@@ -8,7 +8,7 @@
homepage = "https://github.com/google/glog/"
description = "Google logging library"
topics = ("conan", "glog", "logging")
- license = "BSD 3-Clause"
+ license = "BSD-3-Clause"
exports_sources = ["CMakeLists.txt", "patches/**"]
generators = "cmake", "cmake_find_package"
settings = "os", "arch", "compiler", "build_type"
@@ -64,5 +64,12 @@
def package_info(self):
self.cpp_info.libs = tools.collect_libs(self)
+ self.cpp_info.names["pkgconfig"] = ["libglog"]
if self.settings.os == "Linux":
- self.cpp_info.system_libs.append("pthread")
+ self.cpp_info.system_libs = ["pthread"]
+ elif self.settings.os == "Windows":
+ self.cpp_info.defines = ["GLOG_NO_ABBREVIATED_SEVERITIES"]
+ decl = "__declspec(dllimport)" if self.options.shared else ""
+ self.cpp_info.defines.append("GOOGLE_GLOG_DLL_DECL={}".format(decl))
+ if self.options.with_gflags and not self.options.shared:
+ self.cpp_info.defines.extend(["GFLAGS_DLL_DECLARE_FLAG=", "GFLAGS_DLL_DEFINE_FLAG="])
| {"golden_diff": "diff --git a/recipes/glog/all/conanfile.py b/recipes/glog/all/conanfile.py\n--- a/recipes/glog/all/conanfile.py\n+++ b/recipes/glog/all/conanfile.py\n@@ -8,7 +8,7 @@\n homepage = \"https://github.com/google/glog/\"\n description = \"Google logging library\"\n topics = (\"conan\", \"glog\", \"logging\")\n- license = \"BSD 3-Clause\"\n+ license = \"BSD-3-Clause\"\n exports_sources = [\"CMakeLists.txt\", \"patches/**\"]\n generators = \"cmake\", \"cmake_find_package\"\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n@@ -64,5 +64,12 @@\n \n def package_info(self):\n self.cpp_info.libs = tools.collect_libs(self)\n+ self.cpp_info.names[\"pkgconfig\"] = [\"libglog\"]\n if self.settings.os == \"Linux\":\n- self.cpp_info.system_libs.append(\"pthread\")\n+ self.cpp_info.system_libs = [\"pthread\"]\n+ elif self.settings.os == \"Windows\":\n+ self.cpp_info.defines = [\"GLOG_NO_ABBREVIATED_SEVERITIES\"]\n+ decl = \"__declspec(dllimport)\" if self.options.shared else \"\"\n+ self.cpp_info.defines.append(\"GOOGLE_GLOG_DLL_DECL={}\".format(decl))\n+ if self.options.with_gflags and not self.options.shared:\n+ self.cpp_info.defines.extend([\"GFLAGS_DLL_DECLARE_FLAG=\", \"GFLAGS_DLL_DEFINE_FLAG=\"])\n", "issue": "GLOG 0.4.0 : unresolved external symbol google::InitGoogleLogging\n\r\nPackage Name/Version: **glog/0.4.0**\r\nOperating System+version: **Win 10**\r\nCompiler+version: **MSVS 2015 & MSVS 2019**\r\nConan version: **1.28.0**\r\ncmake version:**3.18.0**\r\nNinja version:**1.10.0**\r\n\r\nThis may be similar to: https://github.com/conan-io/conan-center-index/issues/1691\r\n\r\nUsing conan-cmake:\r\n``` Bash\r\nconan_cmake_run(\r\n REQUIRES\r\n glog/0.4.0\r\n IMPORTS\r\n ${CONANIMPORTS}\r\n BASIC_SETUP\r\n CMAKE_TARGETS\r\n )\r\n\r\nadd_executable( ${PROJECT_NAME} ${SOURCES} )\r\ntarget_link_libraries(${PROJECT_NAME} CONAN_PKG::glog)\r\n\r\n```\r\n\r\nmain.cpp is simple enough:\r\n\r\n```Bash\r\n\r\n#include <glog/logging.h>\r\n\r\nint main(int argc, char* argv[]) {\r\n\r\n\r\n // Initialize Google's logging library.\r\n google::InitGoogleLogging(argv[0]);\r\n\r\n LOG(INFO) << \"This is an info message\";\r\n LOG(WARNING) << \"This is a warning message\";\r\n LOG(ERROR) << \"This is an error message\";\r\n LOG(FATAL) << \"This is a fatal message\";\r\n\r\n return 0;\r\n}\r\n```\r\n\r\nLog attached below.\r\n[Conan_GLOG_Fail.log](https://github.com/conan-io/conan-center-index/files/5062714/Conan_GLOG_Fail.log)\n", "before_files": [{"content": "from conans import ConanFile, CMake, tools\nimport os\n\n\nclass GlogConan(ConanFile):\n name = \"glog\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://github.com/google/glog/\"\n description = \"Google logging library\"\n topics = (\"conan\", \"glog\", \"logging\")\n license = \"BSD 3-Clause\"\n exports_sources = [\"CMakeLists.txt\", \"patches/**\"]\n generators = \"cmake\", \"cmake_find_package\"\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\"shared\": [True, False], \"fPIC\": [True, False], \"with_gflags\": [True, False], \"with_threads\": [True, False]}\n default_options = {\"shared\": False, \"fPIC\": True, \"with_gflags\": True, \"with_threads\": True}\n\n _cmake = None\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n if self.options.shared:\n del self.options.fPIC\n if self.options.with_gflags:\n 
self.options[\"gflags\"].shared = self.options.shared\n\n def requirements(self):\n if self.options.with_gflags:\n self.requires(\"gflags/2.2.2\")\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n extracted_dir = self.name + \"-\" + self.version\n os.rename(extracted_dir, self._source_subfolder)\n\n def _configure_cmake(self):\n if self._cmake:\n return self._cmake\n self._cmake = CMake(self)\n self._cmake.definitions[\"WITH_GFLAGS\"] = self.options.with_gflags\n self._cmake.definitions[\"WITH_THREADS\"] = self.options.with_threads\n self._cmake.definitions[\"BUILD_TESTING\"] = False\n self._cmake.configure()\n return self._cmake\n\n def build(self):\n for patch in self.conan_data.get(\"patches\", {}).get(self.version, []):\n tools.patch(**patch)\n cmake = self._configure_cmake()\n cmake.build()\n\n def package(self):\n self.copy(\"COPYING\", dst=\"licenses\", src=self._source_subfolder)\n cmake = self._configure_cmake()\n cmake.install()\n tools.rmdir(os.path.join(self.package_folder, \"lib\", \"cmake\"))\n\n def package_info(self):\n self.cpp_info.libs = tools.collect_libs(self)\n if self.settings.os == \"Linux\":\n self.cpp_info.system_libs.append(\"pthread\")\n", "path": "recipes/glog/all/conanfile.py"}]} | 1,627 | 342 |
gh_patches_debug_9801 | rasdani/github-patches | git_diff | joke2k__faker-836 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add a method to generate cell phone numbers for pt_BR
Faker doesn't have a function to generate a cell phone number for the Brazilian (pt_BR) locale.
Steps to reproduce
Create fake instance using localization "pt_BR"
Call fake.msisdn() or fake.phone_number()
Expected behavior
It should generate a cell phone number.
Actual behavior
Sometimes these methods return "residential" (landline) numbers.
Reference on the difference between cell phone and residential numbers:
http://www.teleco.com.br/num.asp
</issue>
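
Brazilian mobile numbers are distinguishable by a leading `9` in the subscriber part, so the provider can expose a dedicated generator alongside `phone_number()`. A hedged sketch, mirroring the shape of the accompanying diff (the single format string is illustrative):

```python
from faker.providers.phone_number import Provider as PhoneNumberProvider


class Provider(PhoneNumberProvider):
    cellphone_formats = (
        '+55 9#### ####',  # the leading 9 marks a mobile number
    )

    def cellphone_number(self):
        pattern = self.random_element(self.cellphone_formats)
        return self.numerify(self.generator.parse(pattern))
```
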
<code>
[start of faker/providers/phone_number/pt_BR/__init__.py]
1 from __future__ import unicode_literals
2 from .. import Provider as PhoneNumberProvider
3
4
5 class Provider(PhoneNumberProvider):
6 formats = (
7 '+55 (011) #### ####',
8 '+55 (021) #### ####',
9 '+55 (031) #### ####',
10 '+55 (041) #### ####',
11 '+55 (051) #### ####',
12 '+55 (061) #### ####',
13 '+55 (071) #### ####',
14 '+55 (081) #### ####',
15 '+55 11 #### ####',
16 '+55 21 #### ####',
17 '+55 31 #### ####',
18 '+55 41 #### ####',
19 '+55 51 ### ####',
20 '+55 61 #### ####',
21 '+55 71 #### ####',
22 '+55 81 #### ####',
23 '+55 (011) ####-####',
24 '+55 (021) ####-####',
25 '+55 (031) ####-####',
26 '+55 (041) ####-####',
27 '+55 (051) ####-####',
28 '+55 (061) ####-####',
29 '+55 (071) ####-####',
30 '+55 (081) ####-####',
31 '+55 11 ####-####',
32 '+55 21 ####-####',
33 '+55 31 ####-####',
34 '+55 41 ####-####',
35 '+55 51 ### ####',
36 '+55 61 ####-####',
37 '+55 71 ####-####',
38 '+55 81 ####-####',
39 '(011) #### ####',
40 '(021) #### ####',
41 '(031) #### ####',
42 '(041) #### ####',
43 '(051) #### ####',
44 '(061) #### ####',
45 '(071) #### ####',
46 '(081) #### ####',
47 '11 #### ####',
48 '21 #### ####',
49 '31 #### ####',
50 '41 #### ####',
51 '51 ### ####',
52 '61 #### ####',
53 '71 #### ####',
54 '81 #### ####',
55 '(011) ####-####',
56 '(021) ####-####',
57 '(031) ####-####',
58 '(041) ####-####',
59 '(051) ####-####',
60 '(061) ####-####',
61 '(071) ####-####',
62 '(081) ####-####',
63 '11 ####-####',
64 '21 ####-####',
65 '31 ####-####',
66 '41 ####-####',
67 '51 ### ####',
68 '61 ####-####',
69 '71 ####-####',
70 '81 ####-####',
71 '#### ####',
72 '####-####',
73 )
74 msisdn_formats = (
75 '5511#########',
76 '5521#########',
77 '5531#########',
78 '5541#########',
79 '5551#########',
80 '5561#########',
81 '5571#########',
82 '5581#########',
83 )
84
[end of faker/providers/phone_number/pt_BR/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/faker/providers/phone_number/pt_BR/__init__.py b/faker/providers/phone_number/pt_BR/__init__.py
--- a/faker/providers/phone_number/pt_BR/__init__.py
+++ b/faker/providers/phone_number/pt_BR/__init__.py
@@ -71,6 +71,7 @@
'#### ####',
'####-####',
)
+
msisdn_formats = (
'5511#########',
'5521#########',
@@ -81,3 +82,11 @@
'5571#########',
'5581#########',
)
+
+ cellphone_formats = (
+ '+55 9#### ####',
+ )
+
+ def cellphone_number(self):
+ pattern = self.random_element(self.cellphone_formats)
+ return self.numerify(self.generator.parse(pattern))
| {"golden_diff": "diff --git a/faker/providers/phone_number/pt_BR/__init__.py b/faker/providers/phone_number/pt_BR/__init__.py\n--- a/faker/providers/phone_number/pt_BR/__init__.py\n+++ b/faker/providers/phone_number/pt_BR/__init__.py\n@@ -71,6 +71,7 @@\n '#### ####',\n '####-####',\n )\n+\n msisdn_formats = (\n '5511#########',\n '5521#########',\n@@ -81,3 +82,11 @@\n '5571#########',\n '5581#########',\n )\n+\n+ cellphone_formats = (\n+ '+55 9#### ####',\n+ )\n+\n+ def cellphone_number(self):\n+ pattern = self.random_element(self.cellphone_formats)\n+ return self.numerify(self.generator.parse(pattern))\n", "issue": "Add method to generate a cell phone number to pt-BR\nFaker doesn't have a function to generate a cellphone to Brazilian.\r\n\r\nSteps to reproduce\r\nCreate fake instance using localization \"pt_BR\"\r\nCall fake.msisdn() or fake.phone_number()\r\nExpected behavior\r\nIt should generate a cell phone number.\r\n\r\nActual behavior\r\nSometimes these methods return a \"residential\" numbers.\r\n\r\nReference difference between cell phones and residential numbers:\r\n\r\nhttp://www.teleco.com.br/num.asp\n", "before_files": [{"content": "from __future__ import unicode_literals\nfrom .. import Provider as PhoneNumberProvider\n\n\nclass Provider(PhoneNumberProvider):\n formats = (\n '+55 (011) #### ####',\n '+55 (021) #### ####',\n '+55 (031) #### ####',\n '+55 (041) #### ####',\n '+55 (051) #### ####',\n '+55 (061) #### ####',\n '+55 (071) #### ####',\n '+55 (081) #### ####',\n '+55 11 #### ####',\n '+55 21 #### ####',\n '+55 31 #### ####',\n '+55 41 #### ####',\n '+55 51 ### ####',\n '+55 61 #### ####',\n '+55 71 #### ####',\n '+55 81 #### ####',\n '+55 (011) ####-####',\n '+55 (021) ####-####',\n '+55 (031) ####-####',\n '+55 (041) ####-####',\n '+55 (051) ####-####',\n '+55 (061) ####-####',\n '+55 (071) ####-####',\n '+55 (081) ####-####',\n '+55 11 ####-####',\n '+55 21 ####-####',\n '+55 31 ####-####',\n '+55 41 ####-####',\n '+55 51 ### ####',\n '+55 61 ####-####',\n '+55 71 ####-####',\n '+55 81 ####-####',\n '(011) #### ####',\n '(021) #### ####',\n '(031) #### ####',\n '(041) #### ####',\n '(051) #### ####',\n '(061) #### ####',\n '(071) #### ####',\n '(081) #### ####',\n '11 #### ####',\n '21 #### ####',\n '31 #### ####',\n '41 #### ####',\n '51 ### ####',\n '61 #### ####',\n '71 #### ####',\n '81 #### ####',\n '(011) ####-####',\n '(021) ####-####',\n '(031) ####-####',\n '(041) ####-####',\n '(051) ####-####',\n '(061) ####-####',\n '(071) ####-####',\n '(081) ####-####',\n '11 ####-####',\n '21 ####-####',\n '31 ####-####',\n '41 ####-####',\n '51 ### ####',\n '61 ####-####',\n '71 ####-####',\n '81 ####-####',\n '#### ####',\n '####-####',\n )\n msisdn_formats = (\n '5511#########',\n '5521#########',\n '5531#########',\n '5541#########',\n '5551#########',\n '5561#########',\n '5571#########',\n '5581#########',\n )\n", "path": "faker/providers/phone_number/pt_BR/__init__.py"}]} | 1,565 | 201 |
gh_patches_debug_11240 | rasdani/github-patches | git_diff | acl-org__acl-anthology-1025 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Name parser
At ingestion time, we are often given data that is not split into BibTeX's "surname, given name" format. We therefore split it ourselves, heuristically, which often fails. Python has a [name parser](https://pypi.org/project/nameparser/) module, but it doesn't work on all Anthology names either; for example:
- José Alejandro Lopez Gonzalez
- Philippe Boula de Mareüil
It would be cool to implement our own name parser and train it on the Anthology data. (I imagine that applying the trained model would turn up some mistakes in our data).
</issue>
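
The off-the-shelf parser is quick to try against names like the two above, which makes it easy to collect failure cases before training anything custom. A minimal sketch (`nameparser` is the PyPI package linked above; the split it prints is exactly what would need checking):

```python
from nameparser import HumanName  # pip install nameparser

for raw in ["José Alejandro Lopez Gonzalez", "Philippe Boula de Mareüil"]:
    name = HumanName(raw)
    # Inspect how the heuristic divides surname vs. given names.
    print(f"{raw!r} -> first={name.first!r} last={name.last!r}")
```
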
<code>
[start of bin/likely_name_split.py]
1 #!/usr/bin/env python3
2 # Daniel Gildea, 2020
3
4 """Usage: likely_name_split.py [--importdir=DIR]
5
6 Counts first and last names in anthology.
7 Predicts best split into first and last.
8 Checks whether current names match our predictions.
9
10 Options:
11 --importdir=DIR Directory to import XML files from. [default: {scriptdir}/../data/]
12 -h, --help Display this helpful text.
13 """
14
15 from collections import defaultdict
16 from docopt import docopt
17 import re
18 import os
19 from math import *
20
21 from anthology import Anthology
22 from anthology.people import PersonName
23
24
25 class NameSplitter:
26 def __init__(self, anthology):
27 # counts of how often each name appears
28 self.first_count = defaultdict(lambda: 0) # "Maria" "Victoria"
29 self.first_full_count = defaultdict(lambda: 0) # "Maria Victoria"
30 self.last_count = defaultdict(lambda: 0) # "van" "den" "Bosch"
31 self.last_full_count = defaultdict(lambda: 0) # "van den Bosch"
32 self.first_total = 0
33 self.last_total = 0
34
35 self.count_names(anthology)
36
37 # counts names in anthology database into global vars
38 # first_count last_count (dicts)
39 # first_full_count last_full_count (dicts)
40 # first_total last_total (floats)
41 def count_names(self, anthology):
42 for person in anthology.people.personids():
43 name = anthology.people.get_canonical_name(person)
44 num_papers = len(anthology.people.get_papers(person)) + 0.0
45 # print(name.last, ", ", name.first, num_papers)
46 for w in name.first.split(" "):
47 self.first_count[w] += num_papers
48 self.first_full_count[name.first] += num_papers
49 self.first_total += num_papers
50
51 for w in name.last.split(" "):
52 self.last_count[w] += num_papers
53 self.last_full_count[name.last] += num_papers
54 self.last_total += num_papers
55
56 # takes "Maria Victoria Lopez Gonzalez"
57 # returns ("Lopez Gonzalez", "Maria Victoria")
58 # uses counts of words in first and last names in current database
59 def best_split(self, name):
60 if "," in name:
61 # Short-circuit names that are already split
62 surname, given_names = name.split(",")
63 return (surname.strip(), given_names.strip())
64
65 words = name.split(" ")
66 best_score = -inf
67 best = ("", "")
68 # loop over possible split points between first/last
69 for i in range(1, len(words)): # at least one word in each part
70 first = " ".join(words[0:i])
71 last = " ".join(words[i:])
72 # max of log prob of "Maria Victoria" and
73 # log prob of "Maria" + log prob of "Victoria"
74 first_probs = [
75 log((self.first_count[x] + 0.01) / self.first_total) for x in words[0:i]
76 ]
77 first_score = max(
78 log((self.first_full_count[first] + 0.000001) / self.first_total),
79 sum(first_probs),
80 )
81 last_probs = [
82 log((self.last_count[x] + 0.01) / self.last_total) for x in words[i:]
83 ]
84 last_score = max(
85 log((self.last_full_count[last] + 0.000001) / self.last_total),
86 sum(last_probs),
87 )
88
89 if first_score + last_score > best_score:
90 best_score = first_score + last_score
91 best = (last, first)
92 # end of loop over split points
93 return best
94
95
96 if __name__ == "__main__":
97 args = docopt(__doc__)
98 scriptdir = os.path.dirname(os.path.abspath(__file__))
99 if "{scriptdir}" in args["--importdir"]:
100 args["--importdir"] = os.path.abspath(
101 args["--importdir"].format(scriptdir=scriptdir)
102 )
103
104 anthology = Anthology(importdir=args["--importdir"])
105 splitter = NameSplitter(anthology)
106
107 # for all names currently in anthology,
108 # see if they match what we predict
109 for person in anthology.people.personids():
110 name = anthology.people.get_canonical_name(person)
111
112 # find our prediction of split
113 best = splitter.best_split(name.first + " " + name.last)
114
115 # if current split does not match our prediction
116 if not (best[0] == name.last and best[1] == name.first):
117 # print suggested replacement
118 print(name.last, ",", name.first, " ==> ", best[0], ",", best[1])
119
[end of bin/likely_name_split.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bin/likely_name_split.py b/bin/likely_name_split.py
--- a/bin/likely_name_split.py
+++ b/bin/likely_name_split.py
@@ -57,8 +57,9 @@
# returns ("Lopez Gonzalez", "Maria Victoria")
# uses counts of words in first and last names in current database
def best_split(self, name):
- if "," in name:
+ if "," in name and not "Jr." in name:
# Short-circuit names that are already split
+ # comma in "William Baumgartner, Jr." does not count as a split
surname, given_names = name.split(",")
return (surname.strip(), given_names.strip())
| {"golden_diff": "diff --git a/bin/likely_name_split.py b/bin/likely_name_split.py\n--- a/bin/likely_name_split.py\n+++ b/bin/likely_name_split.py\n@@ -57,8 +57,9 @@\n # returns (\"Lopez Gonzalez\", \"Maria Victoria\")\n # uses counts of words in first and last names in current database\n def best_split(self, name):\n- if \",\" in name:\n+ if \",\" in name and not \"Jr.\" in name:\n # Short-circuit names that are already split\n+ # comma in \"William Baumgartner, Jr.\" does not count as a split\n surname, given_names = name.split(\",\")\n return (surname.strip(), given_names.strip())\n", "issue": "Name parser\nAt ingestion time, we are often given data that is not split into BibTeX's \"surname, given name\" format. We therefore split it ourselves, heuristically, which often fails. Python has a [name parser](https://pypi.org/project/nameparser/) module, but it doesn't work on all Anthology names, either, e.g.,:\r\n\r\n- Jos\u00e9 Alejandro Lopez Gonzalez\r\n- Philippe Boula de Mare\u00fcil\r\n\r\nIt would be cool to implement our own name parser and train it on the Anthology data. (I imagine that applying the trained model would turn up some mistakes in our data).\n", "before_files": [{"content": "#!/usr/bin/env python3\n# Daniel Gildea, 2020\n\n\"\"\"Usage: likely_name_split.py [--importdir=DIR]\n\nCounts first and last names in anthology.\nPredicts best split into first and last.\nChecks whether current names match our predictions.\n\nOptions:\n --importdir=DIR Directory to import XML files from. [default: {scriptdir}/../data/]\n -h, --help Display this helpful text.\n\"\"\"\n\nfrom collections import defaultdict\nfrom docopt import docopt\nimport re\nimport os\nfrom math import *\n\nfrom anthology import Anthology\nfrom anthology.people import PersonName\n\n\nclass NameSplitter:\n def __init__(self, anthology):\n # counts of how often each name appears\n self.first_count = defaultdict(lambda: 0) # \"Maria\" \"Victoria\"\n self.first_full_count = defaultdict(lambda: 0) # \"Maria Victoria\"\n self.last_count = defaultdict(lambda: 0) # \"van\" \"den\" \"Bosch\"\n self.last_full_count = defaultdict(lambda: 0) # \"van den Bosch\"\n self.first_total = 0\n self.last_total = 0\n\n self.count_names(anthology)\n\n # counts names in anthology database into global vars\n # first_count last_count (dicts)\n # first_full_count last_full_count (dicts)\n # first_total last_total (floats)\n def count_names(self, anthology):\n for person in anthology.people.personids():\n name = anthology.people.get_canonical_name(person)\n num_papers = len(anthology.people.get_papers(person)) + 0.0\n # print(name.last, \", \", name.first, num_papers)\n for w in name.first.split(\" \"):\n self.first_count[w] += num_papers\n self.first_full_count[name.first] += num_papers\n self.first_total += num_papers\n\n for w in name.last.split(\" \"):\n self.last_count[w] += num_papers\n self.last_full_count[name.last] += num_papers\n self.last_total += num_papers\n\n # takes \"Maria Victoria Lopez Gonzalez\"\n # returns (\"Lopez Gonzalez\", \"Maria Victoria\")\n # uses counts of words in first and last names in current database\n def best_split(self, name):\n if \",\" in name:\n # Short-circuit names that are already split\n surname, given_names = name.split(\",\")\n return (surname.strip(), given_names.strip())\n\n words = name.split(\" \")\n best_score = -inf\n best = (\"\", \"\")\n # loop over possible split points between first/last\n for i in range(1, len(words)): # at least one word in each part\n first = \" 
\".join(words[0:i])\n last = \" \".join(words[i:])\n # max of log prob of \"Maria Victoria\" and\n # log prob of \"Maria\" + log prob of \"Victoria\"\n first_probs = [\n log((self.first_count[x] + 0.01) / self.first_total) for x in words[0:i]\n ]\n first_score = max(\n log((self.first_full_count[first] + 0.000001) / self.first_total),\n sum(first_probs),\n )\n last_probs = [\n log((self.last_count[x] + 0.01) / self.last_total) for x in words[i:]\n ]\n last_score = max(\n log((self.last_full_count[last] + 0.000001) / self.last_total),\n sum(last_probs),\n )\n\n if first_score + last_score > best_score:\n best_score = first_score + last_score\n best = (last, first)\n # end of loop over split points\n return best\n\n\nif __name__ == \"__main__\":\n args = docopt(__doc__)\n scriptdir = os.path.dirname(os.path.abspath(__file__))\n if \"{scriptdir}\" in args[\"--importdir\"]:\n args[\"--importdir\"] = os.path.abspath(\n args[\"--importdir\"].format(scriptdir=scriptdir)\n )\n\n anthology = Anthology(importdir=args[\"--importdir\"])\n splitter = NameSplitter(anthology)\n\n # for all names currently in anthology,\n # see if they match what we predict\n for person in anthology.people.personids():\n name = anthology.people.get_canonical_name(person)\n\n # find our prediction of split\n best = splitter.best_split(name.first + \" \" + name.last)\n\n # if current split does not match our prediction\n if not (best[0] == name.last and best[1] == name.first):\n # print suggested replacement\n print(name.last, \",\", name.first, \" ==> \", best[0], \",\", best[1])\n", "path": "bin/likely_name_split.py"}]} | 1,969 | 156 |
gh_patches_debug_150 | rasdani/github-patches | git_diff | ManimCommunity__manim-70 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
A small bug in setup.py
In `install_requires` of `setup.py`, the library `colour` is listed twice. This needs to be fixed.
</issue>
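
Duplicates like this are easy to catch mechanically before they land. A hedged sketch (the list is excerpted by hand from the `setup.py` below, not read programmatically):

```python
from collections import Counter

install_requires = ["colour", "argparse", "colour", "numpy"]  # excerpt
dupes = [pkg for pkg, n in Counter(install_requires).items() if n > 1]
assert not dupes, f"duplicate requirements: {dupes}"
```
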
<code>
[start of setup.py]
1 from setuptools import setup, find_namespace_packages
2 setup(
3 name="manimlib",
4 version="0.2.0",
5 description="Animation engine for explanatory math videos",
6 license="MIT",
7 packages=find_namespace_packages(),
8 package_data={ "manim": ["*.tex"] },
9 entry_points={
10 "console_scripts": [
11 "manim=manim:main",
12 "manimcm=manim:main",
13 ]
14 },
15 install_requires=[
16 "colour",
17 "argparse",
18 "colour",
19 "numpy",
20 "Pillow",
21 "progressbar",
22 "scipy",
23 "tqdm",
24 "opencv-python",
25 "pycairo",
26 "pydub",
27 "pygments",
28 "pyreadline; sys_platform == 'win32'",
29 "rich"
30 ],
31 )
32
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -13,7 +13,6 @@
]
},
install_requires=[
- "colour",
"argparse",
"colour",
"numpy",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -13,7 +13,6 @@\n ]\n },\n install_requires=[\n- \"colour\",\n \"argparse\",\n \"colour\",\n \"numpy\",\n", "issue": "A small Bug in setup.py\nIn `install_requires` of `setup.py` the library `colour` is mentioned twice. This needed to be changed.\n", "before_files": [{"content": "from setuptools import setup, find_namespace_packages\nsetup(\n name=\"manimlib\",\n version=\"0.2.0\",\n description=\"Animation engine for explanatory math videos\",\n license=\"MIT\",\n packages=find_namespace_packages(),\n package_data={ \"manim\": [\"*.tex\"] },\n entry_points={\n \"console_scripts\": [\n \"manim=manim:main\",\n \"manimcm=manim:main\",\n ]\n },\n install_requires=[\n \"colour\",\n \"argparse\",\n \"colour\",\n \"numpy\",\n \"Pillow\",\n \"progressbar\",\n \"scipy\",\n \"tqdm\",\n \"opencv-python\",\n \"pycairo\",\n \"pydub\",\n \"pygments\",\n \"pyreadline; sys_platform == 'win32'\",\n \"rich\"\n ],\n)\n", "path": "setup.py"}]} | 796 | 59 |
gh_patches_debug_15754 | rasdani/github-patches | git_diff | secdev__scapy-2317 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
KRACK module requires python-cryptography
When `python-cryptography` or `python3-cryptography` packages are not installed, the KRACK module cannot be loaded.
```
>>> load_module("krack")
ERROR: Loading module scapy.modules.krack
Traceback (most recent call last):
File "/home/ria/scapy/scapy/main.py", line 150, in _load
mod = importlib.import_module(module)
File "/usr/lib/python3.7/importlib/__init__.py", line 127, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 1006, in _gcd_import
File "<frozen importlib._bootstrap>", line 983, in _find_and_load
File "<frozen importlib._bootstrap>", line 967, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 677, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 728, in exec_module
File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
File "/home/ria/scapy/scapy/modules/krack/__init__.py", line 28, in <module>
from scapy.modules.krack.automaton import KrackAP # noqa: F401
File "/home/ria/scapy/scapy/modules/krack/automaton.py", line 7, in <module>
from cryptography.hazmat.primitives import hashes
ModuleNotFoundError: No module named 'cryptography'
```
Calling @commial to assess whether the module could still offer some functionality with a `crypto_valid` guard around certain blocks. But at first glance I believe a missing `cryptography` should completely prevent importing the module.
https://github.com/secdev/scapy/blob/a58e1b90a704c394216a0b5a864a50931754bdf7/scapy/modules/krack/automaton.py#L6-L10
https://github.com/secdev/scapy/blob/a58e1b90a704c394216a0b5a864a50931754bdf7/scapy/modules/krack/crypto.py#L6-L9
</issue>
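For context, the `crypto_valid` guard discussed above follows a common optional-dependency pattern: probe for the dependency once, then gate the imports that need it. The sketch below only illustrates the pattern; it is not scapy's actual implementation, and the error message is an assumption:
```python
# Illustrative optional-dependency guard, modelled on scapy's conf.crypto_valid.
try:
    import cryptography  # noqa: F401
    crypto_valid = True
except ImportError:
    crypto_valid = False

if crypto_valid:
    # Safe to import submodules that rely on `cryptography` here.
    pass
else:
    raise ImportError("Missing dependency: install python3-cryptography v1.7+.")
```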
<code>
[start of scapy/modules/krack/__init__.py]
1 """Module implementing Krack Attack on client, as a custom WPA Access Point
2
3 More details on the attack can be found on https://www.krackattacks.com/
4
5 Example of use (from the scapy shell):
6 >>> load_module("krack")
7 >>> KrackAP(
8 iface="mon0", # A monitor interface
9 ap_mac='11:22:33:44:55:66', # MAC (BSSID) to use
10 ssid="TEST_KRACK", # SSID
11 passphrase="testtest", # Associated passphrase
12 ).run()
13
14 Then, on the target device, connect to "TEST_KRACK" using "testtest" as the
15 passphrase.
16 The output logs will indicate if one of the vulnerability have been triggered.
17
18 Outputs for vulnerable devices:
19 - IV re-use!! Client seems to be vulnerable to handshake 3/4 replay
20 (CVE-2017-13077)
21 - Broadcast packet accepted twice!! (CVE-2017-13080)
22 - Client has installed an all zero encryption key (TK)!!
23
24 For patched devices:
25 - Client is likely not vulnerable to CVE-2017-13080
26 """
27
28 from scapy.modules.krack.automaton import KrackAP # noqa: F401
29
[end of scapy/modules/krack/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scapy/modules/krack/__init__.py b/scapy/modules/krack/__init__.py
--- a/scapy/modules/krack/__init__.py
+++ b/scapy/modules/krack/__init__.py
@@ -1,5 +1,7 @@
"""Module implementing Krack Attack on client, as a custom WPA Access Point
+Requires the python cryptography package v1.7+. See https://cryptography.io/
+
More details on the attack can be found on https://www.krackattacks.com/
Example of use (from the scapy shell):
@@ -25,4 +27,10 @@
- Client is likely not vulnerable to CVE-2017-13080
"""
-from scapy.modules.krack.automaton import KrackAP # noqa: F401
+from scapy.config import conf
+
+if conf.crypto_valid:
+ from scapy.modules.krack.automaton import KrackAP # noqa: F401
+else:
+ raise ImportError("Cannot import Krack module due to missing dependency. "
+ "Please install python{3}-cryptography v1.7+.")
| {"golden_diff": "diff --git a/scapy/modules/krack/__init__.py b/scapy/modules/krack/__init__.py\n--- a/scapy/modules/krack/__init__.py\n+++ b/scapy/modules/krack/__init__.py\n@@ -1,5 +1,7 @@\n \"\"\"Module implementing Krack Attack on client, as a custom WPA Access Point\n \n+Requires the python cryptography package v1.7+. See https://cryptography.io/\n+\n More details on the attack can be found on https://www.krackattacks.com/\n \n Example of use (from the scapy shell):\n@@ -25,4 +27,10 @@\n - Client is likely not vulnerable to CVE-2017-13080\n \"\"\"\n \n-from scapy.modules.krack.automaton import KrackAP # noqa: F401\n+from scapy.config import conf\n+\n+if conf.crypto_valid:\n+ from scapy.modules.krack.automaton import KrackAP # noqa: F401\n+else:\n+ raise ImportError(\"Cannot import Krack module due to missing dependency. \"\n+ \"Please install python{3}-cryptography v1.7+.\")\n", "issue": "KRACK module requires python-cryptography\nWhen `python-cryptography` or `python3-cryptography` packages are not installed, the KRACK module cannot be loaded.\r\n\r\n```\r\n>>> load_module(\"krack\")\r\nERROR: Loading module scapy.modules.krack\r\nTraceback (most recent call last):\r\n File \"/home/ria/scapy/scapy/main.py\", line 150, in _load\r\n mod = importlib.import_module(module)\r\n File \"/usr/lib/python3.7/importlib/__init__.py\", line 127, in import_module\r\n return _bootstrap._gcd_import(name[level:], package, level)\r\n File \"<frozen importlib._bootstrap>\", line 1006, in _gcd_import\r\n File \"<frozen importlib._bootstrap>\", line 983, in _find_and_load\r\n File \"<frozen importlib._bootstrap>\", line 967, in _find_and_load_unlocked\r\n File \"<frozen importlib._bootstrap>\", line 677, in _load_unlocked\r\n File \"<frozen importlib._bootstrap_external>\", line 728, in exec_module\r\n File \"<frozen importlib._bootstrap>\", line 219, in _call_with_frames_removed\r\n File \"/home/ria/scapy/scapy/modules/krack/__init__.py\", line 28, in <module>\r\n from scapy.modules.krack.automaton import KrackAP # noqa: F401\r\n File \"/home/ria/scapy/scapy/modules/krack/automaton.py\", line 7, in <module>\r\n from cryptography.hazmat.primitives import hashes\r\nModuleNotFoundError: No module named 'cryptography'\r\n```\r\n\r\nCalling @commial to assess whether the module could still offer functionalities with a `crypto_valid` around some blocks. But at first glance I believe a missing `cryptography` should completely prevent importing the module.\r\n\r\nhttps://github.com/secdev/scapy/blob/a58e1b90a704c394216a0b5a864a50931754bdf7/scapy/modules/krack/automaton.py#L6-L10\r\n\r\nhttps://github.com/secdev/scapy/blob/a58e1b90a704c394216a0b5a864a50931754bdf7/scapy/modules/krack/crypto.py#L6-L9\n", "before_files": [{"content": "\"\"\"Module implementing Krack Attack on client, as a custom WPA Access Point\n\nMore details on the attack can be found on https://www.krackattacks.com/\n\nExample of use (from the scapy shell):\n>>> load_module(\"krack\")\n>>> KrackAP(\n iface=\"mon0\", # A monitor interface\n ap_mac='11:22:33:44:55:66', # MAC (BSSID) to use\n ssid=\"TEST_KRACK\", # SSID\n passphrase=\"testtest\", # Associated passphrase\n).run()\n\nThen, on the target device, connect to \"TEST_KRACK\" using \"testtest\" as the\npassphrase.\nThe output logs will indicate if one of the vulnerability have been triggered.\n\nOutputs for vulnerable devices:\n- IV re-use!! Client seems to be vulnerable to handshake 3/4 replay\n (CVE-2017-13077)\n- Broadcast packet accepted twice!! 
(CVE-2017-13080)\n- Client has installed an all zero encryption key (TK)!!\n\nFor patched devices:\n- Client is likely not vulnerable to CVE-2017-13080\n\"\"\"\n\nfrom scapy.modules.krack.automaton import KrackAP # noqa: F401\n", "path": "scapy/modules/krack/__init__.py"}]} | 1,421 | 259 |
gh_patches_debug_5192 | rasdani/github-patches | git_diff | cowrie__cowrie-1421 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
$UID not spitting out UID
**Describe the bug**
A hacker found a way to get inside the cowrie box and executed "echo $UID". The reply that came back was empty, and the hacker disconnected. My normal box returned the UID.
**To Reproduce**
Steps to reproduce the behavior:
1. Connect to linux machine
2. Type in "Echo $UID" (response should be something like 1001 or 0)
3. Connect to cowrie
4. Type in "echo $UID"
5. See nothing replied on screen.
**Expected behavior**
When someone uses the variable $UID, the honeypot should take the value from the honeyfs/etc/passwd file or return 0.
**Server (please complete the following information):**
- OS: Linux nanopineo2 5.8.6-sunxi64 #20.08.2 SMP Fri Sep 4 08:52:31 CEST 2020 aarch64 GNU/Linux
- Python: Python 3.7.3
**Additional context**
My hackers seem to get smarter each day...
</issue>
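The fix in this record turns out to be a type mismatch: the session environment stores `UID` as an integer while the emulated shell only substitutes string values. The sketch below reproduces that failure mode with a hypothetical `expand` helper (it is not cowrie code):
```python
environ = {'USER': 'root', 'UID': 1001}  # UID stored as an int, as in the bug

def expand(token, env):
    # Naive $VAR expansion that, like the emulated shell, expects str values.
    value = env.get(token.lstrip('$'), '')
    return value if isinstance(value, str) else ''  # an int silently becomes ''

print(expand('$UID', environ))        # '' -- the empty reply the hacker saw
environ['UID'] = str(environ['UID'])  # the fix: store the value as a string
print(expand('$UID', environ))        # '1001'
```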
<code>
[start of src/cowrie/shell/session.py]
1 # Copyright (c) 2009-2014 Upi Tamminen <[email protected]>
2 # See the COPYRIGHT file for more information
3
4 from __future__ import absolute_import, division
5
6 from twisted.conch.interfaces import ISession
7 from twisted.conch.ssh import session
8 from twisted.python import log
9
10 from zope.interface import implementer
11
12 from cowrie.insults import insults
13 from cowrie.shell import protocol
14
15
16 @implementer(ISession)
17 class SSHSessionForCowrieUser(object):
18
19 def __init__(self, avatar, reactor=None):
20 """
21 Construct an C{SSHSessionForCowrieUser}.
22
23 @param avatar: The L{CowrieUser} for whom this is an SSH session.
24 @param reactor: An L{IReactorProcess} used to handle shell and exec
25 requests. Uses the default reactor if None.
26 """
27 self.protocol = None
28 self.avatar = avatar
29 self.server = avatar.server
30 self.uid = avatar.uid
31 self.gid = avatar.gid
32 self.username = avatar.username
33 self.environ = {
34 'LOGNAME': self.username,
35 'SHELL': '/bin/bash',
36 'USER': self.username,
37 'HOME': self.avatar.home,
38 'TMOUT': '1800',
39 'UID': self.uid}
40 if self.uid == 0:
41 self.environ['PATH'] = '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin'
42 else:
43 self.environ['PATH'] = '/usr/local/bin:/usr/bin:/bin:/usr/local/games:/usr/games'
44
45 self.server.initFileSystem()
46
47 if self.avatar.temporary:
48 self.server.fs.mkdir(self.avatar.home, self.uid, self.gid, 4096, 755)
49
50 def openShell(self, processprotocol):
51 self.protocol = insults.LoggingServerProtocol(
52 protocol.HoneyPotInteractiveProtocol, self)
53 self.protocol.makeConnection(processprotocol)
54 processprotocol.makeConnection(session.wrapProtocol(self.protocol))
55
56 def getPty(self, terminal, windowSize, attrs):
57 self.environ['TERM'] = terminal.decode("utf-8")
58 log.msg(
59 eventid='cowrie.client.size',
60 width=windowSize[1],
61 height=windowSize[0],
62 format='Terminal Size: %(width)s %(height)s'
63 )
64 self.windowSize = windowSize
65 return None
66
67 def execCommand(self, processprotocol, cmd):
68 self.protocol = insults.LoggingServerProtocol(
69 protocol.HoneyPotExecProtocol, self, cmd)
70 self.protocol.makeConnection(processprotocol)
71 processprotocol.makeConnection(session.wrapProtocol(self.protocol))
72
73 def closed(self):
74 """
75 this is reliably called on both logout and disconnect
76 we notify the protocol here we lost the connection
77 """
78 if self.protocol:
79 self.protocol.connectionLost("disconnected")
80 self.protocol = None
81
82 def eofReceived(self):
83 if self.protocol:
84 self.protocol.eofReceived()
85
86 def windowChanged(self, windowSize):
87 self.windowSize = windowSize
88
[end of src/cowrie/shell/session.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/cowrie/shell/session.py b/src/cowrie/shell/session.py
--- a/src/cowrie/shell/session.py
+++ b/src/cowrie/shell/session.py
@@ -36,7 +36,7 @@
'USER': self.username,
'HOME': self.avatar.home,
'TMOUT': '1800',
- 'UID': self.uid}
+ 'UID': str(self.uid)}
if self.uid == 0:
self.environ['PATH'] = '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin'
else:
| {"golden_diff": "diff --git a/src/cowrie/shell/session.py b/src/cowrie/shell/session.py\n--- a/src/cowrie/shell/session.py\n+++ b/src/cowrie/shell/session.py\n@@ -36,7 +36,7 @@\n 'USER': self.username,\n 'HOME': self.avatar.home,\n 'TMOUT': '1800',\n- 'UID': self.uid}\n+ 'UID': str(self.uid)}\n if self.uid == 0:\n self.environ['PATH'] = '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin'\n else:\n", "issue": "$UID not spitting out UID\n**Describe the bug**\r\nA hacker found a way to get inside the cowrie box and executed \"echo $UID\". The reply that came back was empty, and the hacker disconnected. My normal box returned the UID.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Connect to linux machine\r\n2. Type in \"Echo $UID\" (response should be something like 1001 or 0)\r\n3. Connect to cowrie\r\n4. Type in \"echo $UID\"\r\n5. See nothing replied on screen.\r\n\r\n**Expected behavior**\r\nWhen someone uses the variable $UID, get the variable from the honeyfs/etc/passwd file or return 0.\r\n\r\n**Server (please complete the following information):**\r\n - OS: Linux nanopineo2 5.8.6-sunxi64 #20.08.2 SMP Fri Sep 4 08:52:31 CEST 2020 aarch64 GNU/Linux\r\n - Python: Python 3.7.3\r\n\r\n**Additional context**\r\nMy hackers seem to get smarter each day...\r\n\n", "before_files": [{"content": "# Copyright (c) 2009-2014 Upi Tamminen <[email protected]>\n# See the COPYRIGHT file for more information\n\nfrom __future__ import absolute_import, division\n\nfrom twisted.conch.interfaces import ISession\nfrom twisted.conch.ssh import session\nfrom twisted.python import log\n\nfrom zope.interface import implementer\n\nfrom cowrie.insults import insults\nfrom cowrie.shell import protocol\n\n\n@implementer(ISession)\nclass SSHSessionForCowrieUser(object):\n\n def __init__(self, avatar, reactor=None):\n \"\"\"\n Construct an C{SSHSessionForCowrieUser}.\n\n @param avatar: The L{CowrieUser} for whom this is an SSH session.\n @param reactor: An L{IReactorProcess} used to handle shell and exec\n requests. 
Uses the default reactor if None.\n \"\"\"\n self.protocol = None\n self.avatar = avatar\n self.server = avatar.server\n self.uid = avatar.uid\n self.gid = avatar.gid\n self.username = avatar.username\n self.environ = {\n 'LOGNAME': self.username,\n 'SHELL': '/bin/bash',\n 'USER': self.username,\n 'HOME': self.avatar.home,\n 'TMOUT': '1800',\n 'UID': self.uid}\n if self.uid == 0:\n self.environ['PATH'] = '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin'\n else:\n self.environ['PATH'] = '/usr/local/bin:/usr/bin:/bin:/usr/local/games:/usr/games'\n\n self.server.initFileSystem()\n\n if self.avatar.temporary:\n self.server.fs.mkdir(self.avatar.home, self.uid, self.gid, 4096, 755)\n\n def openShell(self, processprotocol):\n self.protocol = insults.LoggingServerProtocol(\n protocol.HoneyPotInteractiveProtocol, self)\n self.protocol.makeConnection(processprotocol)\n processprotocol.makeConnection(session.wrapProtocol(self.protocol))\n\n def getPty(self, terminal, windowSize, attrs):\n self.environ['TERM'] = terminal.decode(\"utf-8\")\n log.msg(\n eventid='cowrie.client.size',\n width=windowSize[1],\n height=windowSize[0],\n format='Terminal Size: %(width)s %(height)s'\n )\n self.windowSize = windowSize\n return None\n\n def execCommand(self, processprotocol, cmd):\n self.protocol = insults.LoggingServerProtocol(\n protocol.HoneyPotExecProtocol, self, cmd)\n self.protocol.makeConnection(processprotocol)\n processprotocol.makeConnection(session.wrapProtocol(self.protocol))\n\n def closed(self):\n \"\"\"\n this is reliably called on both logout and disconnect\n we notify the protocol here we lost the connection\n \"\"\"\n if self.protocol:\n self.protocol.connectionLost(\"disconnected\")\n self.protocol = None\n\n def eofReceived(self):\n if self.protocol:\n self.protocol.eofReceived()\n\n def windowChanged(self, windowSize):\n self.windowSize = windowSize\n", "path": "src/cowrie/shell/session.py"}]} | 1,612 | 137 |
gh_patches_debug_21925 | rasdani/github-patches | git_diff | Parsl__parsl-2753 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Link to or provide instructions for example Perlmutter config
Since Cori is now retired, it can be removed from the [Configuration](https://parsl.readthedocs.io/en/stable/userguide/configuring.html?highlight=nersc#cori-nersc) section of the docs. In its place, it would be worthwhile to add (or link to) an example config for Perlmutter at NERSC, the details of which can be found [here](https://docs.nersc.gov/jobs/workflow/parsl/).
</issue>
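For reference, a Perlmutter entry would follow the same `SlurmProvider` pattern as the Cori file below. The sketch here is illustrative only: the QOS, constraint, and account placeholders are assumptions and should be checked against the NERSC documentation linked above:
```python
from parsl.config import Config
from parsl.executors import HighThroughputExecutor
from parsl.launchers import SrunLauncher
from parsl.providers import SlurmProvider

config = Config(
    executors=[
        HighThroughputExecutor(
            label='Perlmutter_HTEX',
            provider=SlurmProvider(
                nodes_per_block=1,
                init_blocks=1,
                # Placeholder directives -- substitute your own account/QOS/constraint.
                scheduler_options='#SBATCH -C cpu\n#SBATCH -q regular\n#SBATCH -A <account>',
                worker_init='',  # e.g. 'module load python; source activate parsl_env'
                launcher=SrunLauncher(),
                walltime='00:10:00',
                cmd_timeout=120,
            ),
        )
    ]
)
```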
<code>
[start of parsl/configs/cori.py]
1 from parsl.config import Config
2 from parsl.providers import SlurmProvider
3 from parsl.launchers import SrunLauncher
4 from parsl.executors import HighThroughputExecutor
5 from parsl.addresses import address_by_interface
6
7
8 config = Config(
9 executors=[
10 HighThroughputExecutor(
11 label='Cori_HTEX_multinode',
12 # This is the network interface on the login node to
13 # which compute nodes can communicate
14 address=address_by_interface('bond0.144'),
15 cores_per_worker=2,
16 provider=SlurmProvider(
17 'regular', # Partition / QOS
18 nodes_per_block=2,
19 init_blocks=1,
20 # string to prepend to #SBATCH blocks in the submit
21 # script to the scheduler eg: '#SBATCH --constraint=knl,quad,cache'
22 scheduler_options='',
23 # Command to be run before starting a worker, such as:
24 # 'module load Anaconda; source activate parsl_env'.
25 worker_init='',
26 # We request all hyperthreads on a node.
27 launcher=SrunLauncher(overrides='-c 272'),
28 walltime='00:10:00',
29 # Slurm scheduler on Cori can be slow at times,
30 # increase the command timeouts
31 cmd_timeout=120,
32 ),
33 )
34 ]
35 )
36
[end of parsl/configs/cori.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/parsl/configs/cori.py b/parsl/configs/cori.py
deleted file mode 100644
--- a/parsl/configs/cori.py
+++ /dev/null
@@ -1,35 +0,0 @@
-from parsl.config import Config
-from parsl.providers import SlurmProvider
-from parsl.launchers import SrunLauncher
-from parsl.executors import HighThroughputExecutor
-from parsl.addresses import address_by_interface
-
-
-config = Config(
- executors=[
- HighThroughputExecutor(
- label='Cori_HTEX_multinode',
- # This is the network interface on the login node to
- # which compute nodes can communicate
- address=address_by_interface('bond0.144'),
- cores_per_worker=2,
- provider=SlurmProvider(
- 'regular', # Partition / QOS
- nodes_per_block=2,
- init_blocks=1,
- # string to prepend to #SBATCH blocks in the submit
- # script to the scheduler eg: '#SBATCH --constraint=knl,quad,cache'
- scheduler_options='',
- # Command to be run before starting a worker, such as:
- # 'module load Anaconda; source activate parsl_env'.
- worker_init='',
- # We request all hyperthreads on a node.
- launcher=SrunLauncher(overrides='-c 272'),
- walltime='00:10:00',
- # Slurm scheduler on Cori can be slow at times,
- # increase the command timeouts
- cmd_timeout=120,
- ),
- )
- ]
-)
| {"golden_diff": "diff --git a/parsl/configs/cori.py b/parsl/configs/cori.py\ndeleted file mode 100644\n--- a/parsl/configs/cori.py\n+++ /dev/null\n@@ -1,35 +0,0 @@\n-from parsl.config import Config\n-from parsl.providers import SlurmProvider\n-from parsl.launchers import SrunLauncher\n-from parsl.executors import HighThroughputExecutor\n-from parsl.addresses import address_by_interface\n-\n-\n-config = Config(\n- executors=[\n- HighThroughputExecutor(\n- label='Cori_HTEX_multinode',\n- # This is the network interface on the login node to\n- # which compute nodes can communicate\n- address=address_by_interface('bond0.144'),\n- cores_per_worker=2,\n- provider=SlurmProvider(\n- 'regular', # Partition / QOS\n- nodes_per_block=2,\n- init_blocks=1,\n- # string to prepend to #SBATCH blocks in the submit\n- # script to the scheduler eg: '#SBATCH --constraint=knl,quad,cache'\n- scheduler_options='',\n- # Command to be run before starting a worker, such as:\n- # 'module load Anaconda; source activate parsl_env'.\n- worker_init='',\n- # We request all hyperthreads on a node.\n- launcher=SrunLauncher(overrides='-c 272'),\n- walltime='00:10:00',\n- # Slurm scheduler on Cori can be slow at times,\n- # increase the command timeouts\n- cmd_timeout=120,\n- ),\n- )\n- ]\n-)\n", "issue": "Link to or provide instructions for example Perlmutter config\nSince Cori is now retired, it can be removed from the [Configuration](https://parsl.readthedocs.io/en/stable/userguide/configuring.html?highlight=nersc#cori-nersc) section of the docs. In its place, it would be worthwhile to add (or link to) an example config for Perlmutter at NERSC, the details of which can be found [here](https://docs.nersc.gov/jobs/workflow/parsl/).\n", "before_files": [{"content": "from parsl.config import Config\nfrom parsl.providers import SlurmProvider\nfrom parsl.launchers import SrunLauncher\nfrom parsl.executors import HighThroughputExecutor\nfrom parsl.addresses import address_by_interface\n\n\nconfig = Config(\n executors=[\n HighThroughputExecutor(\n label='Cori_HTEX_multinode',\n # This is the network interface on the login node to\n # which compute nodes can communicate\n address=address_by_interface('bond0.144'),\n cores_per_worker=2,\n provider=SlurmProvider(\n 'regular', # Partition / QOS\n nodes_per_block=2,\n init_blocks=1,\n # string to prepend to #SBATCH blocks in the submit\n # script to the scheduler eg: '#SBATCH --constraint=knl,quad,cache'\n scheduler_options='',\n # Command to be run before starting a worker, such as:\n # 'module load Anaconda; source activate parsl_env'.\n worker_init='',\n # We request all hyperthreads on a node.\n launcher=SrunLauncher(overrides='-c 272'),\n walltime='00:10:00',\n # Slurm scheduler on Cori can be slow at times,\n # increase the command timeouts\n cmd_timeout=120,\n ),\n )\n ]\n)\n", "path": "parsl/configs/cori.py"}]} | 1,005 | 376 |
gh_patches_debug_127 | rasdani/github-patches | git_diff | googleapis__google-cloud-python-6232 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Re-generate library using tasks/synth.py
This PR was created by autosynth.
</issue>
<code>
[start of tasks/synth.py]
1 # Copyright 2018 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """This script is used to synthesize generated parts of this library."""
16
17 import synthtool as s
18 import synthtool.gcp as gcp
19 import logging
20
21 logging.basicConfig(level=logging.DEBUG)
22
23 gapic = gcp.GAPICGenerator()
24 common = gcp.CommonTemplates()
25 excludes = [
26 'README.rst',
27 'setup.py',
28 'docs/conf.py',
29 'docs/index.rst',
30 ]
31
32 for version in ['v2beta2', 'v2beta3']:
33 library = gapic.py_library(
34 'tasks', version,
35 config_path=f'artman_cloudtasks_{version}.yaml')
36
37 s.copy(library, excludes=excludes)
38
39 # Fix unindentation of bullet list second line
40 s.replace(
41 f'google/cloud/tasks_{version}/gapic/cloud_tasks_client.py',
42 '( \* .*\n )([^\s*])',
43 '\g<1> \g<2>')
44
45 s.replace(
46 f'google/cloud/tasks_{version}/gapic/cloud_tasks_client.py',
47 '(Google IAM .*?_) ',
48 '\g<1>_ ')
49
50 # Issues with Anonymous ('__') links. Change to named.
51 s.replace(
52 f"google/cloud/tasks_{version}/proto/*.py",
53 ">`__",
54 ">`_")
55
56 # Issue in v2beta2
57 s.replace(
58 f'google/cloud/tasks_v2beta2/gapic/cloud_tasks_client.py',
59 r'(Sample filter \\"app_engine_http_target: )\*\\".',
60 '\g<1>\\*\\".')
61
62 # Wrapped link fails due to space in link (v2beta2)
63 s.replace(
64 f"google/cloud/tasks_v2beta2/proto/queue_pb2.py",
65 '(uests in queue.yaml/xml) <\n\s+',
66 '\g<1>\n <')
67
[end of tasks/synth.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/tasks/synth.py b/tasks/synth.py
--- a/tasks/synth.py
+++ b/tasks/synth.py
@@ -25,6 +25,7 @@
excludes = [
'README.rst',
'setup.py',
+ 'nox*.py',
'docs/conf.py',
'docs/index.rst',
]
| {"golden_diff": "diff --git a/tasks/synth.py b/tasks/synth.py\n--- a/tasks/synth.py\n+++ b/tasks/synth.py\n@@ -25,6 +25,7 @@\n excludes = [\n 'README.rst',\n 'setup.py',\n+ 'nox*.py',\n 'docs/conf.py',\n 'docs/index.rst',\n ]\n", "issue": "Re-generate library using tasks/synth.py\nThis PR was created by autosynth.\n", "before_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"This script is used to synthesize generated parts of this library.\"\"\"\n\nimport synthtool as s\nimport synthtool.gcp as gcp\nimport logging\n\nlogging.basicConfig(level=logging.DEBUG)\n\ngapic = gcp.GAPICGenerator()\ncommon = gcp.CommonTemplates()\nexcludes = [\n 'README.rst',\n 'setup.py',\n 'docs/conf.py',\n 'docs/index.rst',\n]\n\nfor version in ['v2beta2', 'v2beta3']:\n library = gapic.py_library(\n 'tasks', version,\n config_path=f'artman_cloudtasks_{version}.yaml')\n\n s.copy(library, excludes=excludes)\n\n # Fix unindentation of bullet list second line\n s.replace(\n f'google/cloud/tasks_{version}/gapic/cloud_tasks_client.py',\n '( \\* .*\\n )([^\\s*])',\n '\\g<1> \\g<2>')\n\n s.replace(\n f'google/cloud/tasks_{version}/gapic/cloud_tasks_client.py',\n '(Google IAM .*?_) ',\n '\\g<1>_ ')\n\n # Issues with Anonymous ('__') links. Change to named.\n s.replace(\n f\"google/cloud/tasks_{version}/proto/*.py\",\n \">`__\",\n \">`_\")\n\n# Issue in v2beta2\ns.replace(\n f'google/cloud/tasks_v2beta2/gapic/cloud_tasks_client.py',\n r'(Sample filter \\\\\"app_engine_http_target: )\\*\\\\\".',\n '\\g<1>\\\\*\\\\\".')\n\n# Wrapped link fails due to space in link (v2beta2)\ns.replace(\n f\"google/cloud/tasks_v2beta2/proto/queue_pb2.py\",\n '(uests in queue.yaml/xml) <\\n\\s+',\n '\\g<1>\\n <')\n", "path": "tasks/synth.py"}]} | 1,206 | 76 |
gh_patches_debug_35367 | rasdani/github-patches | git_diff | mlflow__mlflow-9378 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Enable `PT011`
### Summary
Enable https://beta.ruff.rs/docs/rules/pytest-raises-too-broad.
```diff
diff --git a/pyproject.toml b/pyproject.toml
index c373b48ca..8b7810c04 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -25,6 +25,7 @@ select = [
"PT009",
"PT010",
"PT012",
+ "PT011",
"PT013",
"PT018",
"PT022",
@@ -72,6 +73,7 @@ extend-exclude = [
[tool.ruff.flake8-pytest-style]
mark-parentheses = false
fixture-parentheses = false
+raises-require-match-for = ["*"]
[tool.ruff.flake8-tidy-imports]
ban-relative-imports = "all"
```
- `raises-require-match-for = ["*"]` means all errors require `match`.
### Notes
- Make sure to open a PR from a **non-master** branch.
- Sign off the commit using the `-s` flag when making a commit:
```sh
git commit -s -m "..."
# ^^ make sure to use this
```
- Include `#{issue_number}` (e.g. `#123`) in the PR description when opening a PR.
</issue>
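For context, PT011 flags `pytest.raises` blocks that assert only a broad exception type; with `raises-require-match-for = ["*"]`, every such block must also pin down the message. A minimal sketch of the enforced style, using a hypothetical function under test:
```python
import pytest

def parse_port(value: str) -> int:
    # Hypothetical helper: raises ValueError for non-numeric input.
    return int(value)

def test_parse_port_rejects_garbage():
    # A bare pytest.raises(ValueError) would be flagged by PT011;
    # the `match` pattern keeps the assertion specific.
    with pytest.raises(ValueError, match="invalid literal"):
        parse_port("not-a-port")
```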
<code>
[start of pylint_plugins/__init__.py]
1 from pylint_plugins.pytest_raises_checker import PytestRaisesChecker
2 from pylint_plugins.unittest_assert_raises import UnittestAssertRaises
3 from pylint_plugins.import_checker import ImportChecker
4 from pylint_plugins.assign_checker import AssignChecker
5
6
7 def register(linter):
8 linter.register_checker(PytestRaisesChecker(linter))
9 linter.register_checker(UnittestAssertRaises(linter))
10 linter.register_checker(ImportChecker(linter))
11 linter.register_checker(AssignChecker(linter))
12
[end of pylint_plugins/__init__.py]
[start of pylint_plugins/errors.py]
1 from typing import NamedTuple, Dict, Tuple
2 from functools import reduce
3
4
5 class Message(NamedTuple):
6 id: str
7 name: str
8 message: str
9 reason: str
10
11 def to_dict(self) -> Dict[str, Tuple[str, str, str]]:
12 return {self.id: (self.message, self.name, self.reason)}
13
14
15 def to_msgs(*messages: Message) -> Dict[str, Tuple[str, str, str]]:
16 return reduce(lambda x, y: {**x, **y.to_dict()}, messages, {})
17
18
19 PYTEST_RAISES_WITHOUT_MATCH = Message(
20 id="W0001",
21 name="pytest-raises-without-match",
22 message="`pytest.raises` must be called with `match` argument`.",
23 reason="`pytest.raises` without `match` argument can lead to false positives.",
24 )
25
26
27 UNITTEST_PYTEST_RAISES = Message(
28 id="W0003",
29 name="unittest-assert-raises",
30 message="Use `pytest.raises` instead of `unittest.TestCase.assertRaises`.",
31 reason="To enforce 'pytest-raises-multiple-statements' Message.",
32 )
33
34
35 LAZY_BUILTIN_IMPORT = Message(
36 id="W0007",
37 name="lazy-builtin-import",
38 message="Import built-in module(s) (%s) at the top of the file.",
39 reason="There is no reason they should be imported inside a function.",
40 )
41
42 USELESS_ASSIGNMENT = Message(
43 id="W0008",
44 name="useless-assignment",
45 message="Useless assignment. Use immediate return instead.",
46 reason="For simplicity and readability",
47 )
48
[end of pylint_plugins/errors.py]
[start of pylint_plugins/pytest_raises_checker/__init__.py]
1 import astroid
2 from pylint.interfaces import IAstroidChecker
3 from pylint.checkers import BaseChecker
4
5 from pylint_plugins.errors import PYTEST_RAISES_WITHOUT_MATCH, to_msgs
6
7
8 def _is_pytest_raises_call(node: astroid.NodeNG):
9 if not isinstance(node, astroid.Call):
10 return False
11 if not isinstance(node.func, astroid.Attribute) or not isinstance(node.func.expr, astroid.Name):
12 return False
13 return node.func.expr.name == "pytest" and node.func.attrname == "raises"
14
15
16 def _called_with_match(node: astroid.Call):
17 # Note `match` is a keyword-only argument:
18 # https://docs.pytest.org/en/latest/reference/reference.html#pytest.raises
19 return any(k.arg == "match" for k in node.keywords)
20
21
22 def _contains_multiple_statements(raises_with: astroid.With):
23 return len(raises_with.body) > 1
24
25
26 class PytestRaisesChecker(BaseChecker):
27 __implements__ = IAstroidChecker
28
29 name = "pytest-raises-checker"
30 msgs = to_msgs(PYTEST_RAISES_WITHOUT_MATCH)
31 priority = -1
32
33 def visit_call(self, node: astroid.Call):
34 if not _is_pytest_raises_call(node):
35 return
36
37 if not _called_with_match(node):
38 self.add_message(PYTEST_RAISES_WITHOUT_MATCH.name, node=node)
39
[end of pylint_plugins/pytest_raises_checker/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pylint_plugins/__init__.py b/pylint_plugins/__init__.py
--- a/pylint_plugins/__init__.py
+++ b/pylint_plugins/__init__.py
@@ -1,11 +1,9 @@
-from pylint_plugins.pytest_raises_checker import PytestRaisesChecker
from pylint_plugins.unittest_assert_raises import UnittestAssertRaises
from pylint_plugins.import_checker import ImportChecker
from pylint_plugins.assign_checker import AssignChecker
def register(linter):
- linter.register_checker(PytestRaisesChecker(linter))
linter.register_checker(UnittestAssertRaises(linter))
linter.register_checker(ImportChecker(linter))
linter.register_checker(AssignChecker(linter))
diff --git a/pylint_plugins/errors.py b/pylint_plugins/errors.py
--- a/pylint_plugins/errors.py
+++ b/pylint_plugins/errors.py
@@ -16,14 +16,6 @@
return reduce(lambda x, y: {**x, **y.to_dict()}, messages, {})
-PYTEST_RAISES_WITHOUT_MATCH = Message(
- id="W0001",
- name="pytest-raises-without-match",
- message="`pytest.raises` must be called with `match` argument`.",
- reason="`pytest.raises` without `match` argument can lead to false positives.",
-)
-
-
UNITTEST_PYTEST_RAISES = Message(
id="W0003",
name="unittest-assert-raises",
diff --git a/pylint_plugins/pytest_raises_checker/__init__.py b/pylint_plugins/pytest_raises_checker/__init__.py
deleted file mode 100644
--- a/pylint_plugins/pytest_raises_checker/__init__.py
+++ /dev/null
@@ -1,38 +0,0 @@
-import astroid
-from pylint.interfaces import IAstroidChecker
-from pylint.checkers import BaseChecker
-
-from pylint_plugins.errors import PYTEST_RAISES_WITHOUT_MATCH, to_msgs
-
-
-def _is_pytest_raises_call(node: astroid.NodeNG):
- if not isinstance(node, astroid.Call):
- return False
- if not isinstance(node.func, astroid.Attribute) or not isinstance(node.func.expr, astroid.Name):
- return False
- return node.func.expr.name == "pytest" and node.func.attrname == "raises"
-
-
-def _called_with_match(node: astroid.Call):
- # Note `match` is a keyword-only argument:
- # https://docs.pytest.org/en/latest/reference/reference.html#pytest.raises
- return any(k.arg == "match" for k in node.keywords)
-
-
-def _contains_multiple_statements(raises_with: astroid.With):
- return len(raises_with.body) > 1
-
-
-class PytestRaisesChecker(BaseChecker):
- __implements__ = IAstroidChecker
-
- name = "pytest-raises-checker"
- msgs = to_msgs(PYTEST_RAISES_WITHOUT_MATCH)
- priority = -1
-
- def visit_call(self, node: astroid.Call):
- if not _is_pytest_raises_call(node):
- return
-
- if not _called_with_match(node):
- self.add_message(PYTEST_RAISES_WITHOUT_MATCH.name, node=node)
| {"golden_diff": "diff --git a/pylint_plugins/__init__.py b/pylint_plugins/__init__.py\n--- a/pylint_plugins/__init__.py\n+++ b/pylint_plugins/__init__.py\n@@ -1,11 +1,9 @@\n-from pylint_plugins.pytest_raises_checker import PytestRaisesChecker\n from pylint_plugins.unittest_assert_raises import UnittestAssertRaises\n from pylint_plugins.import_checker import ImportChecker\n from pylint_plugins.assign_checker import AssignChecker\n \n \n def register(linter):\n- linter.register_checker(PytestRaisesChecker(linter))\n linter.register_checker(UnittestAssertRaises(linter))\n linter.register_checker(ImportChecker(linter))\n linter.register_checker(AssignChecker(linter))\ndiff --git a/pylint_plugins/errors.py b/pylint_plugins/errors.py\n--- a/pylint_plugins/errors.py\n+++ b/pylint_plugins/errors.py\n@@ -16,14 +16,6 @@\n return reduce(lambda x, y: {**x, **y.to_dict()}, messages, {})\n \n \n-PYTEST_RAISES_WITHOUT_MATCH = Message(\n- id=\"W0001\",\n- name=\"pytest-raises-without-match\",\n- message=\"`pytest.raises` must be called with `match` argument`.\",\n- reason=\"`pytest.raises` without `match` argument can lead to false positives.\",\n-)\n-\n-\n UNITTEST_PYTEST_RAISES = Message(\n id=\"W0003\",\n name=\"unittest-assert-raises\",\ndiff --git a/pylint_plugins/pytest_raises_checker/__init__.py b/pylint_plugins/pytest_raises_checker/__init__.py\ndeleted file mode 100644\n--- a/pylint_plugins/pytest_raises_checker/__init__.py\n+++ /dev/null\n@@ -1,38 +0,0 @@\n-import astroid\n-from pylint.interfaces import IAstroidChecker\n-from pylint.checkers import BaseChecker\n-\n-from pylint_plugins.errors import PYTEST_RAISES_WITHOUT_MATCH, to_msgs\n-\n-\n-def _is_pytest_raises_call(node: astroid.NodeNG):\n- if not isinstance(node, astroid.Call):\n- return False\n- if not isinstance(node.func, astroid.Attribute) or not isinstance(node.func.expr, astroid.Name):\n- return False\n- return node.func.expr.name == \"pytest\" and node.func.attrname == \"raises\"\n-\n-\n-def _called_with_match(node: astroid.Call):\n- # Note `match` is a keyword-only argument:\n- # https://docs.pytest.org/en/latest/reference/reference.html#pytest.raises\n- return any(k.arg == \"match\" for k in node.keywords)\n-\n-\n-def _contains_multiple_statements(raises_with: astroid.With):\n- return len(raises_with.body) > 1\n-\n-\n-class PytestRaisesChecker(BaseChecker):\n- __implements__ = IAstroidChecker\n-\n- name = \"pytest-raises-checker\"\n- msgs = to_msgs(PYTEST_RAISES_WITHOUT_MATCH)\n- priority = -1\n-\n- def visit_call(self, node: astroid.Call):\n- if not _is_pytest_raises_call(node):\n- return\n-\n- if not _called_with_match(node):\n- self.add_message(PYTEST_RAISES_WITHOUT_MATCH.name, node=node)\n", "issue": "Enable `PT011`\n### Summary\r\n\r\nEnable https://beta.ruff.rs/docs/rules/pytest-raises-too-broad.\r\n\r\n```diff\r\ndiff --git a/pyproject.toml b/pyproject.toml\r\nindex c373b48ca..8b7810c04 100644\r\n--- a/pyproject.toml\r\n+++ b/pyproject.toml\r\n@@ -25,6 +25,7 @@ select = [\r\n \"PT009\",\r\n \"PT010\",\r\n \"PT012\",\r\n+ \"PT011\",\r\n \"PT013\",\r\n \"PT018\",\r\n \"PT022\",\r\n@@ -72,6 +73,7 @@ extend-exclude = [\r\n [tool.ruff.flake8-pytest-style]\r\n mark-parentheses = false\r\n fixture-parentheses = false\r\n+raises-require-match-for = [\"*\"]\r\n \r\n [tool.ruff.flake8-tidy-imports]\r\n ban-relative-imports = \"all\"\r\n```\r\n\r\n- `raises-require-match-for = [\"*\"]` means all errors require `match`.\r\n\r\n### Notes\r\n\r\n- Make sure to open a PR from a **non-master** branch.\r\n- Sign off the commit using the 
`-s` flag when making a commit:\r\n\r\n ```sh\r\n git commit -s -m \"...\"\r\n # ^^ make sure to use this\r\n ```\r\n\r\n- Include `#{issue_number}` (e.g. `#123`) in the PR description when opening a PR.\r\n\n", "before_files": [{"content": "from pylint_plugins.pytest_raises_checker import PytestRaisesChecker\nfrom pylint_plugins.unittest_assert_raises import UnittestAssertRaises\nfrom pylint_plugins.import_checker import ImportChecker\nfrom pylint_plugins.assign_checker import AssignChecker\n\n\ndef register(linter):\n linter.register_checker(PytestRaisesChecker(linter))\n linter.register_checker(UnittestAssertRaises(linter))\n linter.register_checker(ImportChecker(linter))\n linter.register_checker(AssignChecker(linter))\n", "path": "pylint_plugins/__init__.py"}, {"content": "from typing import NamedTuple, Dict, Tuple\nfrom functools import reduce\n\n\nclass Message(NamedTuple):\n id: str\n name: str\n message: str\n reason: str\n\n def to_dict(self) -> Dict[str, Tuple[str, str, str]]:\n return {self.id: (self.message, self.name, self.reason)}\n\n\ndef to_msgs(*messages: Message) -> Dict[str, Tuple[str, str, str]]:\n return reduce(lambda x, y: {**x, **y.to_dict()}, messages, {})\n\n\nPYTEST_RAISES_WITHOUT_MATCH = Message(\n id=\"W0001\",\n name=\"pytest-raises-without-match\",\n message=\"`pytest.raises` must be called with `match` argument`.\",\n reason=\"`pytest.raises` without `match` argument can lead to false positives.\",\n)\n\n\nUNITTEST_PYTEST_RAISES = Message(\n id=\"W0003\",\n name=\"unittest-assert-raises\",\n message=\"Use `pytest.raises` instead of `unittest.TestCase.assertRaises`.\",\n reason=\"To enforce 'pytest-raises-multiple-statements' Message.\",\n)\n\n\nLAZY_BUILTIN_IMPORT = Message(\n id=\"W0007\",\n name=\"lazy-builtin-import\",\n message=\"Import built-in module(s) (%s) at the top of the file.\",\n reason=\"There is no reason they should be imported inside a function.\",\n)\n\nUSELESS_ASSIGNMENT = Message(\n id=\"W0008\",\n name=\"useless-assignment\",\n message=\"Useless assignment. Use immediate return instead.\",\n reason=\"For simplicity and readability\",\n)\n", "path": "pylint_plugins/errors.py"}, {"content": "import astroid\nfrom pylint.interfaces import IAstroidChecker\nfrom pylint.checkers import BaseChecker\n\nfrom pylint_plugins.errors import PYTEST_RAISES_WITHOUT_MATCH, to_msgs\n\n\ndef _is_pytest_raises_call(node: astroid.NodeNG):\n if not isinstance(node, astroid.Call):\n return False\n if not isinstance(node.func, astroid.Attribute) or not isinstance(node.func.expr, astroid.Name):\n return False\n return node.func.expr.name == \"pytest\" and node.func.attrname == \"raises\"\n\n\ndef _called_with_match(node: astroid.Call):\n # Note `match` is a keyword-only argument:\n # https://docs.pytest.org/en/latest/reference/reference.html#pytest.raises\n return any(k.arg == \"match\" for k in node.keywords)\n\n\ndef _contains_multiple_statements(raises_with: astroid.With):\n return len(raises_with.body) > 1\n\n\nclass PytestRaisesChecker(BaseChecker):\n __implements__ = IAstroidChecker\n\n name = \"pytest-raises-checker\"\n msgs = to_msgs(PYTEST_RAISES_WITHOUT_MATCH)\n priority = -1\n\n def visit_call(self, node: astroid.Call):\n if not _is_pytest_raises_call(node):\n return\n\n if not _called_with_match(node):\n self.add_message(PYTEST_RAISES_WITHOUT_MATCH.name, node=node)\n", "path": "pylint_plugins/pytest_raises_checker/__init__.py"}]} | 1,845 | 711 |
gh_patches_debug_12537 | rasdani/github-patches | git_diff | Parsl__parsl-3238 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
parsl.providers.cluster_provider _write_submit_script should return nothing, rather than constant True
**Describe the bug**
_write_submit_script in parsl.providers.cluster_provider indicates failure by raising an exception, and so should not be returning a True (or False) value. Instead it should return None, either via a bare `return` or by falling off the end of the method without a return statement.
To tidy this up, change that return handling. Edit the docstring to match.
</issue>
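Concretely, the convention requested here looks like the sketch below: success is signalled by not raising, and the docstring documents `None`. This is a trimmed illustration of the method, not the full patched file:
```python
from string import Template

def _write_submit_script(self, template, script_filename, job_name, configs):
    """Generate submit script and write it to a file.

    Returns:
        - None

    Raises:
        SchedulerMissingArgs: if the template is missing arguments.
        ScriptPathError: if the submit script cannot be written out.
    """
    submit_script = Template(template).substitute(jobname=job_name, **configs)
    with open(script_filename, 'w') as f:
        f.write(submit_script)
    # No return statement: falling off the end returns None.
```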
<code>
[start of parsl/providers/cluster_provider.py]
1 import logging
2 from abc import abstractmethod
3 from string import Template
4
5 from parsl.providers.errors import SchedulerMissingArgs, ScriptPathError
6 from parsl.launchers.base import Launcher
7 from parsl.launchers.errors import BadLauncher
8 from parsl.providers.base import ExecutionProvider
9
10 logger = logging.getLogger(__name__)
11
12
13 class ClusterProvider(ExecutionProvider):
14 """ This class defines behavior common to all cluster/supercompute-style scheduler systems.
15
16 Parameters
17 ----------
18 label : str
19 Label for this provider.
20 channel : Channel
21 Channel for accessing this provider. Possible channels include
22 :class:`~parsl.channels.LocalChannel` (the default),
23 :class:`~parsl.channels.SSHChannel`, or
24 :class:`~parsl.channels.SSHInteractiveLoginChannel`.
25 walltime : str
26 Walltime requested per block in HH:MM:SS.
27 launcher : Launcher
28 Launcher for this provider.
29 cmd_timeout : int
30 Timeout for commands made to the scheduler in seconds
31
32 .. code:: python
33
34 +------------------
35 |
36 script_string ------->| submit
37 id <--------|---+
38 |
39 [ ids ] ------->| status
40 [statuses] <--------|----+
41 |
42 [ ids ] ------->| cancel
43 [cancel] <--------|----+
44 |
45 +-------------------
46 """
47
48 def __init__(self,
49 label,
50 channel,
51 nodes_per_block,
52 init_blocks,
53 min_blocks,
54 max_blocks,
55 parallelism,
56 walltime,
57 launcher,
58 cmd_timeout=10):
59
60 self._label = label
61 self.channel = channel
62 self.nodes_per_block = nodes_per_block
63 self.init_blocks = init_blocks
64 self.min_blocks = min_blocks
65 self.max_blocks = max_blocks
66 self.parallelism = parallelism
67 self.launcher = launcher
68 self.walltime = walltime
69 self.cmd_timeout = cmd_timeout
70 if not isinstance(self.launcher, Launcher):
71 raise BadLauncher(self.launcher)
72
73 self.script_dir = None
74
75 # Dictionary that keeps track of jobs, keyed on job_id
76 self.resources = {}
77
78 def execute_wait(self, cmd, timeout=None):
79 t = self.cmd_timeout
80 if timeout is not None:
81 t = timeout
82 return self.channel.execute_wait(cmd, t)
83
84 def _write_submit_script(self, template, script_filename, job_name, configs):
85 """Generate submit script and write it to a file.
86
87 Args:
88 - template (string) : The template string to be used for the writing submit script
89 - script_filename (string) : Name of the submit script
90 - job_name (string) : job name
91 - configs (dict) : configs that get pushed into the template
92
93 Returns:
94 - True: on success
95
96 Raises:
97 SchedulerMissingArgs : If template is missing args
98 ScriptPathError : Unable to write submit script out
99 """
100
101 try:
102 submit_script = Template(template).substitute(jobname=job_name, **configs)
103 with open(script_filename, 'w') as f:
104 f.write(submit_script)
105
106 except KeyError as e:
107 logger.error("Missing keys for submit script : %s", e)
108 raise SchedulerMissingArgs(e.args, self.label)
109
110 except IOError as e:
111 logger.error("Failed writing to submit script: %s", script_filename)
112 raise ScriptPathError(script_filename, e)
113 except Exception as e:
114 print("Template : ", template)
115 print("Args : ", job_name)
116 print("Kwargs : ", configs)
117 logger.error("Uncategorized error: %s", e)
118 raise e
119
120 return True
121
122 @abstractmethod
123 def _status(self):
124 pass
125
126 def status(self, job_ids):
127 """ Get the status of a list of jobs identified by the job identifiers
128 returned from the submit request.
129
130 Args:
131 - job_ids (list) : A list of job identifiers
132
133 Returns:
134 - A list of JobStatus objects corresponding to each job_id in the job_ids list.
135
136 Raises:
137 - ExecutionProviderException or its subclasses
138
139 """
140 if job_ids:
141 self._status()
142 return [self.resources[jid]['status'] for jid in job_ids]
143
144 @property
145 def label(self):
146 return self._label
147
[end of parsl/providers/cluster_provider.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/parsl/providers/cluster_provider.py b/parsl/providers/cluster_provider.py
--- a/parsl/providers/cluster_provider.py
+++ b/parsl/providers/cluster_provider.py
@@ -91,7 +91,7 @@
- configs (dict) : configs that get pushed into the template
Returns:
- - True: on success
+ - None
Raises:
SchedulerMissingArgs : If template is missing args
@@ -117,8 +117,6 @@
logger.error("Uncategorized error: %s", e)
raise e
- return True
-
@abstractmethod
def _status(self):
pass
| {"golden_diff": "diff --git a/parsl/providers/cluster_provider.py b/parsl/providers/cluster_provider.py\n--- a/parsl/providers/cluster_provider.py\n+++ b/parsl/providers/cluster_provider.py\n@@ -91,7 +91,7 @@\n - configs (dict) : configs that get pushed into the template\n \n Returns:\n- - True: on success\n+ - None\n \n Raises:\n SchedulerMissingArgs : If template is missing args\n@@ -117,8 +117,6 @@\n logger.error(\"Uncategorized error: %s\", e)\n raise e\n \n- return True\n-\n @abstractmethod\n def _status(self):\n pass\n", "issue": "parsl.providers.cluster_provider _write_submit_script should return nothing, rather than constant True\n**Describe the bug**\r\n\r\n_write_submit_script in parsl.providers.cluster_provider indicates failure by raising an exception, and so should not be returning a True (or False) value. Instead it should return None by either `return` on its own, or falling off the end of the method without a return statement.\r\n\r\nTo tidy this up, change that return handling. Edit the docstring to match.\r\n\r\n\n", "before_files": [{"content": "import logging\nfrom abc import abstractmethod\nfrom string import Template\n\nfrom parsl.providers.errors import SchedulerMissingArgs, ScriptPathError\nfrom parsl.launchers.base import Launcher\nfrom parsl.launchers.errors import BadLauncher\nfrom parsl.providers.base import ExecutionProvider\n\nlogger = logging.getLogger(__name__)\n\n\nclass ClusterProvider(ExecutionProvider):\n \"\"\" This class defines behavior common to all cluster/supercompute-style scheduler systems.\n\n Parameters\n ----------\n label : str\n Label for this provider.\n channel : Channel\n Channel for accessing this provider. Possible channels include\n :class:`~parsl.channels.LocalChannel` (the default),\n :class:`~parsl.channels.SSHChannel`, or\n :class:`~parsl.channels.SSHInteractiveLoginChannel`.\n walltime : str\n Walltime requested per block in HH:MM:SS.\n launcher : Launcher\n Launcher for this provider.\n cmd_timeout : int\n Timeout for commands made to the scheduler in seconds\n\n .. 
code:: python\n\n +------------------\n |\n script_string ------->| submit\n id <--------|---+\n |\n [ ids ] ------->| status\n [statuses] <--------|----+\n |\n [ ids ] ------->| cancel\n [cancel] <--------|----+\n |\n +-------------------\n \"\"\"\n\n def __init__(self,\n label,\n channel,\n nodes_per_block,\n init_blocks,\n min_blocks,\n max_blocks,\n parallelism,\n walltime,\n launcher,\n cmd_timeout=10):\n\n self._label = label\n self.channel = channel\n self.nodes_per_block = nodes_per_block\n self.init_blocks = init_blocks\n self.min_blocks = min_blocks\n self.max_blocks = max_blocks\n self.parallelism = parallelism\n self.launcher = launcher\n self.walltime = walltime\n self.cmd_timeout = cmd_timeout\n if not isinstance(self.launcher, Launcher):\n raise BadLauncher(self.launcher)\n\n self.script_dir = None\n\n # Dictionary that keeps track of jobs, keyed on job_id\n self.resources = {}\n\n def execute_wait(self, cmd, timeout=None):\n t = self.cmd_timeout\n if timeout is not None:\n t = timeout\n return self.channel.execute_wait(cmd, t)\n\n def _write_submit_script(self, template, script_filename, job_name, configs):\n \"\"\"Generate submit script and write it to a file.\n\n Args:\n - template (string) : The template string to be used for the writing submit script\n - script_filename (string) : Name of the submit script\n - job_name (string) : job name\n - configs (dict) : configs that get pushed into the template\n\n Returns:\n - True: on success\n\n Raises:\n SchedulerMissingArgs : If template is missing args\n ScriptPathError : Unable to write submit script out\n \"\"\"\n\n try:\n submit_script = Template(template).substitute(jobname=job_name, **configs)\n with open(script_filename, 'w') as f:\n f.write(submit_script)\n\n except KeyError as e:\n logger.error(\"Missing keys for submit script : %s\", e)\n raise SchedulerMissingArgs(e.args, self.label)\n\n except IOError as e:\n logger.error(\"Failed writing to submit script: %s\", script_filename)\n raise ScriptPathError(script_filename, e)\n except Exception as e:\n print(\"Template : \", template)\n print(\"Args : \", job_name)\n print(\"Kwargs : \", configs)\n logger.error(\"Uncategorized error: %s\", e)\n raise e\n\n return True\n\n @abstractmethod\n def _status(self):\n pass\n\n def status(self, job_ids):\n \"\"\" Get the status of a list of jobs identified by the job identifiers\n returned from the submit request.\n\n Args:\n - job_ids (list) : A list of job identifiers\n\n Returns:\n - A list of JobStatus objects corresponding to each job_id in the job_ids list.\n\n Raises:\n - ExecutionProviderException or its subclasses\n\n \"\"\"\n if job_ids:\n self._status()\n return [self.resources[jid]['status'] for jid in job_ids]\n\n @property\n def label(self):\n return self._label\n", "path": "parsl/providers/cluster_provider.py"}]} | 1,925 | 152 |
gh_patches_debug_28089 | rasdani/github-patches | git_diff | open-mmlab__mmdetection-1603 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
NMS fails on any non-default GPU.
I'm creating an issue that corresponds to the problem that came up in #1603. I'm reposting the body of that issue here:
I get an error when I try to run NMS code on any GPU except 0.
The issue is that I get RuntimeError: cuda runtime error (700) : an illegal memory access was encountered at mmdet/ops/nms/src/nms_kernel.cu:103 when I try to run NMS with a Tensor on any device except CPU or 0. The error happens on this line:
    THCudaCheck(cudaMemcpy(&mask_host[0],
                           mask_dev,
                           sizeof(unsigned long long) * boxes_num * col_blocks,
                           cudaMemcpyDeviceToHost));
But I believe the issue is actually here:
    THCState *state = at::globalContext().lazyInitCUDA(); // TODO replace with getTHCState
    unsigned long long* mask_dev = NULL;
    //THCudaCheck(THCudaMalloc(state, (void**) &mask_dev,
    //                         boxes_num * col_blocks * sizeof(unsigned long long)));
    mask_dev = (unsigned long long*) THCudaMalloc(state, boxes_num * col_blocks * sizeof(unsigned long long));
My guess is that THCudaMalloc is creating the mask_dev array on device 0 and not the device corresponding to the input at::Tensor boxes. It looks like state might encode which device a new cuda array is allocated on, so my intuition would be to try and grab the state from boxes. However, I'm not a CUDA expert, so I'm probably totally off base for how to use THCState objects. I was attempting to look through the pytorch docs / source to see if I could figure something out, but I'm not having any luck.
Any pointers on how this issue might be handled would be appreciated. Note that if you have two GPUs you can reproduce the error by checking out this PR and running: xdoctest -m tests/test_nms.py test_nms_device_and_dtypes_gpu
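Edit: until the kernel itself is fixed, one workaround sketch (untested, and the import path is my assumption) is to make the input tensor's GPU the current device before calling into the extension, so any device-default allocations land on the same card:
```python
import torch
from mmdet.ops.nms import nms_cuda  # assumed import path

def nms_pinned(dets_th, iou_thr):
    # Pin the current CUDA device to dets_th's device for the duration of
    # the call; allocations that default to the "current" device then end
    # up on the same GPU as the boxes instead of on device 0.
    with torch.cuda.device(dets_th.device):
        return nms_cuda.nms(dets_th, iou_thr)
```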
</issue>
<code>
[start of mmdet/ops/nms/nms_wrapper.py]
1 import numpy as np
2 import torch
3
4 from . import nms_cpu, nms_cuda
5 from .soft_nms_cpu import soft_nms_cpu
6
7
8 def nms(dets, iou_thr, device_id=None):
9 """Dispatch to either CPU or GPU NMS implementations.
10
11 The input can be either a torch tensor or numpy array. GPU NMS will be used
12 if the input is a gpu tensor or device_id is specified, otherwise CPU NMS
13 will be used. The returned type will always be the same as inputs.
14
15 Arguments:
16 dets (torch.Tensor or np.ndarray): bboxes with scores.
17 iou_thr (float): IoU threshold for NMS.
18 device_id (int, optional): when `dets` is a numpy array, if `device_id`
19 is None, then cpu nms is used, otherwise gpu_nms will be used.
20
21 Returns:
22 tuple: kept bboxes and indice, which is always the same data type as
23 the input.
24 """
25 # convert dets (tensor or numpy array) to tensor
26 if isinstance(dets, torch.Tensor):
27 is_numpy = False
28 dets_th = dets
29 elif isinstance(dets, np.ndarray):
30 is_numpy = True
31 device = 'cpu' if device_id is None else 'cuda:{}'.format(device_id)
32 dets_th = torch.from_numpy(dets).to(device)
33 else:
34 raise TypeError(
35 'dets must be either a Tensor or numpy array, but got {}'.format(
36 type(dets)))
37
38 # execute cpu or cuda nms
39 if dets_th.shape[0] == 0:
40 inds = dets_th.new_zeros(0, dtype=torch.long)
41 else:
42 if dets_th.is_cuda:
43 inds = nms_cuda.nms(dets_th, iou_thr)
44 else:
45 inds = nms_cpu.nms(dets_th, iou_thr)
46
47 if is_numpy:
48 inds = inds.cpu().numpy()
49 return dets[inds, :], inds
50
51
52 def soft_nms(dets, iou_thr, method='linear', sigma=0.5, min_score=1e-3):
53 if isinstance(dets, torch.Tensor):
54 is_tensor = True
55 dets_np = dets.detach().cpu().numpy()
56 elif isinstance(dets, np.ndarray):
57 is_tensor = False
58 dets_np = dets
59 else:
60 raise TypeError(
61 'dets must be either a Tensor or numpy array, but got {}'.format(
62 type(dets)))
63
64 method_codes = {'linear': 1, 'gaussian': 2}
65 if method not in method_codes:
66 raise ValueError('Invalid method for SoftNMS: {}'.format(method))
67 new_dets, inds = soft_nms_cpu(
68 dets_np,
69 iou_thr,
70 method=method_codes[method],
71 sigma=sigma,
72 min_score=min_score)
73
74 if is_tensor:
75 return dets.new_tensor(new_dets), dets.new_tensor(
76 inds, dtype=torch.long)
77 else:
78 return new_dets.astype(np.float32), inds.astype(np.int64)
79
[end of mmdet/ops/nms/nms_wrapper.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mmdet/ops/nms/nms_wrapper.py b/mmdet/ops/nms/nms_wrapper.py
--- a/mmdet/ops/nms/nms_wrapper.py
+++ b/mmdet/ops/nms/nms_wrapper.py
@@ -21,6 +21,18 @@
Returns:
tuple: kept bboxes and indice, which is always the same data type as
the input.
+
+ Example:
+ >>> dets = np.array([[49.1, 32.4, 51.0, 35.9, 0.9],
+ >>> [49.3, 32.9, 51.0, 35.3, 0.9],
+ >>> [49.2, 31.8, 51.0, 35.4, 0.5],
+ >>> [35.1, 11.5, 39.1, 15.7, 0.5],
+ >>> [35.6, 11.8, 39.3, 14.2, 0.5],
+ >>> [35.3, 11.5, 39.9, 14.5, 0.4],
+ >>> [35.2, 11.7, 39.7, 15.7, 0.3]], dtype=np.float32)
+ >>> iou_thr = 0.7
+ >>> supressed, inds = nms(dets, iou_thr)
+ >>> assert len(inds) == len(supressed) == 3
"""
# convert dets (tensor or numpy array) to tensor
if isinstance(dets, torch.Tensor):
@@ -50,6 +62,18 @@
def soft_nms(dets, iou_thr, method='linear', sigma=0.5, min_score=1e-3):
+ """
+ Example:
+ >>> dets = np.array([[4., 3., 5., 3., 0.9],
+ >>> [4., 3., 5., 4., 0.9],
+ >>> [3., 1., 3., 1., 0.5],
+ >>> [3., 1., 3., 1., 0.5],
+ >>> [3., 1., 3., 1., 0.4],
+ >>> [3., 1., 3., 1., 0.0]], dtype=np.float32)
+ >>> iou_thr = 0.7
+ >>> supressed, inds = soft_nms(dets, iou_thr, sigma=0.5)
+ >>> assert len(inds) == len(supressed) == 3
+ """
if isinstance(dets, torch.Tensor):
is_tensor = True
dets_np = dets.detach().cpu().numpy()
| {"golden_diff": "diff --git a/mmdet/ops/nms/nms_wrapper.py b/mmdet/ops/nms/nms_wrapper.py\n--- a/mmdet/ops/nms/nms_wrapper.py\n+++ b/mmdet/ops/nms/nms_wrapper.py\n@@ -21,6 +21,18 @@\n Returns:\n tuple: kept bboxes and indice, which is always the same data type as\n the input.\n+\n+ Example:\n+ >>> dets = np.array([[49.1, 32.4, 51.0, 35.9, 0.9],\n+ >>> [49.3, 32.9, 51.0, 35.3, 0.9],\n+ >>> [49.2, 31.8, 51.0, 35.4, 0.5],\n+ >>> [35.1, 11.5, 39.1, 15.7, 0.5],\n+ >>> [35.6, 11.8, 39.3, 14.2, 0.5],\n+ >>> [35.3, 11.5, 39.9, 14.5, 0.4],\n+ >>> [35.2, 11.7, 39.7, 15.7, 0.3]], dtype=np.float32)\n+ >>> iou_thr = 0.7\n+ >>> supressed, inds = nms(dets, iou_thr)\n+ >>> assert len(inds) == len(supressed) == 3\n \"\"\"\n # convert dets (tensor or numpy array) to tensor\n if isinstance(dets, torch.Tensor):\n@@ -50,6 +62,18 @@\n \n \n def soft_nms(dets, iou_thr, method='linear', sigma=0.5, min_score=1e-3):\n+ \"\"\"\n+ Example:\n+ >>> dets = np.array([[4., 3., 5., 3., 0.9],\n+ >>> [4., 3., 5., 4., 0.9],\n+ >>> [3., 1., 3., 1., 0.5],\n+ >>> [3., 1., 3., 1., 0.5],\n+ >>> [3., 1., 3., 1., 0.4],\n+ >>> [3., 1., 3., 1., 0.0]], dtype=np.float32)\n+ >>> iou_thr = 0.7\n+ >>> supressed, inds = soft_nms(dets, iou_thr, sigma=0.5)\n+ >>> assert len(inds) == len(supressed) == 3\n+ \"\"\"\n if isinstance(dets, torch.Tensor):\n is_tensor = True\n dets_np = dets.detach().cpu().numpy()\n", "issue": "NMS fails on any non-default GPU. \nI'm creating an issue that corresponds to the problem that came up in #1603. I'm reposting the body of that issue here:\r\n\r\nI get an error when I try to run NMS code on any GPU except 0. \r\n\r\nThe issue is that I get RuntimeError: cuda runtime error (700) : an illegal memory access was encountered at mmdet/ops/nms/src/nms_kernel.cu:103 when I try to run NMS with a Tensor on any device except CPU or 0. The error happens on this line:\r\n\r\n THCudaCheck(cudaMemcpy(&mask_host[0],\r\n mask_dev,\r\n sizeof(unsigned long long) * boxes_num * col_blocks,\r\n cudaMemcpyDeviceToHost));\r\nBut I believe the issue is actually here:\r\n\r\n THCState *state = at::globalContext().lazyInitCUDA(); // TODO replace with getTHCState\r\n\r\n unsigned long long* mask_dev = NULL;\r\n //THCudaCheck(THCudaMalloc(state, (void**) &mask_dev,\r\n // boxes_num * col_blocks * sizeof(unsigned long long)));\r\n\r\n mask_dev = (unsigned long long*) THCudaMalloc(state, boxes_num * col_blocks * sizeof(unsigned long long));\r\nMy guess is that THCudaMalloc is creating the mask_dev array on device 0 and not the device corresponding to the input at::Tensor boxes. It looks like state might encode which device a new cuda array is allocated on, so my intuition would be to try and grab the state from boxes. However, I'm not a CUDA expert, so I'm probably totally off base for how to use THCState objects. I was attempting to look through the pytorch docs / source to see if I could figure something out, but I'm not having any luck.\r\n\r\nAny pointers on how this issue might be handled would be appreciated. Note that if you have two GPUs you can reproduce the error by checking out this PR and running: xdoctest -m tests/test_nms.py test_nms_device_and_dtypes_gpu\r\n\n", "before_files": [{"content": "import numpy as np\nimport torch\n\nfrom . import nms_cpu, nms_cuda\nfrom .soft_nms_cpu import soft_nms_cpu\n\n\ndef nms(dets, iou_thr, device_id=None):\n \"\"\"Dispatch to either CPU or GPU NMS implementations.\n\n The input can be either a torch tensor or numpy array. 
GPU NMS will be used\n if the input is a gpu tensor or device_id is specified, otherwise CPU NMS\n will be used. The returned type will always be the same as inputs.\n\n Arguments:\n dets (torch.Tensor or np.ndarray): bboxes with scores.\n iou_thr (float): IoU threshold for NMS.\n device_id (int, optional): when `dets` is a numpy array, if `device_id`\n is None, then cpu nms is used, otherwise gpu_nms will be used.\n\n Returns:\n tuple: kept bboxes and indice, which is always the same data type as\n the input.\n \"\"\"\n # convert dets (tensor or numpy array) to tensor\n if isinstance(dets, torch.Tensor):\n is_numpy = False\n dets_th = dets\n elif isinstance(dets, np.ndarray):\n is_numpy = True\n device = 'cpu' if device_id is None else 'cuda:{}'.format(device_id)\n dets_th = torch.from_numpy(dets).to(device)\n else:\n raise TypeError(\n 'dets must be either a Tensor or numpy array, but got {}'.format(\n type(dets)))\n\n # execute cpu or cuda nms\n if dets_th.shape[0] == 0:\n inds = dets_th.new_zeros(0, dtype=torch.long)\n else:\n if dets_th.is_cuda:\n inds = nms_cuda.nms(dets_th, iou_thr)\n else:\n inds = nms_cpu.nms(dets_th, iou_thr)\n\n if is_numpy:\n inds = inds.cpu().numpy()\n return dets[inds, :], inds\n\n\ndef soft_nms(dets, iou_thr, method='linear', sigma=0.5, min_score=1e-3):\n if isinstance(dets, torch.Tensor):\n is_tensor = True\n dets_np = dets.detach().cpu().numpy()\n elif isinstance(dets, np.ndarray):\n is_tensor = False\n dets_np = dets\n else:\n raise TypeError(\n 'dets must be either a Tensor or numpy array, but got {}'.format(\n type(dets)))\n\n method_codes = {'linear': 1, 'gaussian': 2}\n if method not in method_codes:\n raise ValueError('Invalid method for SoftNMS: {}'.format(method))\n new_dets, inds = soft_nms_cpu(\n dets_np,\n iou_thr,\n method=method_codes[method],\n sigma=sigma,\n min_score=min_score)\n\n if is_tensor:\n return dets.new_tensor(new_dets), dets.new_tensor(\n inds, dtype=torch.long)\n else:\n return new_dets.astype(np.float32), inds.astype(np.int64)\n", "path": "mmdet/ops/nms/nms_wrapper.py"}]} | 1,814 | 684 |
gh_patches_debug_28277 | rasdani/github-patches | git_diff | pyqtgraph__pyqtgraph-458 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
export image bug related to type casting
Hi there,
I've got a Qt5-only Python distro installed (WinPython 3.5 Qt5) which includes pyqtgraph 0.10.0. Exporting images from PlotWidgets and ImageViews doesn't work anymore and gives this exception:
    Traceback (most recent call last):
      File "C:\WinPython35_Qt5\python-3.5.3.amd64\lib\site-packages\pyqtgraph\exporters\Exporter.py", line 77, in fileSaveFinished
        self.export(fileName=fileName, **self.fileDialog.opts)
      File "C:\WinPython35_Qt5\python-3.5.3.amd64\lib\site-packages\pyqtgraph\exporters\ImageExporter.py", line 70, in export
        bg = np.empty((self.params['width'], self.params['height'], 4), dtype=np.ubyte)
    TypeError: 'float' object cannot be interpreted as an integer
    QWaitCondition: Destroyed while threads are still waiting
Didn't happen with WinPython 3.5 Qt4 (pyqtgraph 0.9.10 I think). Am I the only one experiencing this?
Update: simple fix: in ImageExporter.py, line 70:
`bg = np.empty((int(self.params['width']), int(self.params['height']), 4), dtype=np.ubyte)`
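For reference, a standalone illustration of the failure and the cast — the float sizes below stand in for what Qt5's geometry getters return here:
```python
import numpy as np

w, h = 640.0, 480.0  # Qt5 hands back floats where Qt4 gave ints
# np.empty((w, h, 4), ...) raises TypeError: 'float' object cannot be
# interpreted as an integer, so truncate explicitly:
bg = np.empty((int(w), int(h), 4), dtype=np.ubyte)
print(bg.shape)  # (640, 480, 4)
```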
</issue>
<code>
[start of pyqtgraph/exporters/ImageExporter.py]
1 from .Exporter import Exporter
2 from ..parametertree import Parameter
3 from ..Qt import QtGui, QtCore, QtSvg, USE_PYSIDE
4 from .. import functions as fn
5 import numpy as np
6
7 __all__ = ['ImageExporter']
8
9 class ImageExporter(Exporter):
10 Name = "Image File (PNG, TIF, JPG, ...)"
11 allowCopy = True
12
13 def __init__(self, item):
14 Exporter.__init__(self, item)
15 tr = self.getTargetRect()
16 if isinstance(item, QtGui.QGraphicsItem):
17 scene = item.scene()
18 else:
19 scene = item
20 bgbrush = scene.views()[0].backgroundBrush()
21 bg = bgbrush.color()
22 if bgbrush.style() == QtCore.Qt.NoBrush:
23 bg.setAlpha(0)
24
25 self.params = Parameter(name='params', type='group', children=[
26 {'name': 'width', 'type': 'int', 'value': tr.width(), 'limits': (0, None)},
27 {'name': 'height', 'type': 'int', 'value': tr.height(), 'limits': (0, None)},
28 {'name': 'antialias', 'type': 'bool', 'value': True},
29 {'name': 'background', 'type': 'color', 'value': bg},
30 ])
31 self.params.param('width').sigValueChanged.connect(self.widthChanged)
32 self.params.param('height').sigValueChanged.connect(self.heightChanged)
33
34 def widthChanged(self):
35 sr = self.getSourceRect()
36 ar = float(sr.height()) / sr.width()
37 self.params.param('height').setValue(self.params['width'] * ar, blockSignal=self.heightChanged)
38
39 def heightChanged(self):
40 sr = self.getSourceRect()
41 ar = float(sr.width()) / sr.height()
42 self.params.param('width').setValue(self.params['height'] * ar, blockSignal=self.widthChanged)
43
44 def parameters(self):
45 return self.params
46
47 def export(self, fileName=None, toBytes=False, copy=False):
48 if fileName is None and not toBytes and not copy:
49 if USE_PYSIDE:
50 filter = ["*."+str(f) for f in QtGui.QImageWriter.supportedImageFormats()]
51 else:
52 filter = ["*."+bytes(f).decode('utf-8') for f in QtGui.QImageWriter.supportedImageFormats()]
53 preferred = ['*.png', '*.tif', '*.jpg']
54 for p in preferred[::-1]:
55 if p in filter:
56 filter.remove(p)
57 filter.insert(0, p)
58 self.fileSaveDialog(filter=filter)
59 return
60
61 targetRect = QtCore.QRect(0, 0, self.params['width'], self.params['height'])
62 sourceRect = self.getSourceRect()
63
64
65 #self.png = QtGui.QImage(targetRect.size(), QtGui.QImage.Format_ARGB32)
66 #self.png.fill(pyqtgraph.mkColor(self.params['background']))
67 w, h = self.params['width'], self.params['height']
68 if w == 0 or h == 0:
69 raise Exception("Cannot export image with size=0 (requested export size is %dx%d)" % (w,h))
70 bg = np.empty((self.params['width'], self.params['height'], 4), dtype=np.ubyte)
71 color = self.params['background']
72 bg[:,:,0] = color.blue()
73 bg[:,:,1] = color.green()
74 bg[:,:,2] = color.red()
75 bg[:,:,3] = color.alpha()
76 self.png = fn.makeQImage(bg, alpha=True)
77
78 ## set resolution of image:
79 origTargetRect = self.getTargetRect()
80 resolutionScale = targetRect.width() / origTargetRect.width()
81 #self.png.setDotsPerMeterX(self.png.dotsPerMeterX() * resolutionScale)
82 #self.png.setDotsPerMeterY(self.png.dotsPerMeterY() * resolutionScale)
83
84 painter = QtGui.QPainter(self.png)
85 #dtr = painter.deviceTransform()
86 try:
87 self.setExportMode(True, {'antialias': self.params['antialias'], 'background': self.params['background'], 'painter': painter, 'resolutionScale': resolutionScale})
88 painter.setRenderHint(QtGui.QPainter.Antialiasing, self.params['antialias'])
89 self.getScene().render(painter, QtCore.QRectF(targetRect), QtCore.QRectF(sourceRect))
90 finally:
91 self.setExportMode(False)
92 painter.end()
93
94 if copy:
95 QtGui.QApplication.clipboard().setImage(self.png)
96 elif toBytes:
97 return self.png
98 else:
99 self.png.save(fileName)
100
101 ImageExporter.register()
102
103
[end of pyqtgraph/exporters/ImageExporter.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pyqtgraph/exporters/ImageExporter.py b/pyqtgraph/exporters/ImageExporter.py
--- a/pyqtgraph/exporters/ImageExporter.py
+++ b/pyqtgraph/exporters/ImageExporter.py
@@ -23,8 +23,8 @@
bg.setAlpha(0)
self.params = Parameter(name='params', type='group', children=[
- {'name': 'width', 'type': 'int', 'value': tr.width(), 'limits': (0, None)},
- {'name': 'height', 'type': 'int', 'value': tr.height(), 'limits': (0, None)},
+ {'name': 'width', 'type': 'int', 'value': int(tr.width()), 'limits': (0, None)},
+ {'name': 'height', 'type': 'int', 'value': int(tr.height()), 'limits': (0, None)},
{'name': 'antialias', 'type': 'bool', 'value': True},
{'name': 'background', 'type': 'color', 'value': bg},
])
@@ -34,12 +34,12 @@
def widthChanged(self):
sr = self.getSourceRect()
ar = float(sr.height()) / sr.width()
- self.params.param('height').setValue(self.params['width'] * ar, blockSignal=self.heightChanged)
+ self.params.param('height').setValue(int(self.params['width'] * ar), blockSignal=self.heightChanged)
def heightChanged(self):
sr = self.getSourceRect()
ar = float(sr.width()) / sr.height()
- self.params.param('width').setValue(self.params['height'] * ar, blockSignal=self.widthChanged)
+ self.params.param('width').setValue(int(self.params['height'] * ar), blockSignal=self.widthChanged)
def parameters(self):
return self.params
| {"golden_diff": "diff --git a/pyqtgraph/exporters/ImageExporter.py b/pyqtgraph/exporters/ImageExporter.py\n--- a/pyqtgraph/exporters/ImageExporter.py\n+++ b/pyqtgraph/exporters/ImageExporter.py\n@@ -23,8 +23,8 @@\n bg.setAlpha(0)\n \n self.params = Parameter(name='params', type='group', children=[\n- {'name': 'width', 'type': 'int', 'value': tr.width(), 'limits': (0, None)},\n- {'name': 'height', 'type': 'int', 'value': tr.height(), 'limits': (0, None)},\n+ {'name': 'width', 'type': 'int', 'value': int(tr.width()), 'limits': (0, None)},\n+ {'name': 'height', 'type': 'int', 'value': int(tr.height()), 'limits': (0, None)},\n {'name': 'antialias', 'type': 'bool', 'value': True},\n {'name': 'background', 'type': 'color', 'value': bg},\n ])\n@@ -34,12 +34,12 @@\n def widthChanged(self):\n sr = self.getSourceRect()\n ar = float(sr.height()) / sr.width()\n- self.params.param('height').setValue(self.params['width'] * ar, blockSignal=self.heightChanged)\n+ self.params.param('height').setValue(int(self.params['width'] * ar), blockSignal=self.heightChanged)\n \n def heightChanged(self):\n sr = self.getSourceRect()\n ar = float(sr.width()) / sr.height()\n- self.params.param('width').setValue(self.params['height'] * ar, blockSignal=self.widthChanged)\n+ self.params.param('width').setValue(int(self.params['height'] * ar), blockSignal=self.widthChanged)\n \n def parameters(self):\n return self.params\n", "issue": "export image bug related to type casting\nHi there,\r\n\r\nI've got a Qt5-only Python distro installed (WinPython 3.5 Qt5) which includes pyqtgraph 0.10.0. Exporting images from PlotWidgets and ImageViews doesn't work anymore and gives this exception:\r\n\r\n`Traceback (most recent call last):\r\n File \"C:\\WinPython35_Qt5\\python-3.5.3.amd64\\lib\\site-packages\\pyqtgraph\\exporters\\Exporter.py\", line 77, in fileSaveFinished\r\n self.export(fileName=fileName, **self.fileDialog.opts)\r\n File \"C:\\WinPython35_Qt5\\python-3.5.3.amd64\\lib\\site-packages\\pyqtgraph\\exporters\\ImageExporter.py\", line 70, in export\r\n bg = np.empty((self.params['width'], self.params['height'], 4), dtype=np.ubyte)\r\nTypeError: 'float' object cannot be interpreted as an integer\r\nQWaitCondition: Destroyed while threads are still waiting`\r\n\r\nDidn't happen with WinPython 3.5 Qt4 (pyqtgraph 0.9.10 I think). Am I the only one experiencing this?\r\n\r\nUpdate: simple fix: in ImageExporter.py, line 70:\r\n`bg = np.empty((int(self.params['width']), int(self.params['height']), 4), dtype=np.ubyte)`\n", "before_files": [{"content": "from .Exporter import Exporter\nfrom ..parametertree import Parameter\nfrom ..Qt import QtGui, QtCore, QtSvg, USE_PYSIDE\nfrom .. 
import functions as fn\nimport numpy as np\n\n__all__ = ['ImageExporter']\n\nclass ImageExporter(Exporter):\n Name = \"Image File (PNG, TIF, JPG, ...)\"\n allowCopy = True\n \n def __init__(self, item):\n Exporter.__init__(self, item)\n tr = self.getTargetRect()\n if isinstance(item, QtGui.QGraphicsItem):\n scene = item.scene()\n else:\n scene = item\n bgbrush = scene.views()[0].backgroundBrush()\n bg = bgbrush.color()\n if bgbrush.style() == QtCore.Qt.NoBrush:\n bg.setAlpha(0)\n \n self.params = Parameter(name='params', type='group', children=[\n {'name': 'width', 'type': 'int', 'value': tr.width(), 'limits': (0, None)},\n {'name': 'height', 'type': 'int', 'value': tr.height(), 'limits': (0, None)},\n {'name': 'antialias', 'type': 'bool', 'value': True},\n {'name': 'background', 'type': 'color', 'value': bg},\n ])\n self.params.param('width').sigValueChanged.connect(self.widthChanged)\n self.params.param('height').sigValueChanged.connect(self.heightChanged)\n \n def widthChanged(self):\n sr = self.getSourceRect()\n ar = float(sr.height()) / sr.width()\n self.params.param('height').setValue(self.params['width'] * ar, blockSignal=self.heightChanged)\n \n def heightChanged(self):\n sr = self.getSourceRect()\n ar = float(sr.width()) / sr.height()\n self.params.param('width').setValue(self.params['height'] * ar, blockSignal=self.widthChanged)\n \n def parameters(self):\n return self.params\n \n def export(self, fileName=None, toBytes=False, copy=False):\n if fileName is None and not toBytes and not copy:\n if USE_PYSIDE:\n filter = [\"*.\"+str(f) for f in QtGui.QImageWriter.supportedImageFormats()]\n else:\n filter = [\"*.\"+bytes(f).decode('utf-8') for f in QtGui.QImageWriter.supportedImageFormats()]\n preferred = ['*.png', '*.tif', '*.jpg']\n for p in preferred[::-1]:\n if p in filter:\n filter.remove(p)\n filter.insert(0, p)\n self.fileSaveDialog(filter=filter)\n return\n \n targetRect = QtCore.QRect(0, 0, self.params['width'], self.params['height'])\n sourceRect = self.getSourceRect()\n \n \n #self.png = QtGui.QImage(targetRect.size(), QtGui.QImage.Format_ARGB32)\n #self.png.fill(pyqtgraph.mkColor(self.params['background']))\n w, h = self.params['width'], self.params['height']\n if w == 0 or h == 0:\n raise Exception(\"Cannot export image with size=0 (requested export size is %dx%d)\" % (w,h))\n bg = np.empty((self.params['width'], self.params['height'], 4), dtype=np.ubyte)\n color = self.params['background']\n bg[:,:,0] = color.blue()\n bg[:,:,1] = color.green()\n bg[:,:,2] = color.red()\n bg[:,:,3] = color.alpha()\n self.png = fn.makeQImage(bg, alpha=True)\n \n ## set resolution of image:\n origTargetRect = self.getTargetRect()\n resolutionScale = targetRect.width() / origTargetRect.width()\n #self.png.setDotsPerMeterX(self.png.dotsPerMeterX() * resolutionScale)\n #self.png.setDotsPerMeterY(self.png.dotsPerMeterY() * resolutionScale)\n \n painter = QtGui.QPainter(self.png)\n #dtr = painter.deviceTransform()\n try:\n self.setExportMode(True, {'antialias': self.params['antialias'], 'background': self.params['background'], 'painter': painter, 'resolutionScale': resolutionScale})\n painter.setRenderHint(QtGui.QPainter.Antialiasing, self.params['antialias'])\n self.getScene().render(painter, QtCore.QRectF(targetRect), QtCore.QRectF(sourceRect))\n finally:\n self.setExportMode(False)\n painter.end()\n \n if copy:\n QtGui.QApplication.clipboard().setImage(self.png)\n elif toBytes:\n return self.png\n else:\n self.png.save(fileName)\n \nImageExporter.register() \n \n", "path": 
"pyqtgraph/exporters/ImageExporter.py"}]} | 2,036 | 401 |
gh_patches_debug_17323 | rasdani/github-patches | git_diff | angr__angr-3374 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
MIPS32BE: stack collided with heap?
Couldn't get to the bottom of this one...
```
#!/home/<user>/angr_pypy/bin/python
import angr
import claripy
import monkeyhex
import logging
import pickle
import time
import sys
import os
import socket
import copy
import argparse
import ipdb
from IPython import embed
proj = angr.Project("httpd", auto_load_libs=False, except_missing_libs=False)
cfg = proj.analyses.CFGFast(normalize=True,
fail_fast=True,
force_complete_scan=False,
data_references=False,
cross_references=False,
show_progressbar=True)
# some functions we're interested in
funcs = proj.kb.functions
parse_http_req = funcs[0x408f90]
s = proj.factory.blank_state(addr=parse_http_req.addr)
# running it in a simulation manager will allow us to examine the state after it errors out
sm = proj.factory.simulation_manager(s)
sm.run()
embed()
```
[httpd.zip](https://github.com/angr/angr/files/7671480/httpd.zip)
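Edit: in case it helps triage, here is how I'm pulling the failure out — `sm.run()` parks the exception on the errored stash instead of raising it, and the attribute names below are per my angr build, so treat them as assumptions:
```python
sm.run()
for rec in sm.errored:
    # rec.error should be the SimSegfaultException("stack collided with heap")
    print(rec.error, "in state at", hex(rec.state.addr))
```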
</issue>
<code>
[start of angr/storage/memory_mixins/paged_memory/stack_allocation_mixin.py]
1 import logging
2
3 from .paged_memory_mixin import PagedMemoryMixin
4 from ....errors import SimSegfaultException, SimMemoryError
5
6 l = logging.getLogger(__name__)
7
8 class StackAllocationMixin(PagedMemoryMixin):
9 """
10 This mixin adds automatic allocation for a stack region based on the stack_end and stack_size parameters.
11 """
12 # TODO: multiple stacks. this scheme should scale p well
13 # TODO tbh this should be handled by an actual fault handler in simos or something
14 def __init__(self, stack_end=None, stack_size=None, stack_perms=None, **kwargs):
15 super().__init__(**kwargs)
16 self._red_pageno = (stack_end - 1) // self.page_size if stack_end is not None else None
17 self._remaining_stack = stack_size
18 self._stack_perms = stack_perms
19
20 def copy(self, memo):
21 o = super().copy(memo)
22 o._red_pageno = self._red_pageno
23 o._remaining_stack = self._remaining_stack
24 o._stack_perms = self._stack_perms
25 return o
26
27 def allocate_stack_pages(self, addr: int, size: int, **kwargs):
28 """
29 Pre-allocates pages for the stack without triggering any logic related to reading from them.
30
31 :param addr: The highest address that should be mapped
32 :param size: The number of bytes to be allocated. byte 1 is the one at addr, byte 2 is the one before that, and so on.
33 :return: A list of the new page objects
34 """
35 # weird off-by-ones here. we want to calculate the last byte requested, find its pageno, and then use that to determine what the last page allocated will be and then how many pages are touched
36 pageno = addr // self.page_size
37 if pageno != self._red_pageno:
38 raise SimMemoryError("Trying to allocate stack space in a place that isn't the top of the stack")
39 num = pageno - ((addr - size + 1) // self.page_size) + 1
40
41 result = []
42 for _ in range(num):
43 new_red_pageno = (self._red_pageno - 1) % ((1 << self.state.arch.bits) // self.page_size)
44 if new_red_pageno in self._pages:
45 raise SimSegfaultException(self._red_pageno * self.page_size, "stack collided with heap")
46
47 if self._remaining_stack is not None and self._remaining_stack < self.page_size:
48 raise SimSegfaultException(self._red_pageno * self.page_size, "exhausted stack quota")
49
50 l.debug("Allocating new stack page at %#x", self._red_pageno * self.page_size)
51 result.append(PagedMemoryMixin._initialize_default_page(self, self._red_pageno, permissions=self._stack_perms, **kwargs))
52 self._pages[self._red_pageno] = result[-1]
53
54 self._red_pageno = new_red_pageno
55 if self._remaining_stack is not None:
56 self._remaining_stack -= self.page_size
57
58 return result
59
60 def _initialize_page(self, pageno: int, **kwargs):
61 if pageno != self._red_pageno:
62 return super()._initialize_page(pageno, **kwargs)
63
64 return self.allocate_stack_pages((pageno + 1) * self.page_size - 1, self.page_size)[0]
65
[end of angr/storage/memory_mixins/paged_memory/stack_allocation_mixin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/angr/storage/memory_mixins/paged_memory/stack_allocation_mixin.py b/angr/storage/memory_mixins/paged_memory/stack_allocation_mixin.py
--- a/angr/storage/memory_mixins/paged_memory/stack_allocation_mixin.py
+++ b/angr/storage/memory_mixins/paged_memory/stack_allocation_mixin.py
@@ -2,6 +2,7 @@
from .paged_memory_mixin import PagedMemoryMixin
from ....errors import SimSegfaultException, SimMemoryError
+from ....sim_options import STRICT_PAGE_ACCESS
l = logging.getLogger(__name__)
@@ -58,7 +59,7 @@
return result
def _initialize_page(self, pageno: int, **kwargs):
- if pageno != self._red_pageno:
+ if pageno != self._red_pageno or STRICT_PAGE_ACCESS not in self.state.options:
return super()._initialize_page(pageno, **kwargs)
return self.allocate_stack_pages((pageno + 1) * self.page_size - 1, self.page_size)[0]
| {"golden_diff": "diff --git a/angr/storage/memory_mixins/paged_memory/stack_allocation_mixin.py b/angr/storage/memory_mixins/paged_memory/stack_allocation_mixin.py\n--- a/angr/storage/memory_mixins/paged_memory/stack_allocation_mixin.py\n+++ b/angr/storage/memory_mixins/paged_memory/stack_allocation_mixin.py\n@@ -2,6 +2,7 @@\n \n from .paged_memory_mixin import PagedMemoryMixin\n from ....errors import SimSegfaultException, SimMemoryError\n+from ....sim_options import STRICT_PAGE_ACCESS\n \n l = logging.getLogger(__name__)\n \n@@ -58,7 +59,7 @@\n return result\n \n def _initialize_page(self, pageno: int, **kwargs):\n- if pageno != self._red_pageno:\n+ if pageno != self._red_pageno or STRICT_PAGE_ACCESS not in self.state.options:\n return super()._initialize_page(pageno, **kwargs)\n \n return self.allocate_stack_pages((pageno + 1) * self.page_size - 1, self.page_size)[0]\n", "issue": "MIPS32BE: stack collided with heap?\nCouldn't get to the bottom of this one...\r\n\r\n```\r\n#!/home/<user>/angr_pypy/bin/python\r\n\r\nimport angr\r\nimport claripy\r\nimport monkeyhex\r\nimport logging\r\nimport pickle\r\nimport time\r\nimport sys\r\nimport os\r\nimport socket\r\nimport copy\r\nimport argparse\r\nimport ipdb\r\n\r\nfrom IPython import embed\r\n\r\nproj = angr.Project(\"httpd\", auto_load_libs=False, except_missing_libs=False)\r\n\r\ncfg = proj.analyses.CFGFast(normalize=True,\r\n fail_fast=True,\r\n force_complete_scan=False,\r\n data_references=False,\r\n cross_references=False,\r\n show_progressbar=True)\r\n\r\n# some functions we're interested in\r\nfuncs = proj.kb.functions\r\nparse_http_req = funcs[0x408f90]\r\n\r\ns = proj.factory.blank_state(addr=parse_http_req.addr)\r\n\r\n# running it in a simulation manager will allow us to examine the state after it errors out\r\nsm = proj.factory.simulation_manager(s)\r\nsm.run()\r\n\r\nembed()\r\n```\r\n[httpd.zip](https://github.com/angr/angr/files/7671480/httpd.zip)\r\n\r\n\n", "before_files": [{"content": "import logging\n\nfrom .paged_memory_mixin import PagedMemoryMixin\nfrom ....errors import SimSegfaultException, SimMemoryError\n\nl = logging.getLogger(__name__)\n\nclass StackAllocationMixin(PagedMemoryMixin):\n \"\"\"\n This mixin adds automatic allocation for a stack region based on the stack_end and stack_size parameters.\n \"\"\"\n # TODO: multiple stacks. this scheme should scale p well\n # TODO tbh this should be handled by an actual fault handler in simos or something\n def __init__(self, stack_end=None, stack_size=None, stack_perms=None, **kwargs):\n super().__init__(**kwargs)\n self._red_pageno = (stack_end - 1) // self.page_size if stack_end is not None else None\n self._remaining_stack = stack_size\n self._stack_perms = stack_perms\n\n def copy(self, memo):\n o = super().copy(memo)\n o._red_pageno = self._red_pageno\n o._remaining_stack = self._remaining_stack\n o._stack_perms = self._stack_perms\n return o\n\n def allocate_stack_pages(self, addr: int, size: int, **kwargs):\n \"\"\"\n Pre-allocates pages for the stack without triggering any logic related to reading from them.\n\n :param addr: The highest address that should be mapped\n :param size: The number of bytes to be allocated. byte 1 is the one at addr, byte 2 is the one before that, and so on.\n :return: A list of the new page objects\n \"\"\"\n # weird off-by-ones here. 
we want to calculate the last byte requested, find its pageno, and then use that to determine what the last page allocated will be and then how many pages are touched\n pageno = addr // self.page_size\n if pageno != self._red_pageno:\n raise SimMemoryError(\"Trying to allocate stack space in a place that isn't the top of the stack\")\n num = pageno - ((addr - size + 1) // self.page_size) + 1\n\n result = []\n for _ in range(num):\n new_red_pageno = (self._red_pageno - 1) % ((1 << self.state.arch.bits) // self.page_size)\n if new_red_pageno in self._pages:\n raise SimSegfaultException(self._red_pageno * self.page_size, \"stack collided with heap\")\n\n if self._remaining_stack is not None and self._remaining_stack < self.page_size:\n raise SimSegfaultException(self._red_pageno * self.page_size, \"exhausted stack quota\")\n\n l.debug(\"Allocating new stack page at %#x\", self._red_pageno * self.page_size)\n result.append(PagedMemoryMixin._initialize_default_page(self, self._red_pageno, permissions=self._stack_perms, **kwargs))\n self._pages[self._red_pageno] = result[-1]\n\n self._red_pageno = new_red_pageno\n if self._remaining_stack is not None:\n self._remaining_stack -= self.page_size\n\n return result\n\n def _initialize_page(self, pageno: int, **kwargs):\n if pageno != self._red_pageno:\n return super()._initialize_page(pageno, **kwargs)\n\n return self.allocate_stack_pages((pageno + 1) * self.page_size - 1, self.page_size)[0]\n", "path": "angr/storage/memory_mixins/paged_memory/stack_allocation_mixin.py"}]} | 1,692 | 241 |
gh_patches_debug_47849 | rasdani/github-patches | git_diff | searx__searx-1304 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Engines cannot retrieve results: piratebay (request exception): PirateBay changed URL
When some text is entered, and I click on General and Files several times, it shows this error:
```
Error! Engines cannot retrieve results.
piratebay (request exception)
Please, try again later or find another searx instance.
```
Version 0.14.0 on FreeBSD.
Default config.
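A hedged sketch of the likely one-line remedy — the `.se` domain appears to be dead, which would explain the request exception, so the engine's base URL needs to move to a domain that still resolves:
```python
# searx/engines/piratebay.py (sketch; the replacement domain is the assumption here)
url = 'https://thepiratebay.org/'
search_url = url + 'search/{search_term}/{pageno}/99/{search_type}'
```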
</issue>
<code>
[start of searx/engines/piratebay.py]
1 # Piratebay (Videos, Music, Files)
2 #
3 # @website https://thepiratebay.se
4 # @provide-api no (nothing found)
5 #
6 # @using-api no
7 # @results HTML (using search portal)
8 # @stable yes (HTML can change)
9 # @parse url, title, content, seed, leech, magnetlink
10
11 from lxml import html
12 from operator import itemgetter
13 from searx.engines.xpath import extract_text
14 from searx.url_utils import quote, urljoin
15
16 # engine dependent config
17 categories = ['videos', 'music', 'files']
18 paging = True
19
20 # search-url
21 url = 'https://thepiratebay.se/'
22 search_url = url + 'search/{search_term}/{pageno}/99/{search_type}'
23
24 # piratebay specific type-definitions
25 search_types = {'files': '0',
26 'music': '100',
27 'videos': '200'}
28
29 # specific xpath variables
30 magnet_xpath = './/a[@title="Download this torrent using magnet"]'
31 torrent_xpath = './/a[@title="Download this torrent"]'
32 content_xpath = './/font[@class="detDesc"]'
33
34
35 # do search-request
36 def request(query, params):
37 search_type = search_types.get(params['category'], '0')
38
39 params['url'] = search_url.format(search_term=quote(query),
40 search_type=search_type,
41 pageno=params['pageno'] - 1)
42
43 return params
44
45
46 # get response from search-request
47 def response(resp):
48 results = []
49
50 dom = html.fromstring(resp.text)
51
52 search_res = dom.xpath('//table[@id="searchResult"]//tr')
53
54 # return empty array if nothing is found
55 if not search_res:
56 return []
57
58 # parse results
59 for result in search_res[1:]:
60 link = result.xpath('.//div[@class="detName"]//a')[0]
61 href = urljoin(url, link.attrib.get('href'))
62 title = extract_text(link)
63 content = extract_text(result.xpath(content_xpath))
64 seed, leech = result.xpath('.//td[@align="right"]/text()')[:2]
65
66 # convert seed to int if possible
67 if seed.isdigit():
68 seed = int(seed)
69 else:
70 seed = 0
71
72 # convert leech to int if possible
73 if leech.isdigit():
74 leech = int(leech)
75 else:
76 leech = 0
77
78 magnetlink = result.xpath(magnet_xpath)[0]
79 torrentfile_links = result.xpath(torrent_xpath)
80 if torrentfile_links:
81 torrentfile_link = torrentfile_links[0].attrib.get('href')
82 else:
83 torrentfile_link = None
84
85 # append result
86 results.append({'url': href,
87 'title': title,
88 'content': content,
89 'seed': seed,
90 'leech': leech,
91 'magnetlink': magnetlink.attrib.get('href'),
92 'torrentfile': torrentfile_link,
93 'template': 'torrent.html'})
94
95 # return results sorted by seeder
96 return sorted(results, key=itemgetter('seed'), reverse=True)
97
[end of searx/engines/piratebay.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/searx/engines/piratebay.py b/searx/engines/piratebay.py
--- a/searx/engines/piratebay.py
+++ b/searx/engines/piratebay.py
@@ -18,7 +18,7 @@
paging = True
# search-url
-url = 'https://thepiratebay.se/'
+url = 'https://thepiratebay.org/'
search_url = url + 'search/{search_term}/{pageno}/99/{search_type}'
# piratebay specific type-definitions
| {"golden_diff": "diff --git a/searx/engines/piratebay.py b/searx/engines/piratebay.py\n--- a/searx/engines/piratebay.py\n+++ b/searx/engines/piratebay.py\n@@ -18,7 +18,7 @@\n paging = True\n \n # search-url\n-url = 'https://thepiratebay.se/'\n+url = 'https://thepiratebay.org/'\n search_url = url + 'search/{search_term}/{pageno}/99/{search_type}'\n \n # piratebay specific type-definitions\n", "issue": "Engines cannot retrieve results: piratebay (request exception): PirateBay changed URL\nWhen some text is entered, and I click on General and Files several times, it shows this error:\r\n```\r\nError! Engines cannot retrieve results.\r\npiratebay (request exception)\r\nPlease, try again later or find another searx instance.\r\n```\r\n\r\nVersion 0.14.0 on FreeBSD.\r\nDefault config.\n", "before_files": [{"content": "# Piratebay (Videos, Music, Files)\n#\n# @website https://thepiratebay.se\n# @provide-api no (nothing found)\n#\n# @using-api no\n# @results HTML (using search portal)\n# @stable yes (HTML can change)\n# @parse url, title, content, seed, leech, magnetlink\n\nfrom lxml import html\nfrom operator import itemgetter\nfrom searx.engines.xpath import extract_text\nfrom searx.url_utils import quote, urljoin\n\n# engine dependent config\ncategories = ['videos', 'music', 'files']\npaging = True\n\n# search-url\nurl = 'https://thepiratebay.se/'\nsearch_url = url + 'search/{search_term}/{pageno}/99/{search_type}'\n\n# piratebay specific type-definitions\nsearch_types = {'files': '0',\n 'music': '100',\n 'videos': '200'}\n\n# specific xpath variables\nmagnet_xpath = './/a[@title=\"Download this torrent using magnet\"]'\ntorrent_xpath = './/a[@title=\"Download this torrent\"]'\ncontent_xpath = './/font[@class=\"detDesc\"]'\n\n\n# do search-request\ndef request(query, params):\n search_type = search_types.get(params['category'], '0')\n\n params['url'] = search_url.format(search_term=quote(query),\n search_type=search_type,\n pageno=params['pageno'] - 1)\n\n return params\n\n\n# get response from search-request\ndef response(resp):\n results = []\n\n dom = html.fromstring(resp.text)\n\n search_res = dom.xpath('//table[@id=\"searchResult\"]//tr')\n\n # return empty array if nothing is found\n if not search_res:\n return []\n\n # parse results\n for result in search_res[1:]:\n link = result.xpath('.//div[@class=\"detName\"]//a')[0]\n href = urljoin(url, link.attrib.get('href'))\n title = extract_text(link)\n content = extract_text(result.xpath(content_xpath))\n seed, leech = result.xpath('.//td[@align=\"right\"]/text()')[:2]\n\n # convert seed to int if possible\n if seed.isdigit():\n seed = int(seed)\n else:\n seed = 0\n\n # convert leech to int if possible\n if leech.isdigit():\n leech = int(leech)\n else:\n leech = 0\n\n magnetlink = result.xpath(magnet_xpath)[0]\n torrentfile_links = result.xpath(torrent_xpath)\n if torrentfile_links:\n torrentfile_link = torrentfile_links[0].attrib.get('href')\n else:\n torrentfile_link = None\n\n # append result\n results.append({'url': href,\n 'title': title,\n 'content': content,\n 'seed': seed,\n 'leech': leech,\n 'magnetlink': magnetlink.attrib.get('href'),\n 'torrentfile': torrentfile_link,\n 'template': 'torrent.html'})\n\n # return results sorted by seeder\n return sorted(results, key=itemgetter('seed'), reverse=True)\n", "path": "searx/engines/piratebay.py"}]} | 1,503 | 125 |
gh_patches_debug_250 | rasdani/github-patches | git_diff | hpcaitech__ColossalAI-3944 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[tensor] fix some unittests
[elixir] make README consistent in style
The README for the `Elixir` module is rather a draft; we should polish it to make it consistent with the README files found in other modules.
</issue>
<code>
[start of colossalai/elixir/__init__.py]
1 from .wrapper import ElixirModule, ElixirOptimizer
2
[end of colossalai/elixir/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/colossalai/elixir/__init__.py b/colossalai/elixir/__init__.py
--- a/colossalai/elixir/__init__.py
+++ b/colossalai/elixir/__init__.py
@@ -1 +1,4 @@
+from .search import minimum_waste_search, optimal_search
from .wrapper import ElixirModule, ElixirOptimizer
+
+__all__ = ['ElixirModule', 'ElixirOptimizer', 'minimum_waste_search', 'optimal_search']
| {"golden_diff": "diff --git a/colossalai/elixir/__init__.py b/colossalai/elixir/__init__.py\n--- a/colossalai/elixir/__init__.py\n+++ b/colossalai/elixir/__init__.py\n@@ -1 +1,4 @@\n+from .search import minimum_waste_search, optimal_search\n from .wrapper import ElixirModule, ElixirOptimizer\n+\n+__all__ = ['ElixirModule', 'ElixirOptimizer', 'minimum_waste_search', 'optimal_search']\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[elixir] make README consistent in style\nThe README for the `Elixir` module is rather a draft, we should polish it to make it consistent with the README files found in other modules.\n", "before_files": [{"content": "from .wrapper import ElixirModule, ElixirOptimizer\n", "path": "colossalai/elixir/__init__.py"}]} | 623 | 117 |
gh_patches_debug_17690 | rasdani/github-patches | git_diff | wemake-services__wemake-python-styleguide-1688 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Method `given_function_called` should only take the function name into account.
# Bug report
## What's wrong
The method `given_function_called(node: ast.Call, to_check: Container[str]) -> str` in `logic.tree.functions` is described as a method that returns the name of the function being called in `node`, in case it is included in `to_check`. For example:
```python
# Let's imagine we are visiting the Call node in `print(123, 456)` stored in `node`
called_function = given_function_called(node, ['print'])
print(called_function)
# Prints `print`
# But, if we are visiting `datetime.timedelta(days=1)`
called_function = given_function_called(node, ['timedelta'])
print(called_function)
# Prints an empty string, as if `timedelta` had not been called. The way for it to be shown is:
called_function = given_function_called(node, ['datetime.timedelta'])
print(called_function)
# Prints `datetime.timedelta`
```
This is related to https://github.com/wemake-services/wemake-python-styleguide/pull/1676#discussion_r508471791
## How it should be
```python
# If we are visiting `datetime.timedelta(days=1)`
called_function = given_function_called(node, ['timedelta'])
print(called_function)
# Prints `timedelta`
```
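A minimal sketch of one way to get that behaviour, assuming we only ever want the final dotted segment (the helper name is illustrative, and `source.node_to_string` is assumed in scope as in the module below):
```python
def called_name(node, to_check):
    # 'datetime.timedelta' -> 'timedelta'; a plain 'print' is unaffected
    function_name = source.node_to_string(node.func).split('.')[-1]
    return function_name if function_name in to_check else ''
```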
</issue>
<code>
[start of wemake_python_styleguide/logic/tree/functions.py]
1 from ast import Call, Yield, YieldFrom, arg
2 from typing import Container, List, Optional
3
4 from wemake_python_styleguide.compat.functions import get_posonlyargs
5 from wemake_python_styleguide.logic import source
6 from wemake_python_styleguide.logic.walk import is_contained
7 from wemake_python_styleguide.types import (
8 AnyFunctionDef,
9 AnyFunctionDefAndLambda,
10 )
11
12
13 def given_function_called(node: Call, to_check: Container[str]) -> str:
14 """
15 Returns function name if it is called and contained in the container.
16
17 >>> import ast
18 >>> module = ast.parse('print(123, 456)')
19 >>> given_function_called(module.body[0].value, ['print'])
20 'print'
21
22 >>> given_function_called(module.body[0].value, ['adjust'])
23 ''
24
25 """
26 function_name = source.node_to_string(node.func)
27 if function_name in to_check:
28 return function_name
29 return ''
30
31
32 def is_method(function_type: Optional[str]) -> bool:
33 """
34 Returns whether a given function type belongs to a class.
35
36 >>> is_method('function')
37 False
38
39 >>> is_method(None)
40 False
41
42 >>> is_method('method')
43 True
44
45 >>> is_method('classmethod')
46 True
47
48 >>> is_method('staticmethod')
49 True
50
51 >>> is_method('')
52 False
53
54 """
55 return function_type in {'method', 'classmethod', 'staticmethod'}
56
57
58 def get_all_arguments(node: AnyFunctionDefAndLambda) -> List[arg]:
59 """
60 Returns list of all arguments that exist in a function.
61
62 Respects the correct parameters order.
63 Positional only args, regular argument,
64 ``*args``, keyword-only, ``**kwargs``.
65
66 Positional only args are only added for ``python3.8+``
67 other versions are ignoring this type of arguments.
68 """
69 names = [
70 *get_posonlyargs(node),
71 *node.args.args,
72 ]
73
74 if node.args.vararg:
75 names.append(node.args.vararg)
76
77 names.extend(node.args.kwonlyargs)
78
79 if node.args.kwarg:
80 names.append(node.args.kwarg)
81
82 return names
83
84
85 def is_first_argument(node: AnyFunctionDefAndLambda, name: str) -> bool:
86 """Tells whether an argument name is the logically first in function."""
87 positional_args = [
88 *get_posonlyargs(node),
89 *node.args.args,
90 ]
91
92 if not positional_args:
93 return False
94
95 return name == positional_args[0].arg
96
97
98 def is_generator(node: AnyFunctionDef) -> bool:
99 """Tells whether a given function is a generator."""
100 for body_item in node.body:
101 if is_contained(node=body_item, to_check=(Yield, YieldFrom)):
102 return True
103 return False
104
[end of wemake_python_styleguide/logic/tree/functions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/wemake_python_styleguide/logic/tree/functions.py b/wemake_python_styleguide/logic/tree/functions.py
--- a/wemake_python_styleguide/logic/tree/functions.py
+++ b/wemake_python_styleguide/logic/tree/functions.py
@@ -10,20 +10,21 @@
)
-def given_function_called(node: Call, to_check: Container[str]) -> str:
+def given_function_called(
+ node: Call,
+ to_check: Container[str],
+ *,
+ split_modules: bool = False,
+) -> str:
"""
Returns function name if it is called and contained in the container.
- >>> import ast
- >>> module = ast.parse('print(123, 456)')
- >>> given_function_called(module.body[0].value, ['print'])
- 'print'
-
- >>> given_function_called(module.body[0].value, ['adjust'])
- ''
-
+ If `split_modules`, takes the modules or objects into account. Otherwise,
+ it only cares about the function's name.
"""
function_name = source.node_to_string(node.func)
+ if split_modules:
+ function_name = function_name.split('.')[-1]
if function_name in to_check:
return function_name
return ''
| {"golden_diff": "diff --git a/wemake_python_styleguide/logic/tree/functions.py b/wemake_python_styleguide/logic/tree/functions.py\n--- a/wemake_python_styleguide/logic/tree/functions.py\n+++ b/wemake_python_styleguide/logic/tree/functions.py\n@@ -10,20 +10,21 @@\n )\n \n \n-def given_function_called(node: Call, to_check: Container[str]) -> str:\n+def given_function_called(\n+ node: Call,\n+ to_check: Container[str],\n+ *,\n+ split_modules: bool = False,\n+) -> str:\n \"\"\"\n Returns function name if it is called and contained in the container.\n \n- >>> import ast\n- >>> module = ast.parse('print(123, 456)')\n- >>> given_function_called(module.body[0].value, ['print'])\n- 'print'\n-\n- >>> given_function_called(module.body[0].value, ['adjust'])\n- ''\n-\n+ If `split_modules`, takes the modules or objects into account. Otherwise,\n+ it only cares about the function's name.\n \"\"\"\n function_name = source.node_to_string(node.func)\n+ if split_modules:\n+ function_name = function_name.split('.')[-1]\n if function_name in to_check:\n return function_name\n return ''\n", "issue": "Method `given_function_called` should only take the function name into account.\n# Bug report\r\n\r\n## What's wrong\r\n\r\nThe method `given_function_called(node: ast.Call, to_check: Container[str]) -> str` in `logic.tree.functions` is described as a method that returns the name of the function being called in `node`, in case it is included in `to_check`. For example:\r\n ```python\r\n# Let's imagine we are visiting the Call node in `print(123, 456)` stored in `node`\r\ncalled_function = given_function_called(node, ['print'])\r\nprint(called_function)\r\n# Prints `print`\r\n# But, if we are visiting `datetime.timedelta(days=1)`\r\ncalled_function = given_function_called(node, ['timedelta'])\r\nprint(called_function)\r\n# Prints an empty string, as if `timedelta` had not been called. 
The way for it to be shown is:\r\ncalled_function = given_function_called(node, ['datetime.timedelta'])\r\nprint(called_function)\r\n# Prints `datetime.timedelta`\r\n```\r\n\r\nThis is related to https://github.com/wemake-services/wemake-python-styleguide/pull/1676#discussion_r508471791\r\n\r\n## How is that should be\r\n\r\n```python\r\n# If we are visiting `datetime.timedelta(days=1)`\r\ncalled_function = given_function_called(node, ['timedelta'])\r\nprint(called_function)\r\n# Prints `timedelta`\r\n```\r\n\n", "before_files": [{"content": "from ast import Call, Yield, YieldFrom, arg\nfrom typing import Container, List, Optional\n\nfrom wemake_python_styleguide.compat.functions import get_posonlyargs\nfrom wemake_python_styleguide.logic import source\nfrom wemake_python_styleguide.logic.walk import is_contained\nfrom wemake_python_styleguide.types import (\n AnyFunctionDef,\n AnyFunctionDefAndLambda,\n)\n\n\ndef given_function_called(node: Call, to_check: Container[str]) -> str:\n \"\"\"\n Returns function name if it is called and contained in the container.\n\n >>> import ast\n >>> module = ast.parse('print(123, 456)')\n >>> given_function_called(module.body[0].value, ['print'])\n 'print'\n\n >>> given_function_called(module.body[0].value, ['adjust'])\n ''\n\n \"\"\"\n function_name = source.node_to_string(node.func)\n if function_name in to_check:\n return function_name\n return ''\n\n\ndef is_method(function_type: Optional[str]) -> bool:\n \"\"\"\n Returns whether a given function type belongs to a class.\n\n >>> is_method('function')\n False\n\n >>> is_method(None)\n False\n\n >>> is_method('method')\n True\n\n >>> is_method('classmethod')\n True\n\n >>> is_method('staticmethod')\n True\n\n >>> is_method('')\n False\n\n \"\"\"\n return function_type in {'method', 'classmethod', 'staticmethod'}\n\n\ndef get_all_arguments(node: AnyFunctionDefAndLambda) -> List[arg]:\n \"\"\"\n Returns list of all arguments that exist in a function.\n\n Respects the correct parameters order.\n Positional only args, regular argument,\n ``*args``, keyword-only, ``**kwargs``.\n\n Positional only args are only added for ``python3.8+``\n other versions are ignoring this type of arguments.\n \"\"\"\n names = [\n *get_posonlyargs(node),\n *node.args.args,\n ]\n\n if node.args.vararg:\n names.append(node.args.vararg)\n\n names.extend(node.args.kwonlyargs)\n\n if node.args.kwarg:\n names.append(node.args.kwarg)\n\n return names\n\n\ndef is_first_argument(node: AnyFunctionDefAndLambda, name: str) -> bool:\n \"\"\"Tells whether an argument name is the logically first in function.\"\"\"\n positional_args = [\n *get_posonlyargs(node),\n *node.args.args,\n ]\n\n if not positional_args:\n return False\n\n return name == positional_args[0].arg\n\n\ndef is_generator(node: AnyFunctionDef) -> bool:\n \"\"\"Tells whether a given function is a generator.\"\"\"\n for body_item in node.body:\n if is_contained(node=body_item, to_check=(Yield, YieldFrom)):\n return True\n return False\n", "path": "wemake_python_styleguide/logic/tree/functions.py"}]} | 1,677 | 285 |
gh_patches_debug_57811 | rasdani/github-patches | git_diff | mozilla__pontoon-3117 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Hitting a server error when accessing a Tag page of a Tag without any resources associated to it
This is a regression from https://github.com/mozilla/pontoon/commit/1dcd7382221f7b943b9b743ee32322f7233f6a86.
</issue>
<code>
[start of pontoon/tags/utils.py]
1 from django.db.models import Q, Max, Sum
2
3 from pontoon.base.models import TranslatedResource, Translation
4 from pontoon.tags.models import Tag
5
6
7 class Tags:
8 """This provides an API for retrieving related ``Tags`` for given filters,
9 providing statistical information and latest activity data.
10 """
11
12 def __init__(self, **kwargs):
13 self.project = kwargs.get("project")
14 self.locale = kwargs.get("locale")
15 self.slug = kwargs.get("slug")
16 self.tag = Tag.objects.filter(project=self.project, slug=self.slug).first()
17
18 def get(self):
19 tags = (
20 Tag.objects.filter(project=self.project, resources__isnull=False)
21 .distinct()
22 .order_by("-priority", "name")
23 )
24
25 chart = self.chart(Q(), "resource__tag")
26 latest_activity = self.latest_activity(Q(), "resource__tag")
27 for tag in tags:
28 tag.chart = chart.get(tag.pk)
29 tag.latest_activity = latest_activity.get(tag.pk)
30
31 return tags
32
33 def get_tag_locales(self):
34 tag = self.tag
35
36 if tag is None:
37 return None
38
39 chart = self.chart(Q(resource__tag=self.tag), "resource__tag")
40 tag.chart = chart.get(tag.pk)
41 tag.locales = self.project.locales.all()
42
43 locale_chart = self.chart(Q(resource__tag=self.tag), "locale")
44 locale_latest_activity = self.latest_activity(
45 Q(resource__tag=self.tag), "locale"
46 )
47 for locale in tag.locales:
48 locale.chart = locale_chart.get(locale.pk)
49 locale.latest_activity = locale_latest_activity.get(locale.pk)
50
51 return tag
52
53 def chart(self, query, group_by):
54 trs = (
55 self.translated_resources.filter(query)
56 .values(group_by)
57 .annotate(
58 total_strings=Sum("resource__total_strings"),
59 approved_strings=Sum("approved_strings"),
60 pretranslated_strings=Sum("pretranslated_strings"),
61 strings_with_errors=Sum("strings_with_errors"),
62 strings_with_warnings=Sum("strings_with_warnings"),
63 unreviewed_strings=Sum("unreviewed_strings"),
64 )
65 )
66
67 return {
68 tr[group_by]: TranslatedResource.get_chart_dict(
69 TranslatedResource(**{key: tr[key] for key in list(tr.keys())[1:]})
70 )
71 for tr in trs
72 }
73
74 def latest_activity(self, query, group_by):
75 latest_activity = {}
76 dates = {}
77 translations = Translation.objects.none()
78
79 trs = (
80 self.translated_resources.exclude(latest_translation__isnull=True)
81 .filter(query)
82 .values(group_by)
83 .annotate(
84 date=Max("latest_translation__date"),
85 approved_date=Max("latest_translation__approved_date"),
86 )
87 )
88
89 for tr in trs:
90 date = max(tr["date"], tr["approved_date"] or tr["date"])
91 dates[date] = tr[group_by]
92 prefix = "entity__" if group_by == "resource__tag" else ""
93
94 # Find translations with matching date and tag/locale
95 translations |= Translation.objects.filter(
96 Q(**{"date": date, f"{prefix}{group_by}": tr[group_by]})
97 ).prefetch_related("user", "approved_user")
98
99 for t in translations:
100 key = dates[t.latest_activity["date"]]
101 latest_activity[key] = t.latest_activity
102
103 return latest_activity
104
105 @property
106 def translated_resources(self):
107 trs = TranslatedResource.objects
108
109 if self.project is not None:
110 trs = trs.filter(resource__project=self.project)
111
112 if self.locale is not None:
113 trs = trs.filter(locale=self.locale)
114
115 return trs
116
[end of pontoon/tags/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pontoon/tags/utils.py b/pontoon/tags/utils.py
--- a/pontoon/tags/utils.py
+++ b/pontoon/tags/utils.py
@@ -13,7 +13,9 @@
self.project = kwargs.get("project")
self.locale = kwargs.get("locale")
self.slug = kwargs.get("slug")
- self.tag = Tag.objects.filter(project=self.project, slug=self.slug).first()
+ self.tag = Tag.objects.filter(
+ project=self.project, slug=self.slug, resources__isnull=False
+ ).first()
def get(self):
tags = (
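For illustration, a minimal sketch of the behavior the added `resources__isnull=False` filter produces (the project variable and slug below are hypothetical; the filter itself comes from the patch above):

```python
# With the patched lookup, a tag that has no resources attached is simply
# not found: .first() returns None on an empty queryset, so callers such
# as get_tag_locales() return None instead of proceeding with a tag that
# has no data and erroring later in the view.
tag = Tag.objects.filter(
    project=project, slug="tag-without-resources", resources__isnull=False
).first()
assert tag is None  # the view can now respond with 404 rather than a 500
```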
| {"golden_diff": "diff --git a/pontoon/tags/utils.py b/pontoon/tags/utils.py\n--- a/pontoon/tags/utils.py\n+++ b/pontoon/tags/utils.py\n@@ -13,7 +13,9 @@\n self.project = kwargs.get(\"project\")\n self.locale = kwargs.get(\"locale\")\n self.slug = kwargs.get(\"slug\")\n- self.tag = Tag.objects.filter(project=self.project, slug=self.slug).first()\n+ self.tag = Tag.objects.filter(\n+ project=self.project, slug=self.slug, resources__isnull=False\n+ ).first()\n \n def get(self):\n tags = (\n", "issue": "Hitting a server error when accessing a Tag page of a Tag without any resoures associated to it\nThis is a regression from https://github.com/mozilla/pontoon/commit/1dcd7382221f7b943b9b743ee32322f7233f6a86.\n", "before_files": [{"content": "from django.db.models import Q, Max, Sum\n\nfrom pontoon.base.models import TranslatedResource, Translation\nfrom pontoon.tags.models import Tag\n\n\nclass Tags:\n \"\"\"This provides an API for retrieving related ``Tags`` for given filters,\n providing statistical information and latest activity data.\n \"\"\"\n\n def __init__(self, **kwargs):\n self.project = kwargs.get(\"project\")\n self.locale = kwargs.get(\"locale\")\n self.slug = kwargs.get(\"slug\")\n self.tag = Tag.objects.filter(project=self.project, slug=self.slug).first()\n\n def get(self):\n tags = (\n Tag.objects.filter(project=self.project, resources__isnull=False)\n .distinct()\n .order_by(\"-priority\", \"name\")\n )\n\n chart = self.chart(Q(), \"resource__tag\")\n latest_activity = self.latest_activity(Q(), \"resource__tag\")\n for tag in tags:\n tag.chart = chart.get(tag.pk)\n tag.latest_activity = latest_activity.get(tag.pk)\n\n return tags\n\n def get_tag_locales(self):\n tag = self.tag\n\n if tag is None:\n return None\n\n chart = self.chart(Q(resource__tag=self.tag), \"resource__tag\")\n tag.chart = chart.get(tag.pk)\n tag.locales = self.project.locales.all()\n\n locale_chart = self.chart(Q(resource__tag=self.tag), \"locale\")\n locale_latest_activity = self.latest_activity(\n Q(resource__tag=self.tag), \"locale\"\n )\n for locale in tag.locales:\n locale.chart = locale_chart.get(locale.pk)\n locale.latest_activity = locale_latest_activity.get(locale.pk)\n\n return tag\n\n def chart(self, query, group_by):\n trs = (\n self.translated_resources.filter(query)\n .values(group_by)\n .annotate(\n total_strings=Sum(\"resource__total_strings\"),\n approved_strings=Sum(\"approved_strings\"),\n pretranslated_strings=Sum(\"pretranslated_strings\"),\n strings_with_errors=Sum(\"strings_with_errors\"),\n strings_with_warnings=Sum(\"strings_with_warnings\"),\n unreviewed_strings=Sum(\"unreviewed_strings\"),\n )\n )\n\n return {\n tr[group_by]: TranslatedResource.get_chart_dict(\n TranslatedResource(**{key: tr[key] for key in list(tr.keys())[1:]})\n )\n for tr in trs\n }\n\n def latest_activity(self, query, group_by):\n latest_activity = {}\n dates = {}\n translations = Translation.objects.none()\n\n trs = (\n self.translated_resources.exclude(latest_translation__isnull=True)\n .filter(query)\n .values(group_by)\n .annotate(\n date=Max(\"latest_translation__date\"),\n approved_date=Max(\"latest_translation__approved_date\"),\n )\n )\n\n for tr in trs:\n date = max(tr[\"date\"], tr[\"approved_date\"] or tr[\"date\"])\n dates[date] = tr[group_by]\n prefix = \"entity__\" if group_by == \"resource__tag\" else \"\"\n\n # Find translations with matching date and tag/locale\n translations |= Translation.objects.filter(\n Q(**{\"date\": date, f\"{prefix}{group_by}\": tr[group_by]})\n 
).prefetch_related(\"user\", \"approved_user\")\n\n for t in translations:\n key = dates[t.latest_activity[\"date\"]]\n latest_activity[key] = t.latest_activity\n\n return latest_activity\n\n @property\n def translated_resources(self):\n trs = TranslatedResource.objects\n\n if self.project is not None:\n trs = trs.filter(resource__project=self.project)\n\n if self.locale is not None:\n trs = trs.filter(locale=self.locale)\n\n return trs\n", "path": "pontoon/tags/utils.py"}]} | 1,655 | 134 |
gh_patches_debug_12816 | rasdani/github-patches | git_diff | ansible__ansible-3017 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Support scripts dir in roles
Could we extend roles to support scripts?
So
```
- script: myscript.sh
```
Gets automagically sucked up from roles/myrole/scripts/myscript.sh?
</issue>
<code>
[start of lib/ansible/runner/action_plugins/script.py]
1 # (c) 2012, Michael DeHaan <[email protected]>
2 #
3 # This file is part of Ansible
4 #
5 # Ansible is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU General Public License as published by
7 # the Free Software Foundation, either version 3 of the License, or
8 # (at your option) any later version.
9 #
10 # Ansible is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
14 #
15 # You should have received a copy of the GNU General Public License
16 # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
17
18 import os
19 import shlex
20
21 import ansible.constants as C
22 from ansible.utils import template
23 from ansible import utils
24 from ansible import errors
25 from ansible.runner.return_data import ReturnData
26
27 class ActionModule(object):
28
29 def __init__(self, runner):
30 self.runner = runner
31
32 def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
33 ''' handler for file transfer operations '''
34
35 if self.runner.check:
36 # in check mode, always skip this module
37 return ReturnData(conn=conn, comm_ok=True, result=dict(skipped=True, msg='check mode not supported for this module'))
38
39 tokens = shlex.split(module_args)
40 source = tokens[0]
41 # FIXME: error handling
42 args = " ".join(tokens[1:])
43 source = template.template(self.runner.basedir, source, inject)
44 source = utils.path_dwim(self.runner.basedir, source)
45
46 # transfer the file to a remote tmp location
47 source = source.replace('\x00','') # why does this happen here?
48 args = args.replace('\x00','') # why does this happen here?
49 tmp_src = os.path.join(tmp, os.path.basename(source))
50 tmp_src = tmp_src.replace('\x00', '')
51
52 conn.put_file(source, tmp_src)
53
54 # fix file permissions when the copy is done as a different user
55 if self.runner.sudo and self.runner.sudo_user != 'root':
56 prepcmd = 'chmod a+rx %s' % tmp_src
57 else:
58 prepcmd = 'chmod +x %s' % tmp_src
59
60 # add preparation steps to one ssh roundtrip executing the script
61 module_args = prepcmd + '; ' + tmp_src + ' ' + args
62
63 handler = utils.plugins.action_loader.get('raw', self.runner)
64 result = handler.run(conn, tmp, 'raw', module_args, inject)
65
66 # clean up after
67 if tmp.find("tmp") != -1 and C.DEFAULT_KEEP_REMOTE_FILES != '1':
68 self.runner._low_level_exec_command(conn, 'rm -rf %s >/dev/null 2>&1' % tmp, tmp)
69
70 return result
71
[end of lib/ansible/runner/action_plugins/script.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lib/ansible/runner/action_plugins/script.py b/lib/ansible/runner/action_plugins/script.py
--- a/lib/ansible/runner/action_plugins/script.py
+++ b/lib/ansible/runner/action_plugins/script.py
@@ -41,7 +41,10 @@
# FIXME: error handling
args = " ".join(tokens[1:])
source = template.template(self.runner.basedir, source, inject)
- source = utils.path_dwim(self.runner.basedir, source)
+ if '_original_file' in inject:
+ source = utils.path_dwim_relative(inject['_original_file'], 'files', source, self.runner.basedir)
+ else:
+ source = utils.path_dwim(self.runner.basedir, source)
# transfer the file to a remote tmp location
source = source.replace('\x00','') # why does this happen here?
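A sketch of how the patched lookup resolves paths; the directory layout below is hypothetical, and note that the merged fix pulls from the role's `files/` directory rather than the `scripts/` directory requested in the issue:

```python
# inject['_original_file'] points at the task file inside the role, e.g.
#   /plays/roles/myrole/tasks/main.yml
# so the role-relative lookup resolves 'myscript.sh' against the role:
if '_original_file' in inject:
    # -> /plays/roles/myrole/files/myscript.sh
    source = utils.path_dwim_relative(
        inject['_original_file'], 'files', source, self.runner.basedir)
else:
    # outside a role, fall back to the playbook directory as before
    source = utils.path_dwim(self.runner.basedir, source)
```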
| {"golden_diff": "diff --git a/lib/ansible/runner/action_plugins/script.py b/lib/ansible/runner/action_plugins/script.py\n--- a/lib/ansible/runner/action_plugins/script.py\n+++ b/lib/ansible/runner/action_plugins/script.py\n@@ -41,7 +41,10 @@\n # FIXME: error handling\n args = \" \".join(tokens[1:])\n source = template.template(self.runner.basedir, source, inject)\n- source = utils.path_dwim(self.runner.basedir, source)\n+ if '_original_file' in inject:\n+ source = utils.path_dwim_relative(inject['_original_file'], 'files', source, self.runner.basedir)\n+ else:\n+ source = utils.path_dwim(self.runner.basedir, source)\n \n # transfer the file to a remote tmp location\n source = source.replace('\\x00','') # why does this happen here?\n", "issue": "Support scripts dir in roles\nCould we extend roles to support scripts?\n\nSo \n\n```\n- script: myscript.sh\n```\n\nGets automagically sucked up from, roles/myrole/scripts/myscript.sh\n\n?\n\n", "before_files": [{"content": "# (c) 2012, Michael DeHaan <[email protected]>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n\nimport os\nimport shlex\n\nimport ansible.constants as C\nfrom ansible.utils import template\nfrom ansible import utils\nfrom ansible import errors\nfrom ansible.runner.return_data import ReturnData\n\nclass ActionModule(object):\n\n def __init__(self, runner):\n self.runner = runner\n\n def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):\n ''' handler for file transfer operations '''\n\n if self.runner.check:\n # in check mode, always skip this module\n return ReturnData(conn=conn, comm_ok=True, result=dict(skipped=True, msg='check mode not supported for this module'))\n\n tokens = shlex.split(module_args)\n source = tokens[0]\n # FIXME: error handling\n args = \" \".join(tokens[1:])\n source = template.template(self.runner.basedir, source, inject)\n source = utils.path_dwim(self.runner.basedir, source)\n\n # transfer the file to a remote tmp location\n source = source.replace('\\x00','') # why does this happen here?\n args = args.replace('\\x00','') # why does this happen here?\n tmp_src = os.path.join(tmp, os.path.basename(source))\n tmp_src = tmp_src.replace('\\x00', '') \n\n conn.put_file(source, tmp_src)\n\n # fix file permissions when the copy is done as a different user\n if self.runner.sudo and self.runner.sudo_user != 'root':\n prepcmd = 'chmod a+rx %s' % tmp_src\n else:\n prepcmd = 'chmod +x %s' % tmp_src\n\n # add preparation steps to one ssh roundtrip executing the script\n module_args = prepcmd + '; ' + tmp_src + ' ' + args\n\n handler = utils.plugins.action_loader.get('raw', self.runner)\n result = handler.run(conn, tmp, 'raw', module_args, inject)\n\n # clean up after\n if tmp.find(\"tmp\") != -1 and C.DEFAULT_KEEP_REMOTE_FILES != '1':\n self.runner._low_level_exec_command(conn, 'rm -rf %s >/dev/null 2>&1' % tmp, tmp)\n\n return result\n", "path": 
"lib/ansible/runner/action_plugins/script.py"}]} | 1,389 | 200 |
gh_patches_debug_2045 | rasdani/github-patches | git_diff | dotkom__onlineweb4-1220 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Tags with a '.' will crash
Ref. http://moonshine.online.ntnu.no/article/10/online-far-ny-nettside
</issue>
<code>
[start of apps/article/admin.py]
1 from django.contrib import admin
2 from apps.article.models import Article, Tag, ArticleTag
3 from django.conf import settings
4 from filebrowser.settings import VERSIONS, ADMIN_THUMBNAIL
5
6
7 class ArticleTagAdmin(admin.ModelAdmin):
8 model = ArticleTag
9
10
11 class ArticleTagInline(admin.TabularInline):
12 model = ArticleTag
13 max_num = 99
14 extra = 0
15
16
17 class TagAdmin(admin.ModelAdmin):
18 def save_model(self, request, obj, form, change):
19 obj.changed_by = request.user
20 if not change:
21 obj.created_by = request.user
22 obj.save()
23
24
25 class ArticleAdmin(admin.ModelAdmin):
26 inlines = (ArticleTagInline,)
27 list_display = ("heading", "created_by", "changed_by")
28
29 # set the created and changed by fields
30 def save_model(self, request, obj, form, change):
31 if (obj.image):
32 obj.image.version_generate(ADMIN_THUMBNAIL).url
33
34 # Itterate the different versions (by key)
35 for ver in VERSIONS.keys():
36 # Check if the key start with article_ (if it does, we want to crop to that size)
37 if ver.startswith('article_'):
38 obj.image.version_generate(ver).url
39
40 obj.changed_by = request.user
41
42 if not change:
43 obj.created_by = request.user
44 obj.save()
45
46 def save_formset(self, request, form, formset, change):
47 instances = formset.save(commit=False)
48 for instances in instances:
49 instances.save()
50
51 admin.site.register(Article, ArticleAdmin)
52 admin.site.register(Tag, TagAdmin)
53
[end of apps/article/admin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/apps/article/admin.py b/apps/article/admin.py
--- a/apps/article/admin.py
+++ b/apps/article/admin.py
@@ -18,6 +18,7 @@
def save_model(self, request, obj, form, change):
obj.changed_by = request.user
if not change:
+ obj.name = obj.name.replace('.', '')
obj.created_by = request.user
obj.save()
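The fix strips periods from a tag's name at creation time. A standalone sketch of the patched `save_model`; the comment about why dots crash is an assumption, since the issue does not say:

```python
def save_model(self, request, obj, form, change):
    obj.changed_by = request.user
    if not change:
        # Drop '.' characters from new tag names; dots are assumed to
        # break the tag's slug/URL handling. Existing tags are untouched.
        obj.name = obj.name.replace('.', '')
        obj.created_by = request.user
    obj.save()
```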
| {"golden_diff": "diff --git a/apps/article/admin.py b/apps/article/admin.py\n--- a/apps/article/admin.py\n+++ b/apps/article/admin.py\n@@ -18,6 +18,7 @@\n def save_model(self, request, obj, form, change):\n obj.changed_by = request.user\n if not change:\n+ obj.name = obj.name.replace('.', '')\n obj.created_by = request.user\n obj.save()\n", "issue": "Tags with a '.' will crash\nRef. http://moonshine.online.ntnu.no/article/10/online-far-ny-nettside\n\n", "before_files": [{"content": "from django.contrib import admin\nfrom apps.article.models import Article, Tag, ArticleTag\nfrom django.conf import settings\nfrom filebrowser.settings import VERSIONS, ADMIN_THUMBNAIL\n\n\nclass ArticleTagAdmin(admin.ModelAdmin):\n model = ArticleTag\n\n\nclass ArticleTagInline(admin.TabularInline):\n model = ArticleTag\n max_num = 99\n extra = 0\n\n\nclass TagAdmin(admin.ModelAdmin):\n def save_model(self, request, obj, form, change):\n obj.changed_by = request.user\n if not change:\n obj.created_by = request.user\n obj.save()\n\n\nclass ArticleAdmin(admin.ModelAdmin):\n inlines = (ArticleTagInline,)\n list_display = (\"heading\", \"created_by\", \"changed_by\")\n\n # set the created and changed by fields\n def save_model(self, request, obj, form, change):\n if (obj.image):\n obj.image.version_generate(ADMIN_THUMBNAIL).url\n\n # Itterate the different versions (by key)\n for ver in VERSIONS.keys():\n # Check if the key start with article_ (if it does, we want to crop to that size)\n if ver.startswith('article_'):\n obj.image.version_generate(ver).url\n\n obj.changed_by = request.user\n\n if not change:\n obj.created_by = request.user\n obj.save()\n\n def save_formset(self, request, form, formset, change):\n instances = formset.save(commit=False)\n for instances in instances:\n instances.save()\n\nadmin.site.register(Article, ArticleAdmin)\nadmin.site.register(Tag, TagAdmin)\n", "path": "apps/article/admin.py"}]} | 1,012 | 90 |
gh_patches_debug_14853 | rasdani/github-patches | git_diff | Cloud-CV__EvalAI-1276 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Disable the Challenge Phase buttons which are inactive while making submissions
## Observed Behaviour
Currently, if a challenge phase is inactive, a participant can still select that phase and make submissions.
See screenshot for example:
<img width="1440" alt="screen shot 2017-08-08 at 7 21 16 pm" src="https://user-images.githubusercontent.com/2945708/29098709-ca4c67a8-7c6e-11e7-8729-73122eb9982e.png">
## Expected Behaviour
The challenge phases that are not currently active should ideally be disabled, and the user shouldn't be allowed to select them.
</issue>
<code>
[start of apps/participants/serializers.py]
1 from django.contrib.auth.models import User
2
3 from rest_framework import serializers
4
5 from challenges.serializers import ChallengeSerializer
6 from .models import (Participant, ParticipantTeam)
7
8
9 class ParticipantTeamSerializer(serializers.ModelSerializer):
10 """Serializer class to map Participants to Teams."""
11 created_by = serializers.SlugRelatedField(slug_field='username', queryset=User.objects.all())
12
13 def __init__(self, *args, **kwargs):
14 super(ParticipantTeamSerializer, self).__init__(*args, **kwargs)
15 context = kwargs.get('context')
16 if context:
17 request = context.get('request')
18 kwargs['data']['created_by'] = request.user.username
19
20 class Meta:
21 model = ParticipantTeam
22 fields = ('id', 'team_name', 'created_by')
23
24
25 class InviteParticipantToTeamSerializer(serializers.Serializer):
26 """Serializer class for inviting Participant to Team."""
27 email = serializers.EmailField()
28
29 def __init__(self, *args, **kwargs):
30 super(InviteParticipantToTeamSerializer, self).__init__(*args, **kwargs)
31 context = kwargs.get('context')
32 if context:
33 self.participant_team = context.get('participant_team')
34 self.user = context.get('request').user
35
36 def validate_email(self, value):
37 if value == self.user.email:
38 raise serializers.ValidationError('A participant cannot invite himself')
39 try:
40 User.objects.get(email=value)
41 except User.DoesNotExist:
42 raise serializers.ValidationError('User does not exist')
43 return value
44
45 def save(self):
46 email = self.validated_data.get('email')
47 return Participant.objects.get_or_create(user=User.objects.get(email=email),
48 status=Participant.ACCEPTED,
49 team=self.participant_team)
50
51
52 class ParticipantSerializer(serializers.ModelSerializer):
53 """Serializer class for Participants."""
54 member_name = serializers.SerializerMethodField()
55 member_id = serializers.SerializerMethodField()
56
57 class Meta:
58 model = Participant
59 fields = ('member_name', 'status', 'member_id')
60
61 def get_member_name(self, obj):
62 return obj.user.username
63
64 def get_member_id(self, obj):
65 return obj.user.id
66
67
68 class ParticipantTeamDetailSerializer(serializers.ModelSerializer):
69 """Serializer for Participant Teams and Participant Combined."""
70 members = serializers.SerializerMethodField()
71 created_by = serializers.SlugRelatedField(slug_field='username', queryset=User.objects.all())
72
73 class Meta:
74 model = ParticipantTeam
75 fields = ('id', 'team_name', 'created_by', 'members')
76
77 def get_members(self, obj):
78 participants = Participant.objects.filter(team__pk=obj.id)
79 serializer = ParticipantSerializer(participants, many=True)
80 return serializer.data
81
82
83 class ChallengeParticipantTeam(object):
84 """Serializer to map Challenge and Participant Teams."""
85 def __init__(self, challenge, participant_team):
86 self.challenge = challenge
87 self.participant_team = participant_team
88
89
90 class ChallengeParticipantTeamSerializer(serializers.Serializer):
91 """Serializer to initialize Challenge and Participant's Team"""
92 challenge = ChallengeSerializer()
93 participant_team = ParticipantTeamSerializer()
94
95
96 class ChallengeParticipantTeamList(object):
97 """Class to create a list of Challenge and Participant Teams."""
98 def __init__(self, challenge_participant_team_list):
99 self.challenge_participant_team_list = challenge_participant_team_list
100
101
102 class ChallengeParticipantTeamListSerializer(serializers.Serializer):
103 """Serializer to map a challenge's participant team lists."""
104 challenge_participant_team_list = ChallengeParticipantTeamSerializer(many=True)
105
106
107 class ParticipantTeamCount(object):
108 def __init__(self, participant_team_count):
109 self.participant_team_count = participant_team_count
110
111
112 class ParticipantTeamCountSerializer(serializers.Serializer):
113 participant_team_count = serializers.IntegerField()
114
115
116 class ParticipantCount(object):
117 def __init__(self, participant_count):
118 self.participant_count = participant_count
119
120
121 class ParticipantCountSerializer(serializers.Serializer):
122 participant_count = serializers.IntegerField()
123
[end of apps/participants/serializers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/apps/participants/serializers.py b/apps/participants/serializers.py
--- a/apps/participants/serializers.py
+++ b/apps/participants/serializers.py
@@ -1,4 +1,5 @@
from django.contrib.auth.models import User
+from django.utils import timezone
from rest_framework import serializers
@@ -102,6 +103,10 @@
class ChallengeParticipantTeamListSerializer(serializers.Serializer):
"""Serializer to map a challenge's participant team lists."""
challenge_participant_team_list = ChallengeParticipantTeamSerializer(many=True)
+ datetime_now = serializers.SerializerMethodField()
+
+ def get_datetime_now(self, obj):
+ return timezone.now()
class ParticipantTeamCount(object):
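The server-side piece of the fix follows Django REST Framework's `SerializerMethodField` pattern. A self-contained sketch (the serializer name here is hypothetical, and the idea that the frontend compares this timestamp against each phase's dates is an assumption):

```python
from django.utils import timezone
from rest_framework import serializers

class PhaseListSerializer(serializers.Serializer):  # hypothetical name
    datetime_now = serializers.SerializerMethodField()

    def get_datetime_now(self, obj):
        # Evaluated per request, so clients receive the server's current
        # time and can grey out phases that are not active at that moment.
        return timezone.now()
```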
| {"golden_diff": "diff --git a/apps/participants/serializers.py b/apps/participants/serializers.py\n--- a/apps/participants/serializers.py\n+++ b/apps/participants/serializers.py\n@@ -1,4 +1,5 @@\n from django.contrib.auth.models import User\n+from django.utils import timezone\n \n from rest_framework import serializers\n \n@@ -102,6 +103,10 @@\n class ChallengeParticipantTeamListSerializer(serializers.Serializer):\n \"\"\"Serializer to map a challenge's participant team lists.\"\"\"\n challenge_participant_team_list = ChallengeParticipantTeamSerializer(many=True)\n+ datetime_now = serializers.SerializerMethodField()\n+\n+ def get_datetime_now(self, obj):\n+ return timezone.now()\n \n \n class ParticipantTeamCount(object):\n", "issue": "Disable the Challenge Phase buttons which are inactive while making submissions\n## Observed Behaviour\r\nCurrently, if a challenge phase is inactive then also a participant can select the phase to make submissions. \r\n\r\nSee screenshot for example:\r\n<img width=\"1440\" alt=\"screen shot 2017-08-08 at 7 21 16 pm\" src=\"https://user-images.githubusercontent.com/2945708/29098709-ca4c67a8-7c6e-11e7-8729-73122eb9982e.png\">\r\n\r\n## Expected Behaviour\r\n\r\nThe challenge phases that are not active now should be ideally disabled and the user shouldn't be allowed to select those challenge phases. \nDisable the Challenge Phase buttons which are inactive while making submissions\n## Observed Behaviour\r\nCurrently, if a challenge phase is inactive then also a participant can select the phase to make submissions. \r\n\r\nSee screenshot for example:\r\n<img width=\"1440\" alt=\"screen shot 2017-08-08 at 7 21 16 pm\" src=\"https://user-images.githubusercontent.com/2945708/29098709-ca4c67a8-7c6e-11e7-8729-73122eb9982e.png\">\r\n\r\n## Expected Behaviour\r\n\r\nThe challenge phases that are not active now should be ideally disabled and the user shouldn't be allowed to select those challenge phases. 
\n", "before_files": [{"content": "from django.contrib.auth.models import User\n\nfrom rest_framework import serializers\n\nfrom challenges.serializers import ChallengeSerializer\nfrom .models import (Participant, ParticipantTeam)\n\n\nclass ParticipantTeamSerializer(serializers.ModelSerializer):\n \"\"\"Serializer class to map Participants to Teams.\"\"\"\n created_by = serializers.SlugRelatedField(slug_field='username', queryset=User.objects.all())\n\n def __init__(self, *args, **kwargs):\n super(ParticipantTeamSerializer, self).__init__(*args, **kwargs)\n context = kwargs.get('context')\n if context:\n request = context.get('request')\n kwargs['data']['created_by'] = request.user.username\n\n class Meta:\n model = ParticipantTeam\n fields = ('id', 'team_name', 'created_by')\n\n\nclass InviteParticipantToTeamSerializer(serializers.Serializer):\n \"\"\"Serializer class for inviting Participant to Team.\"\"\"\n email = serializers.EmailField()\n\n def __init__(self, *args, **kwargs):\n super(InviteParticipantToTeamSerializer, self).__init__(*args, **kwargs)\n context = kwargs.get('context')\n if context:\n self.participant_team = context.get('participant_team')\n self.user = context.get('request').user\n\n def validate_email(self, value):\n if value == self.user.email:\n raise serializers.ValidationError('A participant cannot invite himself')\n try:\n User.objects.get(email=value)\n except User.DoesNotExist:\n raise serializers.ValidationError('User does not exist')\n return value\n\n def save(self):\n email = self.validated_data.get('email')\n return Participant.objects.get_or_create(user=User.objects.get(email=email),\n status=Participant.ACCEPTED,\n team=self.participant_team)\n\n\nclass ParticipantSerializer(serializers.ModelSerializer):\n \"\"\"Serializer class for Participants.\"\"\"\n member_name = serializers.SerializerMethodField()\n member_id = serializers.SerializerMethodField()\n\n class Meta:\n model = Participant\n fields = ('member_name', 'status', 'member_id')\n\n def get_member_name(self, obj):\n return obj.user.username\n\n def get_member_id(self, obj):\n return obj.user.id\n\n\nclass ParticipantTeamDetailSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for Participant Teams and Participant Combined.\"\"\"\n members = serializers.SerializerMethodField()\n created_by = serializers.SlugRelatedField(slug_field='username', queryset=User.objects.all())\n\n class Meta:\n model = ParticipantTeam\n fields = ('id', 'team_name', 'created_by', 'members')\n\n def get_members(self, obj):\n participants = Participant.objects.filter(team__pk=obj.id)\n serializer = ParticipantSerializer(participants, many=True)\n return serializer.data\n\n\nclass ChallengeParticipantTeam(object):\n \"\"\"Serializer to map Challenge and Participant Teams.\"\"\"\n def __init__(self, challenge, participant_team):\n self.challenge = challenge\n self.participant_team = participant_team\n\n\nclass ChallengeParticipantTeamSerializer(serializers.Serializer):\n \"\"\"Serializer to initialize Challenge and Participant's Team\"\"\"\n challenge = ChallengeSerializer()\n participant_team = ParticipantTeamSerializer()\n\n\nclass ChallengeParticipantTeamList(object):\n \"\"\"Class to create a list of Challenge and Participant Teams.\"\"\"\n def __init__(self, challenge_participant_team_list):\n self.challenge_participant_team_list = challenge_participant_team_list\n\n\nclass ChallengeParticipantTeamListSerializer(serializers.Serializer):\n \"\"\"Serializer to map a challenge's participant team lists.\"\"\"\n 
challenge_participant_team_list = ChallengeParticipantTeamSerializer(many=True)\n\n\nclass ParticipantTeamCount(object):\n def __init__(self, participant_team_count):\n self.participant_team_count = participant_team_count\n\n\nclass ParticipantTeamCountSerializer(serializers.Serializer):\n participant_team_count = serializers.IntegerField()\n\n\nclass ParticipantCount(object):\n def __init__(self, participant_count):\n self.participant_count = participant_count\n\n\nclass ParticipantCountSerializer(serializers.Serializer):\n participant_count = serializers.IntegerField()\n", "path": "apps/participants/serializers.py"}]} | 1,956 | 157 |
gh_patches_debug_28152 | rasdani/github-patches | git_diff | rasterio__rasterio-662 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
empty window intersection
I've been playing around with the cool windowing functions @brendan-ward put in a while back. I'm wondering if this is the desired behavior when two windows intersect at a point or along a row/column. Because of the way windows work in rasterio, the upper row/column bound is open, so `windows_intersect` returns `True` but the resulting intersection reads back as an empty array, e.g.
```
>>> with rasterio.open('tests/data/RGB.byte.tif') as src:
... w1 = ((0, 2), (0, 2))
... w2 = ((2, 4), (2, 4))
... if windows_intersect((w1, w2)): #returns True
... print(src.read(1, window=window_intersection((w1, w2))))
[]
```
True, the windows do intersect at a point, but I assume the way people will use `window_intersection` is to expect a window that they can use in further processing.
</issue>
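The empty result follows directly from the half-open `[start, stop)` window convention; a short sketch of the arithmetic for the example above:

```python
w1 = ((0, 2), (0, 2))
w2 = ((2, 4), (2, 4))

# Intersection of half-open ranges: [max(starts), min(stops))
rows = (max(w1[0][0], w2[0][0]), min(w1[0][1], w2[0][1]))  # (2, 2)
cols = (max(w1[1][0], w2[1][0]), min(w1[1][1], w2[1][1]))  # (2, 2)
# Both spans are [2, 2), i.e. zero rows and zero columns: an empty read.
```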
<code>
[start of rasterio/windows.py]
1 """Windows and related functions."""
2
3
4 def get_data_window(arr, nodata=None):
5 """Return a window for the non-nodata pixels within the input array.
6
7 Parameters
8 ----------
9 arr: numpy ndarray, <= 3 dimensions
10 nodata: number
11 If None, will either return a full window if arr is not a masked
12 array, or will use the mask to determine non-nodata pixels.
13 If provided, it must be a number within the valid range of the dtype
14 of the input array.
15
16 Returns
17 -------
18 ((row_start, row_stop), (col_start, col_stop))
19
20 """
21 from rasterio._io import get_data_window
22 return get_data_window(arr, nodata)
23
24
25 def union(windows):
26 """Union windows and return the outermost extent they cover.
27
28 Parameters
29 ----------
30 windows: list-like of window objects
31 ((row_start, row_stop), (col_start, col_stop))
32
33 Returns
34 -------
35 ((row_start, row_stop), (col_start, col_stop))
36 """
37 from rasterio._io import window_union
38 return window_union(windows)
39
40
41 def intersection(windows):
42 """Intersect windows and return the innermost extent they cover.
43
44 Will raise ValueError if windows do not intersect.
45
46 Parameters
47 ----------
48 windows: list-like of window objects
49 ((row_start, row_stop), (col_start, col_stop))
50
51 Returns
52 -------
53 ((row_start, row_stop), (col_start, col_stop))
54 """
55 from rasterio._io import window_intersection
56 return window_intersection(windows)
57
58
59 def intersect(windows):
60 """Test if windows intersect.
61
62 Parameters
63 ----------
64 windows: list-like of window objects
65 ((row_start, row_stop), (col_start, col_stop))
66
67 Returns
68 -------
69 boolean:
70 True if all windows intersect.
71 """
72 from rasterio._io import windows_intersect
73 return windows_intersect(windows)
74
[end of rasterio/windows.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/rasterio/windows.py b/rasterio/windows.py
--- a/rasterio/windows.py
+++ b/rasterio/windows.py
@@ -1,5 +1,21 @@
"""Windows and related functions."""
+import functools
+import collections
+
+
+def iter_args(function):
+ """Decorator to allow function to take either *args or
+ a single iterable which gets expanded to *args.
+ """
+ @functools.wraps(function)
+ def wrapper(*args, **kwargs):
+ if len(args) == 1 and isinstance(args[0], collections.Iterable):
+ return function(*args[0])
+ else:
+ return function(*args)
+ return wrapper
+
def get_data_window(arr, nodata=None):
"""Return a window for the non-nodata pixels within the input array.
@@ -22,7 +38,8 @@
return get_data_window(arr, nodata)
-def union(windows):
+@iter_args
+def union(*windows):
"""Union windows and return the outermost extent they cover.
Parameters
@@ -38,7 +55,8 @@
return window_union(windows)
-def intersection(windows):
+@iter_args
+def intersection(*windows):
"""Intersect windows and return the innermost extent they cover.
Will raise ValueError if windows do not intersect.
@@ -56,7 +74,8 @@
return window_intersection(windows)
-def intersect(windows):
+@iter_args
+def intersect(*windows):
"""Test if windows intersect.
Parameters
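A usage sketch of the `iter_args` decorator added above, assuming the decorator definition from the patch is in scope; the function body here is a stand-in for illustration:

```python
@iter_args
def union(*windows):
    return windows  # stand-in body; the real function delegates to _io

w1, w2 = ((0, 2), (0, 2)), ((2, 4), (2, 4))
# Both calling conventions now reach the same underlying function:
assert union(w1, w2) == union([w1, w2]) == (w1, w2)
```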
| {"golden_diff": "diff --git a/rasterio/windows.py b/rasterio/windows.py\n--- a/rasterio/windows.py\n+++ b/rasterio/windows.py\n@@ -1,5 +1,21 @@\n \"\"\"Windows and related functions.\"\"\"\n \n+import functools\n+import collections\n+\n+\n+def iter_args(function):\n+ \"\"\"Decorator to allow function to take either *args or\n+ a single iterable which gets expanded to *args.\n+ \"\"\"\n+ @functools.wraps(function)\n+ def wrapper(*args, **kwargs):\n+ if len(args) == 1 and isinstance(args[0], collections.Iterable):\n+ return function(*args[0])\n+ else:\n+ return function(*args)\n+ return wrapper\n+\n \n def get_data_window(arr, nodata=None):\n \"\"\"Return a window for the non-nodata pixels within the input array.\n@@ -22,7 +38,8 @@\n return get_data_window(arr, nodata)\n \n \n-def union(windows):\n+@iter_args\n+def union(*windows):\n \"\"\"Union windows and return the outermost extent they cover.\n \n Parameters\n@@ -38,7 +55,8 @@\n return window_union(windows)\n \n \n-def intersection(windows):\n+@iter_args\n+def intersection(*windows):\n \"\"\"Intersect windows and return the innermost extent they cover.\n \n Will raise ValueError if windows do not intersect.\n@@ -56,7 +74,8 @@\n return window_intersection(windows)\n \n \n-def intersect(windows):\n+@iter_args\n+def intersect(*windows):\n \"\"\"Test if windows intersect.\n \n Parameters\n", "issue": "empty window intersection\nI've been playing around with the cool windowing functions @brendan-ward put in a while back. I'm wondering if this is the desired behavior when two windows intersect at a point or along a row/column. Because of the ways windows work in rasterio, the upper row/column bound is open, so `windows_intersect` is returning `True` but returning an empty array, e.g.\n\n```\n>>> with rasterio.open('tests/data/RGB.byte.tif') as src:\n... w1 = ((0, 2), (0, 2))\n... w2 = ((2, 4), (2, 4))\n... if windows_intersect((w1, w2)): #returns True\n... print(src.read(1, window=window_intersection((w1, w2))))\n[]\n```\n\nTrue, the windows do intersect at a point, but I assume the way people will use `window_intersection` is to expect a window that they can use in further processing. 
\n\n", "before_files": [{"content": "\"\"\"Windows and related functions.\"\"\"\n\n\ndef get_data_window(arr, nodata=None):\n \"\"\"Return a window for the non-nodata pixels within the input array.\n\n Parameters\n ----------\n arr: numpy ndarray, <= 3 dimensions\n nodata: number\n If None, will either return a full window if arr is not a masked\n array, or will use the mask to determine non-nodata pixels.\n If provided, it must be a number within the valid range of the dtype\n of the input array.\n\n Returns\n -------\n ((row_start, row_stop), (col_start, col_stop))\n\n \"\"\"\n from rasterio._io import get_data_window\n return get_data_window(arr, nodata)\n\n\ndef union(windows):\n \"\"\"Union windows and return the outermost extent they cover.\n\n Parameters\n ----------\n windows: list-like of window objects\n ((row_start, row_stop), (col_start, col_stop))\n\n Returns\n -------\n ((row_start, row_stop), (col_start, col_stop))\n \"\"\"\n from rasterio._io import window_union\n return window_union(windows)\n\n\ndef intersection(windows):\n \"\"\"Intersect windows and return the innermost extent they cover.\n\n Will raise ValueError if windows do not intersect.\n\n Parameters\n ----------\n windows: list-like of window objects\n ((row_start, row_stop), (col_start, col_stop))\n\n Returns\n -------\n ((row_start, row_stop), (col_start, col_stop))\n \"\"\"\n from rasterio._io import window_intersection\n return window_intersection(windows)\n\n\ndef intersect(windows):\n \"\"\"Test if windows intersect.\n\n Parameters\n ----------\n windows: list-like of window objects\n ((row_start, row_stop), (col_start, col_stop))\n\n Returns\n -------\n boolean:\n True if all windows intersect.\n \"\"\"\n from rasterio._io import windows_intersect\n return windows_intersect(windows)\n", "path": "rasterio/windows.py"}]} | 1,315 | 352 |
gh_patches_debug_26285 | rasdani/github-patches | git_diff | pypi__warehouse-568 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Use the latest version of pip-tools
</issue>
<code>
[start of setup.py]
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 import os
14
15 import setuptools
16
17
18 base_dir = os.path.dirname(__file__)
19
20 about = {}
21 with open(os.path.join(base_dir, "warehouse", "__about__.py")) as f:
22 exec(f.read(), about)
23
24 with open(os.path.join(base_dir, "README.rst")) as f:
25 long_description = f.read()
26
27
28 setuptools.setup(
29 name=about["__title__"],
30 version=about["__version__"],
31
32 description=about["__summary__"],
33 long_description=long_description,
34 license=about["__license__"],
35 url=about["__uri__"],
36
37 author=about["__author__"],
38 author_email=about["__email__"],
39
40 classifiers=[
41 "Intended Audience :: Developers",
42
43 "License :: OSI Approved :: Apache Software License",
44
45 "Programming Language :: Python",
46 "Programming Language :: Python :: 3",
47 "Programming Language :: Python :: 3.4",
48 ],
49
50 packages=[
51 "warehouse",
52 "warehouse.accounts",
53 "warehouse.cache",
54 "warehouse.cache.origin",
55 "warehouse.classifiers",
56 "warehouse.cli",
57 "warehouse.cli.db",
58 "warehouse.i18n",
59 "warehouse.legacy",
60 "warehouse.legacy.api",
61 "warehouse.migrations",
62 "warehouse.packaging",
63 "warehouse.utils",
64 ],
65
66 include_package_data=True,
67
68 install_requires=[
69 "alembic>=0.7.0",
70 "Babel",
71 "bcrypt",
72 "boto3",
73 "click",
74 "fs",
75 "gunicorn",
76 "hiredis",
77 "html5lib",
78 "itsdangerous",
79 "msgpack-python",
80 "passlib>=1.6.4",
81 "psycopg2",
82 "pyramid>=1.6a1",
83 "pyramid_jinja2>=2.5",
84 "pyramid_multiauth",
85 "pyramid_services",
86 "pyramid_tm>=0.12",
87 "readme>=0.5.1",
88 "redis",
89 "setproctitle",
90 "sqlalchemy>=0.9",
91 "sqlalchemy-citext",
92 "structlog",
93 "transaction",
94 "WTForms>=2.0.0",
95 "zope.sqlalchemy",
96 ],
97
98 entry_points={
99 "console_scripts": [
100 "warehouse = warehouse.cli:warehouse",
101 ],
102 },
103 )
104
[end of setup.py]
[start of tasks/pip.py]
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 import os
14 import re
15
16 import invoke
17
18
19 REQUIREMENTS_HEADER = """
20 #
21 # This file is autogenerated by pip-compile
22 # Make changes in setup.py, then run this to update:
23 #
24 # $ invoke pip.compile
25 #
26
27 -f https://github.com/Pylons/webob/archive/master.zip#egg=webob-1.5.dev0
28
29 """.lstrip()
30
31
32 @invoke.task
33 def compile():
34 with open("requirements.in", "w") as fp:
35 fp.write("-e .")
36
37 try:
38 invoke.run("pip-compile --no-header requirements.in", hide="out")
39 finally:
40 os.remove("requirements.in")
41
42 lines = [REQUIREMENTS_HEADER]
43 with open("requirements.txt", "r") as fp:
44 for line in fp:
45 line = re.sub(r"^webob==(\S+)(.*)$", r"webob==1.5.dev0\2", line)
46
47 # The boto3 wheel includes a futures==2.2.0 even though that is a
48 # Python 2 only dependency. This dependency comes by default on
49 # Python 3, so the backport is never needed. See boto/boto3#163.
50 if re.search(r"^futures==2\.2\.0", line.strip()) is not None:
51 continue
52
53 if re.search(r"^-e file:///.+/warehouse$", line.strip()) is None:
54 lines.append(line)
55
56 with open("requirements.txt", "w") as fp:
57 for line in lines:
58 fp.write(line)
59
[end of tasks/pip.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -76,6 +76,7 @@
"hiredis",
"html5lib",
"itsdangerous",
+ "Jinja2>=2.8",
"msgpack-python",
"passlib>=1.6.4",
"psycopg2",
@@ -91,6 +92,7 @@
"sqlalchemy-citext",
"structlog",
"transaction",
+ "WebOb>=1.5.0a0",
"WTForms>=2.0.0",
"zope.sqlalchemy",
],
diff --git a/tasks/pip.py b/tasks/pip.py
--- a/tasks/pip.py
+++ b/tasks/pip.py
@@ -24,8 +24,6 @@
# $ invoke pip.compile
#
--f https://github.com/Pylons/webob/archive/master.zip#egg=webob-1.5.dev0
-
""".lstrip()
@@ -42,8 +40,6 @@
lines = [REQUIREMENTS_HEADER]
with open("requirements.txt", "r") as fp:
for line in fp:
- line = re.sub(r"^webob==(\S+)(.*)$", r"webob==1.5.dev0\2", line)
-
# The boto3 wheel includes a futures==2.2.0 even though that is a
# Python 2 only dependency. This dependency comes by default on
# Python 3, so the backport is never needed. See boto/boto3#163.
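For clarity, the post-processing filters that survive the patch can be exercised in isolation; the sample requirement lines below are made up:

```python
import re

lines = ["futures==2.2.0\n", "boto3==1.1.1\n", "-e file:///home/user/warehouse\n"]
kept = [
    line for line in lines
    # drop the Python-2-only futures backport pulled in by the boto3 wheel
    if re.search(r"^futures==2\.2\.0", line.strip()) is None
    # drop the local editable install of warehouse itself
    and re.search(r"^-e file:///.+/warehouse$", line.strip()) is None
]
assert kept == ["boto3==1.1.1\n"]
```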
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -76,6 +76,7 @@\n \"hiredis\",\n \"html5lib\",\n \"itsdangerous\",\n+ \"Jinja2>=2.8\",\n \"msgpack-python\",\n \"passlib>=1.6.4\",\n \"psycopg2\",\n@@ -91,6 +92,7 @@\n \"sqlalchemy-citext\",\n \"structlog\",\n \"transaction\",\n+ \"WebOb>=1.5.0a0\",\n \"WTForms>=2.0.0\",\n \"zope.sqlalchemy\",\n ],\ndiff --git a/tasks/pip.py b/tasks/pip.py\n--- a/tasks/pip.py\n+++ b/tasks/pip.py\n@@ -24,8 +24,6 @@\n # $ invoke pip.compile\n #\n \n--f https://github.com/Pylons/webob/archive/master.zip#egg=webob-1.5.dev0\n-\n \"\"\".lstrip()\n \n \n@@ -42,8 +40,6 @@\n lines = [REQUIREMENTS_HEADER]\n with open(\"requirements.txt\", \"r\") as fp:\n for line in fp:\n- line = re.sub(r\"^webob==(\\S+)(.*)$\", r\"webob==1.5.dev0\\2\", line)\n-\n # The boto3 wheel includes a futures==2.2.0 even though that is a\n # Python 2 only dependency. This dependency comes by default on\n # Python 3, so the backport is never needed. See boto/boto3#163.\n", "issue": "Use the latest version of pip-tools\n\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\n\nimport setuptools\n\n\nbase_dir = os.path.dirname(__file__)\n\nabout = {}\nwith open(os.path.join(base_dir, \"warehouse\", \"__about__.py\")) as f:\n exec(f.read(), about)\n\nwith open(os.path.join(base_dir, \"README.rst\")) as f:\n long_description = f.read()\n\n\nsetuptools.setup(\n name=about[\"__title__\"],\n version=about[\"__version__\"],\n\n description=about[\"__summary__\"],\n long_description=long_description,\n license=about[\"__license__\"],\n url=about[\"__uri__\"],\n\n author=about[\"__author__\"],\n author_email=about[\"__email__\"],\n\n classifiers=[\n \"Intended Audience :: Developers\",\n\n \"License :: OSI Approved :: Apache Software License\",\n\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.4\",\n ],\n\n packages=[\n \"warehouse\",\n \"warehouse.accounts\",\n \"warehouse.cache\",\n \"warehouse.cache.origin\",\n \"warehouse.classifiers\",\n \"warehouse.cli\",\n \"warehouse.cli.db\",\n \"warehouse.i18n\",\n \"warehouse.legacy\",\n \"warehouse.legacy.api\",\n \"warehouse.migrations\",\n \"warehouse.packaging\",\n \"warehouse.utils\",\n ],\n\n include_package_data=True,\n\n install_requires=[\n \"alembic>=0.7.0\",\n \"Babel\",\n \"bcrypt\",\n \"boto3\",\n \"click\",\n \"fs\",\n \"gunicorn\",\n \"hiredis\",\n \"html5lib\",\n \"itsdangerous\",\n \"msgpack-python\",\n \"passlib>=1.6.4\",\n \"psycopg2\",\n \"pyramid>=1.6a1\",\n \"pyramid_jinja2>=2.5\",\n \"pyramid_multiauth\",\n \"pyramid_services\",\n \"pyramid_tm>=0.12\",\n \"readme>=0.5.1\",\n \"redis\",\n \"setproctitle\",\n \"sqlalchemy>=0.9\",\n \"sqlalchemy-citext\",\n \"structlog\",\n \"transaction\",\n \"WTForms>=2.0.0\",\n \"zope.sqlalchemy\",\n ],\n\n entry_points={\n \"console_scripts\": [\n \"warehouse = warehouse.cli:warehouse\",\n ],\n },\n)\n", "path": "setup.py"}, {"content": "# Licensed under the 
Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport re\n\nimport invoke\n\n\nREQUIREMENTS_HEADER = \"\"\"\n#\n# This file is autogenerated by pip-compile\n# Make changes in setup.py, then run this to update:\n#\n# $ invoke pip.compile\n#\n\n-f https://github.com/Pylons/webob/archive/master.zip#egg=webob-1.5.dev0\n\n\"\"\".lstrip()\n\n\[email protected]\ndef compile():\n with open(\"requirements.in\", \"w\") as fp:\n fp.write(\"-e .\")\n\n try:\n invoke.run(\"pip-compile --no-header requirements.in\", hide=\"out\")\n finally:\n os.remove(\"requirements.in\")\n\n lines = [REQUIREMENTS_HEADER]\n with open(\"requirements.txt\", \"r\") as fp:\n for line in fp:\n line = re.sub(r\"^webob==(\\S+)(.*)$\", r\"webob==1.5.dev0\\2\", line)\n\n # The boto3 wheel includes a futures==2.2.0 even though that is a\n # Python 2 only dependency. This dependency comes by default on\n # Python 3, so the backport is never needed. See boto/boto3#163.\n if re.search(r\"^futures==2\\.2\\.0\", line.strip()) is not None:\n continue\n\n if re.search(r\"^-e file:///.+/warehouse$\", line.strip()) is None:\n lines.append(line)\n\n with open(\"requirements.txt\", \"w\") as fp:\n for line in lines:\n fp.write(line)\n", "path": "tasks/pip.py"}]} | 1,964 | 362 |